kernel/sched/pelt.c


DEFINITIONS

This source file includes the following definitions.
  1. decay_load
  2. __accumulate_pelt_segments
  3. accumulate_sum
  4. ___update_load_sum
  5. ___update_load_avg
  6. __update_load_avg_blocked_se
  7. __update_load_avg_se
  8. __update_load_avg_cfs_rq
  9. update_rt_rq_load_avg
  10. update_dl_rq_load_avg
  11. update_irq_load_avg

// SPDX-License-Identifier: GPL-2.0
/*
 * Per Entity Load Tracking
 *
 *  Copyright (C) 2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 *  Interactivity improvements by Mike Galbraith
 *  (C) 2007 Mike Galbraith <efault@gmx.de>
 *
 *  Various enhancements by Dmitry Adamushko.
 *  (C) 2007 Dmitry Adamushko <dmitry.adamushko@gmail.com>
 *
 *  Group scheduling enhancements by Srivatsa Vaddagiri
 *  Copyright IBM Corporation, 2007
 *  Author: Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com>
 *
 *  Scaled math optimizations by Thomas Gleixner
 *  Copyright (C) 2007, Thomas Gleixner <tglx@linutronix.de>
 *
 *  Adaptive scheduling granularity, math enhancements by Peter Zijlstra
 *  Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
 *
 *  Move PELT related code from fair.c into this pelt.c file
 *  Author: Vincent Guittot <vincent.guittot@linaro.org>
 */

#include <linux/sched.h>
#include "sched.h"
#include "pelt.h"

#include <trace/events/sched.h>

/*
 * Approximate:
 *   val * y^n,    where y^32 ~= 0.5 (~1 scheduling period)
 */
static u64 decay_load(u64 val, u64 n)
{
        unsigned int local_n;

        if (unlikely(n > LOAD_AVG_PERIOD * 63))
                return 0;

        /* after bounds checking we can collapse to 32-bit */
        local_n = n;

        /*
         * As y^PERIOD = 1/2, we can combine
         *    y^n = 1/2^(n/PERIOD) * y^(n%PERIOD)
         * With a look-up table which covers y^n (n<PERIOD)
         *
         * To achieve constant time decay_load.
         */
        if (unlikely(local_n >= LOAD_AVG_PERIOD)) {
                val >>= local_n / LOAD_AVG_PERIOD;
                local_n %= LOAD_AVG_PERIOD;
        }

        val = mul_u64_u32_shr(val, runnable_avg_yN_inv[local_n], 32);
        return val;
}
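
/*
 * Illustrative sketch (not part of the original source): with
 * LOAD_AVG_PERIOD == 32 and y^32 == 1/2, a few sample points of
 * decay_load() behave roughly as follows:
 *
 *   decay_load(1024, 32) ~= 512   (one full period halves the value)
 *   decay_load(1024, 64) ~= 256   (two periods quarter it)
 *   decay_load(1024, 16) ~= 724   (1024 * 0.5^(16/32))
 *
 * and for n > 32 * 63 the result is forced to 0, since the value would
 * have been right-shifted by at least 63 bits anyway.
 */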

static u32 __accumulate_pelt_segments(u64 periods, u32 d1, u32 d3)
{
        u32 c1, c2, c3 = d3; /* y^0 == 1 */

        /*
         * c1 = d1 y^p
         */
        c1 = decay_load((u64)d1, periods);

        /*
         *            p-1
         * c2 = 1024 \Sum y^n
         *            n=1
         *
         *              inf        inf
         *    = 1024 ( \Sum y^n - \Sum y^n - y^0 )
         *              n=0        n=p
         */
        c2 = LOAD_AVG_MAX - decay_load(LOAD_AVG_MAX, periods) - 1024;

        return c1 + c2 + c3;
}
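
/*
 * Worked example (illustration only, values are approximate): suppose
 * p = 2 periods have elapsed, the old period was missing d1 = 512us and
 * the new one has accrued d3 = 256us. Then:
 *
 *   c1 = 512 * y^2  ~= 490
 *   c2 = 1024 * y^1 ~= 1002   (the single full period in between)
 *   c3 = 256 * y^0   = 256
 *
 * giving a contribution of roughly 1748 for this window, before it is
 * weighted by load/runnable/running in accumulate_sum().
 */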

#define cap_scale(v, s) ((v)*(s) >> SCHED_CAPACITY_SHIFT)

/*
 * Accumulate the three separate parts of the sum; d1 the remainder
 * of the last (incomplete) period, d2 the span of full periods and d3
 * the remainder of the (incomplete) current period.
 *
 *           d1          d2           d3
 *           ^           ^            ^
 *           |           |            |
 *         |<->|<----------------->|<--->|
 * ... |---x---|------| ... |------|-----x (now)
 *
 *                           p-1
 * u' = (u + d1) y^p + 1024 \Sum y^n + d3 y^0
 *                           n=1
 *
 *    = u y^p +                                 (Step 1)
 *
 *                     p-1
 *      d1 y^p + 1024 \Sum y^n + d3 y^0         (Step 2)
 *                     n=1
 */
static __always_inline u32
accumulate_sum(u64 delta, struct sched_avg *sa,
               unsigned long load, unsigned long runnable, int running)
{
        u32 contrib = (u32)delta; /* p == 0 -> delta < 1024 */
        u64 periods;

        delta += sa->period_contrib;
        periods = delta / 1024; /* A period is 1024us (~1ms) */

        /*
         * Step 1: decay old *_sum if we crossed period boundaries.
         */
        if (periods) {
                sa->load_sum = decay_load(sa->load_sum, periods);
                sa->runnable_load_sum =
                        decay_load(sa->runnable_load_sum, periods);
                sa->util_sum = decay_load((u64)(sa->util_sum), periods);

                /*
                 * Step 2
                 */
                delta %= 1024;
                contrib = __accumulate_pelt_segments(periods,
                                1024 - sa->period_contrib, delta);
        }
        sa->period_contrib = delta;

        if (load)
                sa->load_sum += load * contrib;
        if (runnable)
                sa->runnable_load_sum += runnable * contrib;
        if (running)
                sa->util_sum += contrib << SCHED_CAPACITY_SHIFT;

        return periods;
}
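
/*
 * Illustration (not in the original source): with a carried-over
 * period_contrib of 200us and a new delta of 2300us, the call sees
 * delta = 2500us, i.e. periods = 2, d1 = 1024 - 200 = 824us and
 * d3 = 2500 % 1024 = 452us; period_contrib is then left at 452us so the
 * partial period is not counted twice on the next update.
 */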

/*
 * We can represent the historical contribution to runnable average as the
 * coefficients of a geometric series.  To do this we sub-divide our runnable
 * history into segments of approximately 1ms (1024us); label the segment that
 * occurred N-ms ago p_N, with p_0 corresponding to the current period, e.g.
 *
 * [<- 1024us ->|<- 1024us ->|<- 1024us ->| ...
 *      p0            p1           p2
 *     (now)       (~1ms ago)  (~2ms ago)
 *
 * Let u_i denote the fraction of p_i that the entity was runnable.
 *
 * We then designate the fractions u_i as our coefficients, yielding the
 * following representation of historical load:
 *   u_0 + u_1*y + u_2*y^2 + u_3*y^3 + ...
 *
 * We choose y based on the width of a reasonable scheduling period, fixing:
 *   y^32 = 0.5
 *
 * This means that the contribution to load ~32ms ago (u_32) will be weighted
 * approximately half as much as the contribution to load within the last ms
 * (u_0).
 *
 * When a period "rolls over" and we have new u_0`, multiplying the previous
 * sum again by y is sufficient to update:
 *   load_avg = u_0` + y*(u_0 + u_1*y + u_2*y^2 + ... )
 *            = u_0 + u_1*y + u_2*y^2 + ... [re-labeling u_i --> u_{i+1}]
 */
static __always_inline int
___update_load_sum(u64 now, struct sched_avg *sa,
                  unsigned long load, unsigned long runnable, int running)
{
        u64 delta;

        delta = now - sa->last_update_time;
        /*
         * This should only happen when time goes backwards, which it
         * unfortunately does during sched clock init when we swap over to TSC.
         */
        if ((s64)delta < 0) {
                sa->last_update_time = now;
                return 0;
        }

        /*
         * Use 1024ns as the unit of measurement since it's a reasonable
         * approximation of 1us and fast to compute.
         */
        delta >>= 10;
        if (!delta)
                return 0;

        sa->last_update_time += delta << 10;

        /*
         * running is a subset of runnable (weight), so running can't be set
         * if runnable is clear. But there are corner cases where the current
         * se has already been dequeued while cfs_rq->curr still points to it.
         * In that case weight is 0 but running is still set; the same can
         * happen for a cfs_rq when it becomes idle. As an example, this
         * happens during idle_balance(), which calls
         * update_blocked_averages().
         */
        if (!load)
                runnable = running = 0;

        /*
         * Now we know we crossed measurement unit boundaries. The *_avg
         * accrues by two steps:
         *
         * Step 1: accumulate *_sum since last_update_time. If we haven't
         * crossed period boundaries, finish.
         */
        if (!accumulate_sum(delta, sa, load, runnable, running))
                return 0;

        return 1;
}
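
/*
 * For illustration (not part of the original file): a raw delta of
 * 2,500,000ns becomes 2,500,000 >> 10 = 2441 units of ~1us each, and
 * last_update_time is advanced by 2441 << 10 = 2,499,584ns, so the
 * remaining 416ns are carried over to the next update rather than lost.
 */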

static __always_inline void
___update_load_avg(struct sched_avg *sa, unsigned long load, unsigned long runnable)
{
        u32 divider = LOAD_AVG_MAX - 1024 + sa->period_contrib;

        /*
         * Step 2: update *_avg.
         */
        sa->load_avg = div_u64(load * sa->load_sum, divider);
        sa->runnable_load_avg = div_u64(runnable * sa->runnable_load_sum, divider);
        WRITE_ONCE(sa->util_avg, sa->util_sum / divider);
}
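
/*
 * Rough sanity check (illustrative, not from the source): the divider is
 * the largest value a fully-accumulated *_sum can reach given the current
 * partial period, with LOAD_AVG_MAX = 1024 * (1 + y + y^2 + ...) ~= 47742
 * and y = 0.5^(1/32) ~= 0.97857. For an always-running entity, util_sum
 * saturates near divider << SCHED_CAPACITY_SHIFT, so
 * util_avg = util_sum / divider approaches SCHED_CAPACITY_SCALE (1024).
 */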

/*
 * sched_entity:
 *
 *   task:
 *     se_runnable() == se_weight()
 *
 *   group: [ see update_cfs_group() ]
 *     se_weight()   = tg->weight * grq->load_avg / tg->load_avg
 *     se_runnable() = se_weight(se) * grq->runnable_load_avg / grq->load_avg
 *
 *   load_sum := runnable_sum
 *   load_avg = se_weight(se) * runnable_avg
 *
 *   runnable_load_sum := runnable_sum
 *   runnable_load_avg = se_runnable(se) * runnable_avg
 *
 * XXX collapse load_sum and runnable_load_sum
 *
 * cfs_rq:
 *
 *   load_sum = \Sum se_weight(se) * se->avg.load_sum
 *   load_avg = \Sum se->avg.load_avg
 *
 *   runnable_load_sum = \Sum se_runnable(se) * se->avg.runnable_load_sum
 *   runnable_load_avg = \Sum se->avg.runnable_load_avg
 */

int __update_load_avg_blocked_se(u64 now, struct sched_entity *se)
{
        if (___update_load_sum(now, &se->avg, 0, 0, 0)) {
                ___update_load_avg(&se->avg, se_weight(se), se_runnable(se));
                trace_pelt_se_tp(se);
                return 1;
        }

        return 0;
}

int __update_load_avg_se(u64 now, struct cfs_rq *cfs_rq, struct sched_entity *se)
{
        if (___update_load_sum(now, &se->avg, !!se->on_rq, !!se->on_rq,
                                cfs_rq->curr == se)) {

                ___update_load_avg(&se->avg, se_weight(se), se_runnable(se));
                cfs_se_util_change(&se->avg);
                trace_pelt_se_tp(se);
                return 1;
        }

        return 0;
}

int __update_load_avg_cfs_rq(u64 now, struct cfs_rq *cfs_rq)
{
        if (___update_load_sum(now, &cfs_rq->avg,
                                scale_load_down(cfs_rq->load.weight),
                                scale_load_down(cfs_rq->runnable_weight),
                                cfs_rq->curr != NULL)) {

                ___update_load_avg(&cfs_rq->avg, 1, 1);
                trace_pelt_cfs_tp(cfs_rq);
                return 1;
        }

        return 0;
}

/*
 * rt_rq:
 *
 *   util_sum = \Sum se->avg.util_sum but se->avg.util_sum is not tracked
 *   util_sum = cpu_scale * load_sum
 *   runnable_load_sum = load_sum
 *
 *   load_avg and runnable_load_avg are not supported and are meaningless.
 *
 */

int update_rt_rq_load_avg(u64 now, struct rq *rq, int running)
{
        if (___update_load_sum(now, &rq->avg_rt,
                                running,
                                running,
                                running)) {

                ___update_load_avg(&rq->avg_rt, 1, 1);
                trace_pelt_rt_tp(rq);
                return 1;
        }

        return 0;
}
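
/*
 * Illustration (an assumption for exposition, not in the source): because
 * the same 'running' flag feeds load, runnable and running here,
 * rq->avg_rt.util_avg ends up tracking the fraction of recent time the CPU
 * spent running RT tasks, scaled to SCHED_CAPACITY_SCALE. A CPU busy with
 * RT work about 25% of the time would converge towards util_avg ~= 256.
 */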

/*
 * dl_rq:
 *
 *   util_sum = \Sum se->avg.util_sum but se->avg.util_sum is not tracked
 *   util_sum = cpu_scale * load_sum
 *   runnable_load_sum = load_sum
 *
 */

int update_dl_rq_load_avg(u64 now, struct rq *rq, int running)
{
        if (___update_load_sum(now, &rq->avg_dl,
                                running,
                                running,
                                running)) {

                ___update_load_avg(&rq->avg_dl, 1, 1);
                trace_pelt_dl_tp(rq);
                return 1;
        }

        return 0;
}

#ifdef CONFIG_HAVE_SCHED_AVG_IRQ
/*
 * irq:
 *
 *   util_sum = \Sum se->avg.util_sum but se->avg.util_sum is not tracked
 *   util_sum = cpu_scale * load_sum
 *   runnable_load_sum = load_sum
 *
 */

int update_irq_load_avg(struct rq *rq, u64 running)
{
        int ret = 0;

        /*
         * We can't use clock_pelt because irq time is not accounted in
         * clock_task. Instead we directly scale the running time to
         * reflect the real amount of computation
         */
        running = cap_scale(running, arch_scale_freq_capacity(cpu_of(rq)));
        running = cap_scale(running, arch_scale_cpu_capacity(cpu_of(rq)));
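
        /*
         * Worked example (illustrative values, not from the source): with
         * arch_scale_freq_capacity() == 512 (CPU at half speed) and
         * arch_scale_cpu_capacity() == 512 (a CPU half as capable as the
         * biggest one), 1,000,000ns of irq time is scaled to
         * 1,000,000 * 512 >> 10 = 500,000 and then to 250,000ns of
         * "full-capacity" compute before being fed into the PELT sums.
         */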

        /*
         * We know how much time has been used by interrupts since the last
         * update, but we don't know when they ran. Be pessimistic and assume
         * the interrupt time was spent just before this update. This is not
         * far from reality, because an interrupt will most probably wake up a
         * task and trigger an update of the rq clock, during which the metric
         * is updated.
         * We start decaying over the normal context time and then add the
         * interrupt context time.
         * We can safely remove running from rq->clock because
         * rq->clock += delta with delta >= running
         */
        ret = ___update_load_sum(rq->clock - running, &rq->avg_irq,
                                0,
                                0,
                                0);
        ret += ___update_load_sum(rq->clock, &rq->avg_irq,
                                1,
                                1,
                                1);

        if (ret) {
                ___update_load_avg(&rq->avg_irq, 1, 1);
                trace_pelt_irq_tp(rq);
        }

        return ret;
}
#endif
