root/drivers/cpuidle/governors/menu.c

DEFINITIONS

This source file includes the following definitions:
  1. which_bucket
  2. performance_multiplier
  3. get_typical_interval
  4. menu_select
  5. menu_reflect
  6. menu_update
  7. menu_enable_device
  8. init_menu

   1 // SPDX-License-Identifier: GPL-2.0-only
   2 /*
   3  * menu.c - the menu idle governor
   4  *
   5  * Copyright (C) 2006-2007 Adam Belay <abelay@novell.com>
   6  * Copyright (C) 2009 Intel Corporation
   7  * Author:
   8  *        Arjan van de Ven <arjan@linux.intel.com>
   9  */
  10 
  11 #include <linux/kernel.h>
  12 #include <linux/cpuidle.h>
  13 #include <linux/time.h>
  14 #include <linux/ktime.h>
  15 #include <linux/hrtimer.h>
  16 #include <linux/tick.h>
  17 #include <linux/sched.h>
  18 #include <linux/sched/loadavg.h>
  19 #include <linux/sched/stat.h>
  20 #include <linux/math64.h>
  21 
  22 /*
  23  * Please note when changing the tuning values:
  24  * If (MAX_INTERESTING-1) * RESOLUTION > UINT_MAX, the result of
  25  * a scaling multiplication may overflow on 32 bit platforms.
  26  * In that case, #define RESOLUTION as ULL to get a 64 bit result:
  27  * #define RESOLUTION 1024ULL
  28  *
  29  * The default values do not overflow.
  30  */
  31 #define BUCKETS 12
  32 #define INTERVAL_SHIFT 3
  33 #define INTERVALS (1UL << INTERVAL_SHIFT)
  34 #define RESOLUTION 1024
  35 #define DECAY 8
  36 #define MAX_INTERESTING 50000
  37 
  38 
  39 /*
  40  * Concepts and ideas behind the menu governor
  41  *
  42  * For the menu governor, there are 3 decision factors for picking a C
  43  * state:
  44  * 1) Energy break even point
  45  * 2) Performance impact
  46  * 3) Latency tolerance (from pmqos infrastructure)
  47  * These three factors are treated independently.
  48  *
  49  * Energy break even point
  50  * -----------------------
  51  * C state entry and exit have an energy cost, and a certain amount of time in
  52  * the  C state is required to actually break even on this cost. CPUIDLE
  53  * provides us this duration in the "target_residency" field. So all that we
  54  * need is a good prediction of how long we'll be idle. Like the traditional
  55  * menu governor, we start with the actual known "next timer event" time.
  56  *
  57  * Since there are other sources of wakeups (interrupts for example) than
  58  * the next timer event, this estimate is rather optimistic. To get a
  59  * more realistic estimate, a correction factor based on historic behavior
  60  * is applied to it. For example, if in the past the actual idle duration
  61  * was always 50% of the time until the next timer event, the correction
  62  * factor will be 0.5.
  63  *
  64  * menu uses a running average for this correction factor; however, it uses
  65  * a set of factors, not just a single factor. This stems from the realization
  66  * that the ratio depends on the order of magnitude of the expected
  67  * duration: if we expect 500 milliseconds of idle time, the likelihood of
  68  * getting an interrupt very early is much higher than if we expect 50
  69  * microseconds of idle time. A second independent factor that has a big
  70  * impact on the actual ratio is whether there is (disk) IO outstanding or not.
  71  * (As a special twist, we consider every sleep longer than 50 milliseconds
  72  * as perfect; there are no power gains for sleeping longer than this.)
  73  *
  74  * For these two reasons we keep an array of 12 independent factors that is
  75  * indexed based on the magnitude of the expected duration as well as the
  76  * "is IO outstanding" property.
  77  *
  78  * Repeatable-interval-detector
  79  * ----------------------------
  80  * There are some cases where "next timer" is a completely unusable predictor:
  81  * cases where the wakeup interval is fixed, for example due to hardware
  82  * interrupt mitigation, or due to fixed transfer rate devices such as
  83  * mice.
  84  * For this, we use a different predictor: We track the duration of the last 8
  85  * intervals and if the standard deviation of these 8 intervals is below a
  86  * threshold value, we use the average of these intervals as prediction.
  87  *
  88  * Limiting Performance Impact
  89  * ---------------------------
  90  * C states, especially those with large exit latencies, can have a very
  91  * noticeable impact on workloads, which is not acceptable to most sysadmins;
  92  * in addition, lower performance has a power price of its own.
  93  *
  94  * As a general rule of thumb, menu assumes that the following heuristic
  95  * holds:
  96  *     The busier the system, the less impact of C states is acceptable
  97  *
  98  * This rule-of-thumb is implemented using a performance-multiplier:
  99  * If the exit latency times the performance multiplier is longer than
 100  * the predicted duration, the C state is not considered a candidate
 101  * for selection due to a too high performance impact. So the higher
 102  * this multiplier is, the longer we need to be idle to pick a deep C
 103  * state, and thus the less likely a busy CPU will hit such a deep
 104  * C state.
 105  *
 106  * This multiplier is determined by the number of processes that are
 107  * waiting for IO on this CPU:
 108  * a value of 10 is added to the multiplier for each such process.
 109  * (this value was experimentally determined)
 110  *
 111  * The iowait count gives a cpu local, instantaneous input to the
 112  * decision: a CPU with tasks waiting for IO is considered busier, so
 113  * the multiplier grows and deep C states become harder to select,
 114  * in line with the rule of thumb above that the busier the system,
 115  * the less C state impact is acceptable.
 116  *
 117  */
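
     /*
      * A rough worked example of the correction-factor idea above, using the
      * RESOLUTION, DECAY and BUCKETS values defined in this file (the input
      * numbers are hypothetical and only meant for illustration):
      *
      *   - the next timer event is 10000 us away and no IO is outstanding,
      *     so which_bucket(10000, 0) selects bucket 4;
      *   - past idle periods booked against this bucket typically lasted
      *     about half of the timer distance, so correction_factor[4] has
      *     settled around RESOLUTION * DECAY / 2 = 4096;
      *   - the scaled prediction is then 10000 * 4096 / (RESOLUTION * DECAY)
      *     = 5000 us, and that value (possibly reduced further by
      *     get_typical_interval()) is what menu_select() compares against
      *     each state's target_residency.
      */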
 118 
 119 struct menu_device {
 120         int             needs_update;
 121         int             tick_wakeup;
 122 
 123         unsigned int    next_timer_us;
 124         unsigned int    bucket;
 125         unsigned int    correction_factor[BUCKETS];
 126         unsigned int    intervals[INTERVALS];
 127         int             interval_ptr;
 128 };
 129 
 130 static inline int which_bucket(unsigned int duration, unsigned long nr_iowaiters)
 131 {
 132         int bucket = 0;
 133 
 134         /*
 135          * We keep two groups of stats: one with
 136          * IO pending, one without.
 137          * This allows us to calculate
 138          * E(duration)|iowait
 139          */
 140         if (nr_iowaiters)
 141                 bucket = BUCKETS/2;
 142 
 143         if (duration < 10)
 144                 return bucket;
 145         if (duration < 100)
 146                 return bucket + 1;
 147         if (duration < 1000)
 148                 return bucket + 2;
 149         if (duration < 10000)
 150                 return bucket + 3;
 151         if (duration < 100000)
 152                 return bucket + 4;
 153         return bucket + 5;
 154 }
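
     /*
      * For illustration (hypothetical inputs): which_bucket(2500, 0) falls
      * through to the "duration < 10000" case and returns 3, while
      * which_bucket(2500, 2) starts from BUCKETS/2 = 6 because IO is
      * outstanding and returns 9.  Each of the 12 buckets gets its own
      * correction_factor[] entry in struct menu_device above.
      */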
 155 
 156 /*
 157  * Return a multiplier for the exit latency that is intended
 158  * to take performance requirements into account.
 159  * The more performance critical we estimate the system
 160  * to be, the higher this multiplier, and thus the higher
 161  * the barrier to go to an expensive C state.
 162  */
 163 static inline int performance_multiplier(unsigned long nr_iowaiters)
 164 {
 165         /* for each task waiting for IO on this CPU, add 10 to the multiplier */
 166         return 1 + 10 * nr_iowaiters;
 167 }
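
     /*
      * For illustration (hypothetical numbers): with two tasks waiting for IO
      * on this CPU the multiplier is 1 + 10 * 2 = 21.  While the tick has not
      * been stopped yet, menu_select() below divides the predicted idle
      * duration by this value, so a 4200 us prediction yields an effective
      * latency limit of 4200 / 21 = 200 us; states whose exit_latency exceeds
      * that limit (or the PM QoS limit, whichever is smaller) are not
      * considered.
      */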
 168 
 169 static DEFINE_PER_CPU(struct menu_device, menu_devices);
 170 
 171 static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev);
 172 
 173 /*
 174  * Try detecting repeating patterns by keeping track of the last 8
 175  * intervals, and checking if the standard deviation of that set
 176  * of points is below a threshold. If it is... then use the
 177  * average of these 8 points as the estimated value.
 178  */
 179 static unsigned int get_typical_interval(struct menu_device *data,
 180                                          unsigned int predicted_us)
 181 {
 182         int i, divisor;
 183         unsigned int min, max, thresh, avg;
 184         uint64_t sum, variance;
 185 
 186         thresh = INT_MAX; /* Discard outliers above this value */
 187 
 188 again:
 189 
 190         /* First calculate the average of past intervals */
 191         min = UINT_MAX;
 192         max = 0;
 193         sum = 0;
 194         divisor = 0;
 195         for (i = 0; i < INTERVALS; i++) {
 196                 unsigned int value = data->intervals[i];
 197                 if (value <= thresh) {
 198                         sum += value;
 199                         divisor++;
 200                         if (value > max)
 201                                 max = value;
 202 
 203                         if (value < min)
 204                                 min = value;
 205                 }
 206         }
 207 
 208         /*
 209          * If the result of the computation is going to be discarded anyway,
 210          * avoid the computation altogether.
 211          */
 212         if (min >= predicted_us)
 213                 return UINT_MAX;
 214 
 215         if (divisor == INTERVALS)
 216                 avg = sum >> INTERVAL_SHIFT;
 217         else
 218                 avg = div_u64(sum, divisor);
 219 
 220         /* Then try to determine variance */
 221         variance = 0;
 222         for (i = 0; i < INTERVALS; i++) {
 223                 unsigned int value = data->intervals[i];
 224                 if (value <= thresh) {
 225                         int64_t diff = (int64_t)value - avg;
 226                         variance += diff * diff;
 227                 }
 228         }
 229         if (divisor == INTERVALS)
 230                 variance >>= INTERVAL_SHIFT;
 231         else
 232                 do_div(variance, divisor);
 233 
 234         /*
 235          * The typical interval is obtained when standard deviation is
 236          * small (stddev <= 20 us, variance <= 400 us^2) or standard
 237          * deviation is small compared to the average interval (avg >
 238          * 6*stddev, avg^2 > 36*variance). The average is smaller than
 239          * UINT_MAX aka U32_MAX, so computing its square does not
 240          * overflow a u64. We simply reject this candidate average if
 241          * the standard deviation is greater than 715 s (which is
 242          * rather unlikely).
 243          *
 244          * Use this result only if there is no timer to wake us up sooner.
 245          */
 246         if (likely(variance <= U64_MAX/36)) {
 247                 if ((((u64)avg*avg > variance*36) && (divisor * 4 >= INTERVALS * 3))
 248                                                         || variance <= 400) {
 249                         return avg;
 250                 }
 251         }
 252 
 253         /*
 254          * If we have outliers to the upside in our distribution, discard
 255          * those by setting the threshold to exclude these outliers, then
 256          * calculate the average and standard deviation again. Once we get
 257          * down to the bottom 3/4 of our samples, stop excluding samples.
 258          *
 259          * This can deal with workloads that have long pauses interspersed
 260          * with sporadic activity with a bunch of short pauses.
 261          */
 262         if ((divisor * 4) <= INTERVALS * 3)
 263                 return UINT_MAX;
 264 
 265         thresh = max - 1;
 266         goto again;
 267 }
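
     /*
      * A worked example of the detector above (the eight samples are
      * hypothetical): with intervals of 490, 500, 510, 505, 495, 500, 498 and
      * 502 us the sum is 4000, so avg = 4000 >> 3 = 500 us.  The squared
      * deviations add up to 258, giving variance = 258 >> 3 = 32 us^2, i.e. a
      * standard deviation of roughly 6 us.  Both acceptance tests pass
      * (variance <= 400 and avg^2 = 250000 > 36 * 32), so 500 us is returned
      * as the typical interval.  Had the caller's predicted_us already been
      * 490 us or less, the function would have returned UINT_MAX up front,
      * because the min() in menu_select() would discard the result anyway.
      */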
 268 
 269 /**
 270  * menu_select - selects the next idle state to enter
 271  * @drv: cpuidle driver containing state data
 272  * @dev: the CPU
 273  * @stop_tick: indication on whether or not to stop the tick
 274  */
 275 static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
 276                        bool *stop_tick)
 277 {
 278         struct menu_device *data = this_cpu_ptr(&menu_devices);
 279         int latency_req = cpuidle_governor_latency_req(dev->cpu);
 280         int i;
 281         int idx;
 282         unsigned int interactivity_req;
 283         unsigned int predicted_us;
 284         unsigned long nr_iowaiters;
 285         ktime_t delta_next;
 286 
 287         if (data->needs_update) {
 288                 menu_update(drv, dev);
 289                 data->needs_update = 0;
 290         }
 291 
 292         /* determine the expected residency time, round up */
 293         data->next_timer_us = ktime_to_us(tick_nohz_get_sleep_length(&delta_next));
 294 
 295         nr_iowaiters = nr_iowait_cpu(dev->cpu);
 296         data->bucket = which_bucket(data->next_timer_us, nr_iowaiters);
 297 
 298         if (unlikely(drv->state_count <= 1 || latency_req == 0) ||
 299             ((data->next_timer_us < drv->states[1].target_residency ||
 300               latency_req < drv->states[1].exit_latency) &&
 301              !drv->states[0].disabled && !dev->states_usage[0].disable)) {
 302                 /*
 303                  * In this case state[0] will be used no matter what, so return
 304                  * it right away and keep the tick running if state[0] is a
 305                  * polling one.
 306                  */
 307                 *stop_tick = !(drv->states[0].flags & CPUIDLE_FLAG_POLLING);
 308                 return 0;
 309         }
 310 
 311         /*
 312          * Force the result of multiplication to be 64 bits even if both
 313          * operands are 32 bits.
 314          * Make sure to round up for half microseconds.
 315          */
 316         predicted_us = DIV_ROUND_CLOSEST_ULL((uint64_t)data->next_timer_us *
 317                                          data->correction_factor[data->bucket],
 318                                          RESOLUTION * DECAY);
 319         /*
 320          * Use the lowest expected idle interval to pick the idle state.
 321          */
 322         predicted_us = min(predicted_us, get_typical_interval(data, predicted_us));
 323 
 324         if (tick_nohz_tick_stopped()) {
 325                 /*
 326                  * If the tick is already stopped, the cost of possible short
 327                  * idle duration misprediction is much higher, because the CPU
 328                  * may be stuck in a shallow idle state for a long time as a
 329                  * result of it.  In that case say we might mispredict and use
 330                  * the known time till the closest timer event for the idle
 331                  * state selection.
 332                  */
 333                 if (predicted_us < TICK_USEC)
 334                         predicted_us = ktime_to_us(delta_next);
 335         } else {
 336                 /*
 337                  * Use the performance multiplier and the user-configurable
 338                  * latency_req to determine the maximum exit latency.
 339                  */
 340                 interactivity_req = predicted_us / performance_multiplier(nr_iowaiters);
 341                 if (latency_req > interactivity_req)
 342                         latency_req = interactivity_req;
 343         }
 344 
 345         /*
 346          * Find the idle state with the lowest power while satisfying
 347          * our constraints.
 348          */
 349         idx = -1;
 350         for (i = 0; i < drv->state_count; i++) {
 351                 struct cpuidle_state *s = &drv->states[i];
 352                 struct cpuidle_state_usage *su = &dev->states_usage[i];
 353 
 354                 if (s->disabled || su->disable)
 355                         continue;
 356 
 357                 if (idx == -1)
 358                         idx = i; /* first enabled state */
 359 
 360                 if (s->target_residency > predicted_us) {
 361                         /*
 362                          * Use a physical idle state, not busy polling, unless
 363                          * a timer is going to trigger soon enough.
 364                          */
 365                         if ((drv->states[idx].flags & CPUIDLE_FLAG_POLLING) &&
 366                             s->exit_latency <= latency_req &&
 367                             s->target_residency <= data->next_timer_us) {
 368                                 predicted_us = s->target_residency;
 369                                 idx = i;
 370                                 break;
 371                         }
 372                         if (predicted_us < TICK_USEC)
 373                                 break;
 374 
 375                         if (!tick_nohz_tick_stopped()) {
 376                                 /*
 377                                  * If the state selected so far is shallow,
 378                                  * waking up early won't hurt, so retain the
 379                                  * tick in that case and let the governor run
 380                                  * again in the next iteration of the loop.
 381                                  */
 382                                 predicted_us = drv->states[idx].target_residency;
 383                                 break;
 384                         }
 385 
 386                         /*
 387                          * If the state selected so far is shallow and this
 388                          * state's target residency matches the time till the
 389                          * closest timer event, select this one to avoid getting
 390                          * stuck in the shallow one for too long.
 391                          */
 392                         if (drv->states[idx].target_residency < TICK_USEC &&
 393                             s->target_residency <= ktime_to_us(delta_next))
 394                                 idx = i;
 395 
 396                         return idx;
 397                 }
 398                 if (s->exit_latency > latency_req)
 399                         break;
 400 
 401                 idx = i;
 402         }
 403 
 404         if (idx == -1)
 405                 idx = 0; /* No states enabled. Must use 0. */
 406 
 407         /*
 408          * Don't stop the tick if the selected state is a polling one or if the
 409          * expected idle duration is shorter than the tick period length.
 410          */
 411         if (((drv->states[idx].flags & CPUIDLE_FLAG_POLLING) ||
 412              predicted_us < TICK_USEC) && !tick_nohz_tick_stopped()) {
 413                 unsigned int delta_next_us = ktime_to_us(delta_next);
 414 
 415                 *stop_tick = false;
 416 
 417                 if (idx > 0 && drv->states[idx].target_residency > delta_next_us) {
 418                         /*
 419                          * The tick is not going to be stopped and the target
 420                          * residency of the state to be returned is not within
 421                          * the time until the next timer event including the
 422                          * tick, so try to correct that.
 423                          */
 424                         for (i = idx - 1; i >= 0; i--) {
 425                                 if (drv->states[i].disabled ||
 426                                     dev->states_usage[i].disable)
 427                                         continue;
 428 
 429                                 idx = i;
 430                                 if (drv->states[i].target_residency <= delta_next_us)
 431                                         break;
 432                         }
 433                 }
 434         }
 435 
 436         return idx;
 437 }
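
     /*
      * A compressed walkthrough of the selection logic above.  All numbers
      * are hypothetical and HZ=1000 is assumed, so TICK_USEC is 1000; the
      * tick has not been stopped yet.  Suppose next_timer_us = 10000, the
      * bucket's correction factor scales that to predicted_us = 5000,
      * get_typical_interval() sees no stable pattern and returns UINT_MAX,
      * and two tasks wait for IO, capping latency_req at 5000 / 21 = 238 us.
      * With enabled states of (target_residency, exit_latency) = (2, 1),
      * (100, 50), (2000, 150) and (8000, 400) us, the loop accepts the first
      * three and then hits the 8000 us state: its residency exceeds
      * predicted_us and the tick is still running, so predicted_us is lowered
      * to the 2000 us residency of the current pick and the loop stops.
      * Since 2000 us is not below TICK_USEC and the picked state is not a
      * polling one, the tick is allowed to be stopped and index 2 is returned.
      */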
 438 
 439 /**
 440  * menu_reflect - records that data structures need update
 441  * @dev: the CPU
 442  * @index: the index of actual entered state
 443  *
 444  * NOTE: it's important to be fast here because this operation will add to
 445  *       the overall exit latency.
 446  */
 447 static void menu_reflect(struct cpuidle_device *dev, int index)
 448 {
 449         struct menu_device *data = this_cpu_ptr(&menu_devices);
 450 
 451         dev->last_state_idx = index;
 452         data->needs_update = 1;
 453         data->tick_wakeup = tick_nohz_idle_got_tick();
 454 }
 455 
 456 /**
 457  * menu_update - attempts to guess what happened after entry
 458  * @drv: cpuidle driver containing state data
 459  * @dev: the CPU
 460  */
 461 static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev)
 462 {
 463         struct menu_device *data = this_cpu_ptr(&menu_devices);
 464         int last_idx = dev->last_state_idx;
 465         struct cpuidle_state *target = &drv->states[last_idx];
 466         unsigned int measured_us;
 467         unsigned int new_factor;
 468 
 469         /*
 470          * Try to figure out how much time passed between entry to low
 471          * power state and occurrence of the wakeup event.
 472          *
 473          * If the entered idle state didn't support residency measurements,
 474          * we use the measured value anyway if it is short, and if it is
 475          * long, we truncate it to the whole expected time.
 476          *
 477          * Any measured amount of time will include the exit latency.
 478          * Since we are interested in when the wakeup began, not when it
 479          * was completed, we must subtract the exit latency. However, if
 480          * the measured amount of time is less than the exit latency,
 481          * assume the state was never reached and the exit latency is 0.
 482          */
 483 
 484         if (data->tick_wakeup && data->next_timer_us > TICK_USEC) {
 485                 /*
 486                  * The nohz code said that there wouldn't be any events within
 487                  * the tick boundary (if the tick was stopped), but the idle
 488                  * duration predictor had a differing opinion.  Since the CPU
 489                  * was woken up by a tick (that wasn't stopped after all), the
 490                  * predictor was not quite right, so assume that the CPU could
 491                  * have been idle long (but not forever) to help the idle
 492                  * duration predictor do a better job next time.
 493                  */
 494                 measured_us = 9 * MAX_INTERESTING / 10;
 495         } else if ((drv->states[last_idx].flags & CPUIDLE_FLAG_POLLING) &&
 496                    dev->poll_time_limit) {
 497                 /*
 498                  * The CPU exited the "polling" state due to a time limit, so
 499                  * the idle duration prediction leading to the selection of that
 500                  * state was inaccurate.  If a better prediction had been made,
 501                  * the CPU might have been woken up from idle by the next timer.
 502                  * Assume that to be the case.
 503                  */
 504                 measured_us = data->next_timer_us;
 505         } else {
 506                 /* measured value */
 507                 measured_us = dev->last_residency;
 508 
 509                 /* Deduct exit latency */
 510                 if (measured_us > 2 * target->exit_latency)
 511                         measured_us -= target->exit_latency;
 512                 else
 513                         measured_us /= 2;
 514         }
 515 
 516         /* Make sure our coefficients do not exceed unity */
 517         if (measured_us > data->next_timer_us)
 518                 measured_us = data->next_timer_us;
 519 
 520         /* Update our correction ratio */
 521         new_factor = data->correction_factor[data->bucket];
 522         new_factor -= new_factor / DECAY;
 523 
 524         if (data->next_timer_us > 0 && measured_us < MAX_INTERESTING)
 525                 new_factor += RESOLUTION * measured_us / data->next_timer_us;
 526         else
 527                 /*
 528                  * we were idle so long that we count it as a perfect
 529                  * prediction
 530                  */
 531                 new_factor += RESOLUTION;
 532 
 533         /*
 534          * We don't want 0 as factor; we always want at least
 535          * a tiny bit of estimated time. Fortunately, due to rounding,
 536          * new_factor will stay nonzero regardless of measured_us values
 537          * and the compiler can eliminate this test as long as DECAY > 1.
 538          */
 539         if (DECAY == 1 && unlikely(new_factor == 0))
 540                 new_factor = 1;
 541 
 542         data->correction_factor[data->bucket] = new_factor;
 543 
 544         /* update the repeating-pattern data */
 545         data->intervals[data->interval_ptr++] = measured_us;
 546         if (data->interval_ptr >= INTERVALS)
 547                 data->interval_ptr = 0;
 548 }
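
     /*
      * A worked example of the update above (hypothetical numbers): starting
      * from the unity factor RESOLUTION * DECAY = 8192, with
      * next_timer_us = 1000 and measured_us = 500 the new factor becomes
      * 8192 - 8192/8 + 1024 * 500 / 1000 = 8192 - 1024 + 512 = 7680.  If the
      * CPU keeps sleeping for half of the predicted timer distance, the
      * factor converges to 4096, at which point menu_select() predicts
      * next_timer_us / 2, i.e. exactly the 0.5 ratio described in the comment
      * at the top of this file.
      */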
 549 
 550 /**
 551  * menu_enable_device - scans a CPU's states and does setup
 552  * @drv: cpuidle driver
 553  * @dev: the CPU
 554  */
 555 static int menu_enable_device(struct cpuidle_driver *drv,
 556                                 struct cpuidle_device *dev)
 557 {
 558         struct menu_device *data = &per_cpu(menu_devices, dev->cpu);
 559         int i;
 560 
 561         memset(data, 0, sizeof(struct menu_device));
 562 
 563         /*
 564          * If the correction factor is 0 (e.g. first time init or CPU hotplug),
 565          * we actually want to start out with a unity factor (RESOLUTION * DECAY).
 566          */
 567         for(i = 0; i < BUCKETS; i++)
 568                 data->correction_factor[i] = RESOLUTION * DECAY;
 569 
 570         return 0;
 571 }
 572 
 573 static struct cpuidle_governor menu_governor = {
 574         .name =         "menu",
 575         .rating =       20,
 576         .enable =       menu_enable_device,
 577         .select =       menu_select,
 578         .reflect =      menu_reflect,
 579 };
 580 
 581 /**
 582  * init_menu - initializes the governor
 583  */
 584 static int __init init_menu(void)
 585 {
 586         return cpuidle_register_governor(&menu_governor);
 587 }
 588 
 589 postcore_initcall(init_menu);
