kernel/irq/timings.c


DEFINITIONS

This source file includes the following definitions:
  1. irq_timings_enable
  2. irq_timings_disable
  3. irq_timings_ema_new
  4. irq_timings_next_event_index
  5. __irq_timings_next_event
  6. irq_timings_interval_index
  7. __irq_timings_store
  8. irq_timings_store
  9. irq_timings_next_event
  10. irq_timings_free
  11. irq_timings_alloc
  12. irq_timings_test_next_index
  13. irq_timings_next_index_selftest
  14. irq_timings_test_irqs
  15. irq_timings_irqs_selftest
  16. irq_timings_test_irqts
  17. irq_timings_irqts_selftest
  18. irq_timings_selftest

   1 // SPDX-License-Identifier: GPL-2.0
   2 // Copyright (C) 2016, Linaro Ltd - Daniel Lezcano <daniel.lezcano@linaro.org>
   3 #define pr_fmt(fmt) "irq_timings: " fmt
   4 
   5 #include <linux/kernel.h>
   6 #include <linux/percpu.h>
   7 #include <linux/slab.h>
   8 #include <linux/static_key.h>
   9 #include <linux/init.h>
  10 #include <linux/interrupt.h>
  11 #include <linux/idr.h>
  12 #include <linux/irq.h>
  13 #include <linux/math64.h>
  14 #include <linux/log2.h>
  15 
  16 #include <trace/events/irq.h>
  17 
  18 #include "internals.h"
  19 
  20 DEFINE_STATIC_KEY_FALSE(irq_timing_enabled);
  21 
  22 DEFINE_PER_CPU(struct irq_timings, irq_timings);
  23 
  24 static DEFINE_IDR(irqt_stats);
  25 
  26 void irq_timings_enable(void)
  27 {
  28         static_branch_enable(&irq_timing_enabled);
  29 }
  30 
  31 void irq_timings_disable(void)
  32 {
  33         static_branch_disable(&irq_timing_enabled);
  34 }
  35 
  36 /*
  37  * The main goal of this algorithm is to predict the next interrupt
  38  * occurrence on the current CPU.
  39  *
  40  * Currently, the interrupt timings are stored in a circular array
  41  * buffer every time there is an interrupt, as a tuple: the interrupt
  42  * number and the associated timestamp when the event occurred <irq,
  43  * timestamp>.
  44  *
  45  * For every interrupt occurring in a short period of time, we can
  46  * measure the elapsed time between the occurrences for the same
   47  * interrupt and we end up with a suite of intervals. Experience
   48  * has shown that interrupts often follow a periodic
  49  * pattern.
  50  *
  51  * The objective of the algorithm is to find out this periodic pattern
   52  * as fast as possible and use its period to predict the next irq event.
  53  *
  54  * When the next interrupt event is requested, we are in the situation
  55  * where the interrupts are disabled and the circular buffer
  56  * containing the timings is filled with the events which happened
  57  * after the previous next-interrupt-event request.
  58  *
  59  * At this point, we read the circular buffer and we fill the irq
  60  * related statistics structure. After this step, the circular array
  61  * containing the timings is empty because all the values are
  62  * dispatched in their corresponding buffers.
  63  *
  64  * Now for each interrupt, we can predict the next event by using the
  65  * suffix array, log interval and exponential moving average
  66  *
  67  * 1. Suffix array
  68  *
  69  * Suffix array is an array of all the suffixes of a string. It is
  70  * widely used as a data structure for compression, text search, ...
  71  * For instance for the word 'banana', the suffixes will be: 'banana'
  72  * 'anana' 'nana' 'ana' 'na' 'a'
  73  *
  74  * Usually, the suffix array is sorted but for our purpose it is
  75  * not necessary and won't provide any improvement in the context of
  76  * the solved problem where we clearly define the boundaries of the
  77  * search by a max period and min period.
  78  *
   79  * The suffix array will build a suite of intervals of different
   80  * lengths and will look for the repetition of each suite. If the suite
   81  * is repeating then we have the period, because it is the length of
   82  * the suite, whatever its position in the buffer.
  83  *
  84  * 2. Log interval
  85  *
   86  * We saw that the irq timings allow us to compute the interval of the
   87  * occurrences for a specific interrupt. We can reasonably assume the
   88  * longer the interval, the higher the error for the next event, so
   89  * we can consider storing those interval values into an array where
   90  * each slot corresponds to a power-of-2 sized interval range, the
   91  * slot being the ilog2 of the value. For example, index 12 will
   92  * contain values between 2^12 and 2^13.
  93  *
   94  * At the end we have an array of values where each index defines a
   95  * [2^index, 2^(index + 1)) range of values, allowing to store a large
   96  * number of values inside a small array.
  97  *
  98  * For example, if we have the value 1123, then we store it at
   99  * index ilog2(1123) = 10.
 100  *
  101  * Storing those values at the specific index is done by computing an
  102  * exponential moving average for this specific slot. For instance,
  103  * the values 1800, 1123, 1453, ... all fall under the same slot (10) and
  104  * the exponential moving average is updated every time a new value
  105  * is stored at this slot.
 106  *
 107  * 3. Exponential Moving Average
 108  *
 109  * The EMA is largely used to track a signal for stocks or as a low
  110  * pass filter. The magic of the formula is that it is very simple and
  111  * the reactivity of the average can be tuned with a factor called
  112  * alpha.
 113  *
  114  * The higher the alpha is, the faster the average responds to the
 115  * signal change. In our case, if a slot in the array is a big
 116  * interval, we can have numbers with a big difference between
 117  * them. The impact of those differences in the average computation
 118  * can be tuned by changing the alpha value.
 119  *
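 * As a worked illustration (using the EMA_ALPHA_VAL = 64 and
 * EMA_ALPHA_SHIFT = 7 constants defined below), the effective alpha is
 * 64 / 2^7 = 0.5, so each new value pulls the average halfway towards
 * it:
 *
 *	ema_new = ema_old + (((value - ema_old) * 64) >> 7)
 *
 *	e.g. ema_old = 1386, value = 1240
 *	     diff    = (1240 - 1386) * 64 = -9344
 *	     ema_new = 1386 + (-9344 >> 7) = 1386 - 73 = 1313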
 120  *
 121  *  -- The algorithm --
 122  *
  123  * We saw the different processing steps above; now let's see how they are
 124  * used together.
 125  *
 126  * For each interrupt:
 127  *      For each interval:
 128  *              Compute the index = ilog2(interval)
 129  *              Compute a new_ema(buffer[index], interval)
 130  *              Store the index in a circular buffer
 131  *
 132  *      Compute the suffix array of the indexes
 133  *
 134  *      For each suffix:
 135  *              If the suffix is reverse-found 3 times
 136  *                      Return suffix
 137  *
 138  *      Return Not found
 139  *
  140  * However we cannot build an endless suffix array; it would not make
  141  * sense and would add extra overhead, so we restrict this to a maximum
  142  * suffix length of PREDICTION_PERIOD_MAX (5) and a minimum suffix
  143  * length of PREDICTION_PERIOD_MIN (3). Experience showed 5 covers the
  144  * majority of the maximum pattern periods found for different devices.
 145  *
  146  * The result is a pattern search taking less than 1us per interrupt.
 147  *
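 * For reference, these steps map onto the functions in this file:
 * irq_timings_store() and __irq_timings_store() compute the ilog2
 * index, update the EMA slot and append the index to the per-irq
 * circular buffer, while __irq_timings_next_event() linearizes that
 * buffer and calls irq_timings_next_event_index() to perform the
 * suffix search described above.
 *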
 148  * Example based on real values:
 149  *
 150  * Example 1 : MMC write/read interrupt interval:
 151  *
 152  *      223947, 1240, 1384, 1386, 1386,
 153  *      217416, 1236, 1384, 1386, 1387,
 154  *      214719, 1241, 1386, 1387, 1384,
 155  *      213696, 1234, 1384, 1386, 1388,
 156  *      219904, 1240, 1385, 1389, 1385,
 157  *      212240, 1240, 1386, 1386, 1386,
 158  *      214415, 1236, 1384, 1386, 1387,
 159  *      214276, 1234, 1384, 1388, ?
 160  *
  161  * For each element, apply ilog2(value / PREDICTION_FACTOR)
 162  *
 163  *      15, 8, 8, 8, 8,
 164  *      15, 8, 8, 8, 8,
 165  *      15, 8, 8, 8, 8,
 166  *      15, 8, 8, 8, 8,
 167  *      15, 8, 8, 8, 8,
 168  *      15, 8, 8, 8, 8,
 169  *      15, 8, 8, 8, 8,
 170  *      15, 8, 8, 8, ?
 171  *
  172  * With a max period of 5, we take the last (max_period * 3) = 15
  173  * elements, as we can be confident that if the pattern repeats itself
  174  * three times it is a repeating pattern.
 175  *
 176  *                   8,
 177  *      15, 8, 8, 8, 8,
 178  *      15, 8, 8, 8, 8,
 179  *      15, 8, 8, 8, ?
 180  *
 181  * Suffixes are:
 182  *
 183  *  1) 8, 15, 8, 8, 8  <- max period
 184  *  2) 8, 15, 8, 8
 185  *  3) 8, 15, 8
 186  *  4) 8, 15           <- min period
 187  *
 188  * From there we search the repeating pattern for each suffix.
 189  *
 190  * buffer: 8, 15, 8, 8, 8, 8, 15, 8, 8, 8, 8, 15, 8, 8, 8
 191  *         |   |  |  |  |  |   |  |  |  |  |   |  |  |  |
 192  *         8, 15, 8, 8, 8  |   |  |  |  |  |   |  |  |  |
 193  *                         8, 15, 8, 8, 8  |   |  |  |  |
 194  *                                         8, 15, 8, 8, 8
 195  *
 196  * When moving the suffix, we found exactly 3 matches.
 197  *
 198  * The first suffix with period 5 is repeating.
 199  *
  200  * The index of the next event is (3 * max_period) % suffix_period
  201  *
  202  * In this example, the result is 15 % 5 = 0, so the next event is suffix[0] => 8
 203  *
  204  * However, 8 is an index into the array of exponential moving averages,
  205  * which were calculated on the fly when storing the values, so the
 206  * interval is ema[8] = 1366
 207  *
 208  *
 209  * Example 2:
 210  *
 211  *      4, 3, 5, 100,
 212  *      3, 3, 5, 117,
 213  *      4, 4, 5, 112,
 214  *      4, 3, 4, 110,
 215  *      3, 5, 3, 117,
 216  *      4, 4, 5, 112,
 217  *      4, 3, 4, 110,
 218  *      3, 4, 5, 112,
 219  *      4, 3, 4, 110
 220  *
  221  * ilog2(value / PREDICTION_FACTOR)
 222  *
 223  *      0, 0, 0, 4,
 224  *      0, 0, 0, 4,
 225  *      0, 0, 0, 4,
 226  *      0, 0, 0, 4,
 227  *      0, 0, 0, 4,
 228  *      0, 0, 0, 4,
 229  *      0, 0, 0, 4,
 230  *      0, 0, 0, 4,
 231  *      0, 0, 0, 4
 232  *
 233  * Max period 5:
 234  *         0, 0, 4,
 235  *      0, 0, 0, 4,
 236  *      0, 0, 0, 4,
 237  *      0, 0, 0, 4
 238  *
 239  * Suffixes:
 240  *
 241  *  1) 0, 0, 4, 0, 0
 242  *  2) 0, 0, 4, 0
 243  *  3) 0, 0, 4
 244  *  4) 0, 0
 245  *
 246  * buffer: 0, 0, 4, 0, 0, 0, 4, 0, 0, 0, 4, 0, 0, 0, 4
 247  *         |  |  |  |  |  |  X
 248  *         0, 0, 4, 0, 0, |  X
 249  *                        0, 0
 250  *
 251  * buffer: 0, 0, 4, 0, 0, 0, 4, 0, 0, 0, 4, 0, 0, 0, 4
 252  *         |  |  |  |  |  |  |  |  |  |  |  |  |  |  |
 253  *         0, 0, 4, 0, |  |  |  |  |  |  |  |  |  |  |
 254  *                     0, 0, 4, 0, |  |  |  |  |  |  |
 255  *                                 0, 0, 4, 0, |  |  |
 256  *                                             0  0  4
 257  *
  258  * The pattern is found 3 times, the remainder is (max_period * 3) %
  259  * suffix_period = 15 % 4 = 3. This value is the index in the suffix
  260  * array: the suffix with period 4 has the value 0 at index 3, so the
  261  * next event is predicted in the ema slot 0.
 262  */
 263 #define EMA_ALPHA_VAL           64
 264 #define EMA_ALPHA_SHIFT         7
 265 
 266 #define PREDICTION_PERIOD_MIN   3
 267 #define PREDICTION_PERIOD_MAX   5
 268 #define PREDICTION_FACTOR       4
 269 #define PREDICTION_MAX          10 /* 2 ^ PREDICTION_MAX useconds */
 270 #define PREDICTION_BUFFER_SIZE  16 /* slots for EMAs, hardly more than 16 */
 271 
 272 /*
  273  * Number of elements in the circular buffer: if the buffer happened to
  274  * be flushed before, the number of elements may be smaller than
  275  * IRQ_TIMINGS_SIZE, so the count is used; otherwise the array size is
  276  * used, as we wrapped. The index begins from zero when we did not
  277  * wrap. That could be done in a nicer way with a proper circular
  278  * array structure type, but at the cost of extra computation in the
  279  * interrupt handler hot path. We choose efficiency.
 280  */
 281 #define for_each_irqts(i, irqts)                                        \
 282         for (i = irqts->count < IRQ_TIMINGS_SIZE ?                      \
 283                      0 : irqts->count & IRQ_TIMINGS_MASK,               \
 284                      irqts->count = min(IRQ_TIMINGS_SIZE,               \
 285                                         irqts->count);                  \
 286              irqts->count > 0; irqts->count--,                          \
 287                      i = (i + 1) & IRQ_TIMINGS_MASK)
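
/*
 * Illustration of the macro above, assuming IRQ_TIMINGS_SIZE were 32:
 * after 300 recorded events, the walk starts at index 300 & 31 = 12
 * (the oldest stored entry), visits the 32 stored entries oldest
 * first, and leaves irqts->count at zero, i.e. browsing the buffer
 * also flushes it.
 */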
 288 
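/*
 * Per-irq statistics: 'last_ts' is the timestamp of the last interrupt,
 * 'ema_time[]' holds one exponential moving average per ilog2 interval
 * slot, 'circ_timings[]' is the circular buffer of interval indexes,
 * 'timings[]' is the linearized scratch copy used for the prediction,
 * and 'count' is the number of intervals recorded since the last reset.
 */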
 289 struct irqt_stat {
 290         u64     last_ts;
 291         u64     ema_time[PREDICTION_BUFFER_SIZE];
 292         int     timings[IRQ_TIMINGS_SIZE];
 293         int     circ_timings[IRQ_TIMINGS_SIZE];
 294         int     count;
 295 };
 296 
 297 /*
 298  * Exponential moving average computation
 299  */
 300 static u64 irq_timings_ema_new(u64 value, u64 ema_old)
 301 {
 302         s64 diff;
 303 
 304         if (unlikely(!ema_old))
 305                 return value;
 306 
 307         diff = (value - ema_old) * EMA_ALPHA_VAL;
 308         /*
  309          * We can add the s64 'diff' to the u64 'ema_old' variable, as
  310          * the latter will never have its topmost bit set: it will
  311          * always be smaller than a 2^63 nanosec interrupt interval
  312          * (292 years).
 313          */
 314         return ema_old + (diff >> EMA_ALPHA_SHIFT);
 315 }
 316 
 317 static int irq_timings_next_event_index(int *buffer, size_t len, int period_max)
 318 {
 319         int period;
 320 
 321         /*
 322          * Move the beginning pointer to the end minus the max period x 3.
  323          * From this point we can begin searching for the pattern.
 324          */
 325         buffer = &buffer[len - (period_max * 3)];
 326 
 327         /* Adjust the length to the maximum allowed period x 3 */
 328         len = period_max * 3;
 329 
 330         /*
  331          * The buffer contains the suite of intervals on an ilog2
  332          * basis, and we are looking for a repetition. We point the
  333          * beginning of the search at three times the maximum period
  334          * length back from the end of the buffer. We do that for
  335          * each suffix.
 336          */
 337         for (period = period_max; period >= PREDICTION_PERIOD_MIN; period--) {
 338 
 339                 /*
  340                  * The first comparison always succeeds because the
  341                  * suffix is deduced from the first 'period' elements of
 342                  * the buffer and we compare the initial suffix with
 343                  * itself, so we can skip the first iteration.
 344                  */
 345                 int idx = period;
 346                 size_t size = period;
 347 
 348                 /*
  349                  * We check whether the suite with the current period
  350                  * repeats itself. If it is truncated at the end, as it
  351                  * repeats we can use the period to find out the next
  352                  * element with the modulo.
 353                  */
 354                 while (!memcmp(buffer, &buffer[idx], size * sizeof(int))) {
 355 
 356                         /*
 357                          * Move the index in a period basis
 358                          */
 359                         idx += size;
 360 
 361                         /*
 362                          * If this condition is reached, all previous
 363                          * memcmp were successful, so the period is
 364                          * found.
 365                          */
 366                         if (idx == len)
 367                                 return buffer[len % period];
 368 
 369                         /*
 370                          * If the remaining elements to compare are
 371                          * smaller than the period, readjust the size
 372                          * of the comparison for the last iteration.
 373                          */
 374                         if (len - idx < period)
 375                                 size = len - idx;
 376                 }
 377         }
 378 
 379         return -1;
 380 }
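
/*
 * Illustrative trace, assuming the buffer holds exactly the fifteen
 * indexes of Example 1 above: 8, 15, 8, 8, 8, 8, 15, 8, 8, 8, 8, 15,
 * 8, 8, 8, with len = 15 and period_max = 5. The period-5 suffix
 * (8, 15, 8, 8, 8) matches at offsets 5 and 10, idx reaches len, and
 * the function returns buffer[15 % 5] = buffer[0] = 8, the ema slot
 * index predicted for the next interrupt.
 */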
 381 
 382 static u64 __irq_timings_next_event(struct irqt_stat *irqs, int irq, u64 now)
 383 {
 384         int index, i, period_max, count, start, min = INT_MAX;
 385 
 386         if ((now - irqs->last_ts) >= NSEC_PER_SEC) {
 387                 irqs->count = irqs->last_ts = 0;
 388                 return U64_MAX;
 389         }
 390 
 391         /*
 392          * As we want to find three times the repetition, we need a
 393          * number of intervals greater or equal to three times the
 394          * maximum period, otherwise we truncate the max period.
 395          */
 396         period_max = irqs->count > (3 * PREDICTION_PERIOD_MAX) ?
 397                 PREDICTION_PERIOD_MAX : irqs->count / 3;
 398 
 399         /*
 400          * If we don't have enough irq timings for this prediction,
 401          * just bail out.
 402          */
 403         if (period_max <= PREDICTION_PERIOD_MIN)
 404                 return U64_MAX;
 405 
 406         /*
  407          * 'count' depends on whether the circular buffer wrapped or not
 408          */
 409         count = irqs->count < IRQ_TIMINGS_SIZE ?
 410                 irqs->count : IRQ_TIMINGS_SIZE;
 411 
 412         start = irqs->count < IRQ_TIMINGS_SIZE ?
 413                 0 : (irqs->count & IRQ_TIMINGS_MASK);
 414 
 415         /*
 416          * Copy the content of the circular buffer into another buffer
 417          * in order to linearize the buffer instead of dealing with
  418          * wrapping indexes and a shifted array, which would be prone to
  419          * error and extremely difficult to debug.
 420          */
 421         for (i = 0; i < count; i++) {
 422                 int index = (start + i) & IRQ_TIMINGS_MASK;
 423 
 424                 irqs->timings[i] = irqs->circ_timings[index];
 425                 min = min_t(int, irqs->timings[i], min);
 426         }
 427 
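        /*
         * Search the linearized buffer for a repeating pattern. If none
         * is found, fall back to the EMA of the smallest interval index
         * seen in the buffer ('min'), which amounts to predicting the
         * earliest plausible next event.
         */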
 428         index = irq_timings_next_event_index(irqs->timings, count, period_max);
 429         if (index < 0)
 430                 return irqs->last_ts + irqs->ema_time[min];
 431 
 432         return irqs->last_ts + irqs->ema_time[index];
 433 }
 434 
 435 static __always_inline int irq_timings_interval_index(u64 interval)
 436 {
 437         /*
  438          * The '>> 10' shift converts nanoseconds to ~microseconds and the
  439          * PREDICTION_FACTOR increases the interval size covered by each EMA slot.
 440          */
 441         u64 interval_us = (interval >> 10) / PREDICTION_FACTOR;
 442 
 443         return likely(interval_us) ? ilog2(interval_us) : 0;
 444 }
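
/*
 * Worked illustration (values borrowed from the MMC example above and
 * the intervals1[] selftest data below): a 1386000 ns interval is
 * shifted down to 1386000 >> 10 = 1353, divided by PREDICTION_FACTOR
 * (4) to give 338, and ilog2(338) = 8, which is the '8' appearing in
 * the ilog2 suite of Example 1.
 */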
 445 
 446 static __always_inline void __irq_timings_store(int irq, struct irqt_stat *irqs,
 447                                                 u64 interval)
 448 {
 449         int index;
 450 
 451         /*
 452          * Get the index in the ema table for this interrupt.
 453          */
 454         index = irq_timings_interval_index(interval);
 455 
 456         /*
 457          * Store the index as an element of the pattern in another
 458          * circular array.
 459          */
 460         irqs->circ_timings[irqs->count & IRQ_TIMINGS_MASK] = index;
 461 
 462         irqs->ema_time[index] = irq_timings_ema_new(interval,
 463                                                     irqs->ema_time[index]);
 464 
 465         irqs->count++;
 466 }
 467 
 468 static inline void irq_timings_store(int irq, struct irqt_stat *irqs, u64 ts)
 469 {
 470         u64 old_ts = irqs->last_ts;
 471         u64 interval;
 472 
 473         /*
 474          * The timestamps are absolute time values, we need to compute
 475          * the timing interval between two interrupts.
 476          */
 477         irqs->last_ts = ts;
 478 
 479         /*
 480          * The interval type is u64 in order to deal with the same
  481          * type in our computation, which prevents subtle issues with
 482          * overflow, sign and division.
 483          */
 484         interval = ts - old_ts;
 485 
 486         /*
  487          * The interrupts triggered more than one second apart, which
  488          * ends the sequence as predictable for our purpose. In this
 489          * case, assume we have the beginning of a sequence and the
 490          * timestamp is the first value. As it is impossible to
 491          * predict anything at this point, return.
 492          *
 493          * Note the first timestamp of the sequence will always fall
 494          * in this test because the old_ts is zero. That is what we
 495          * want as we need another timestamp to compute an interval.
 496          */
 497         if (interval >= NSEC_PER_SEC) {
 498                 irqs->count = 0;
 499                 return;
 500         }
 501 
 502         __irq_timings_store(irq, irqs, interval);
 503 }
 504 
 505 /**
 506  * irq_timings_next_event - Return when the next event is supposed to arrive
  507  * @now: current time
 508  * During the last busy cycle, the number of interrupts is incremented
 509  * and stored in the irq_timings structure. This information is
 510  * necessary to:
 511  *
 512  * - know if the index in the table wrapped up:
 513  *
 514  *      If more than the array size interrupts happened during the
 515  *      last busy/idle cycle, the index wrapped up and we have to
 516  *      begin with the next element in the array which is the last one
  517  *      in the sequence, otherwise it is at index 0.
 518  *
 519  * - have an indication of the interrupts activity on this CPU
 520  *   (eg. irq/sec)
 521  *
  522  * The values are 'consumed' after being inserted into the statistical model,
 523  * thus the count is reinitialized.
 524  *
 525  * The array of values **must** be browsed in the time direction, the
 526  * timestamp must increase between an element and the next one.
 527  *
  528  * Returns a nanosec time based estimation of the earliest expected
  529  * interrupt, U64_MAX otherwise.
 530  */
 531 u64 irq_timings_next_event(u64 now)
 532 {
 533         struct irq_timings *irqts = this_cpu_ptr(&irq_timings);
 534         struct irqt_stat *irqs;
 535         struct irqt_stat __percpu *s;
 536         u64 ts, next_evt = U64_MAX;
 537         int i, irq = 0;
 538 
 539         /*
  540  *       * This function must be called with the local irqs disabled in
  541  *       * order to prevent the timings circular buffer from being updated
 542          * while we are reading it.
 543          */
 544         lockdep_assert_irqs_disabled();
 545 
 546         if (!irqts->count)
 547                 return next_evt;
 548 
 549         /*
  550          * Number of elements in the circular buffer: if the buffer
  551          * happened to be flushed before, the number of elements may be
  552          * smaller than IRQ_TIMINGS_SIZE, so the count is used,
 553          * otherwise the array size is used as we wrapped. The index
 554          * begins from zero when we did not wrap. That could be done
 555          * in a nicer way with the proper circular array structure
 556          * type but with the cost of extra computation in the
 557          * interrupt handler hot path. We choose efficiency.
 558          *
 559          * Inject measured irq/timestamp to the pattern prediction
 560          * model while decrementing the counter because we consume the
 561          * data from our circular buffer.
 562          */
 563         for_each_irqts(i, irqts) {
 564                 irq = irq_timing_decode(irqts->values[i], &ts);
 565                 s = idr_find(&irqt_stats, irq);
 566                 if (s)
 567                         irq_timings_store(irq, this_cpu_ptr(s), ts);
 568         }
 569 
 570         /*
 571          * Look in the list of interrupts' statistics, the earliest
 572          * next event.
 573          */
 574         idr_for_each_entry(&irqt_stats, s, i) {
 575 
 576                 irqs = this_cpu_ptr(s);
 577 
 578                 ts = __irq_timings_next_event(irqs, i, now);
 579                 if (ts <= now)
 580                         return now;
 581 
 582                 if (ts < next_evt)
 583                         next_evt = ts;
 584         }
 585 
 586         return next_evt;
 587 }
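
/*
 * Minimal usage sketch (illustrative only, not part of this file),
 * assuming the timestamps were recorded with local_clock() by the irq
 * entry path. A prediction consumer such as a cpuidle governor is
 * expected to call this with interrupts disabled:
 *
 *	u64 now = local_clock();
 *	u64 next = irq_timings_next_event(now);
 *
 *	if (next != U64_MAX)
 *		sleep_length_ns = next - now;
 */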
 588 
 589 void irq_timings_free(int irq)
 590 {
 591         struct irqt_stat __percpu *s;
 592 
 593         s = idr_find(&irqt_stats, irq);
 594         if (s) {
 595                 free_percpu(s);
 596                 idr_remove(&irqt_stats, irq);
 597         }
 598 }
 599 
 600 int irq_timings_alloc(int irq)
 601 {
 602         struct irqt_stat __percpu *s;
 603         int id;
 604 
 605         /*
 606          * Some platforms can have the same private interrupt per cpu,
  607          * so this function may be called several times with the
 608          * same interrupt number. Just bail out in case the per cpu
 609          * stat structure is already allocated.
 610          */
 611         s = idr_find(&irqt_stats, irq);
 612         if (s)
 613                 return 0;
 614 
 615         s = alloc_percpu(*s);
 616         if (!s)
 617                 return -ENOMEM;
 618 
 619         idr_preload(GFP_KERNEL);
 620         id = idr_alloc(&irqt_stats, s, irq, irq + 1, GFP_NOWAIT);
 621         idr_preload_end();
 622 
 623         if (id < 0) {
 624                 free_percpu(s);
 625                 return id;
 626         }
 627 
 628         return 0;
 629 }
 630 
 631 #ifdef CONFIG_TEST_IRQ_TIMINGS
 632 struct timings_intervals {
 633         u64 *intervals;
 634         size_t count;
 635 };
 636 
 637 /*
  638  * Intervals are given in nanoseconds
 639  */
 640 static u64 intervals0[] __initdata = {
 641         10000, 50000, 200000, 500000,
 642         10000, 50000, 200000, 500000,
 643         10000, 50000, 200000, 500000,
 644         10000, 50000, 200000, 500000,
 645         10000, 50000, 200000, 500000,
 646         10000, 50000, 200000, 500000,
 647         10000, 50000, 200000, 500000,
 648         10000, 50000, 200000, 500000,
 649         10000, 50000, 200000,
 650 };
 651 
 652 static u64 intervals1[] __initdata = {
 653         223947000, 1240000, 1384000, 1386000, 1386000,
 654         217416000, 1236000, 1384000, 1386000, 1387000,
 655         214719000, 1241000, 1386000, 1387000, 1384000,
 656         213696000, 1234000, 1384000, 1386000, 1388000,
 657         219904000, 1240000, 1385000, 1389000, 1385000,
 658         212240000, 1240000, 1386000, 1386000, 1386000,
 659         214415000, 1236000, 1384000, 1386000, 1387000,
 660         214276000, 1234000,
 661 };
 662 
 663 static u64 intervals2[] __initdata = {
 664         4000, 3000, 5000, 100000,
 665         3000, 3000, 5000, 117000,
 666         4000, 4000, 5000, 112000,
 667         4000, 3000, 4000, 110000,
 668         3000, 5000, 3000, 117000,
 669         4000, 4000, 5000, 112000,
 670         4000, 3000, 4000, 110000,
 671         3000, 4000, 5000, 112000,
 672         4000,
 673 };
 674 
 675 static u64 intervals3[] __initdata = {
 676         1385000, 212240000, 1240000,
 677         1386000, 214415000, 1236000,
 678         1384000, 214276000, 1234000,
 679         1386000, 214415000, 1236000,
 680         1385000, 212240000, 1240000,
 681         1386000, 214415000, 1236000,
 682         1384000, 214276000, 1234000,
 683         1386000, 214415000, 1236000,
 684         1385000, 212240000, 1240000,
 685 };
 686 
 687 static u64 intervals4[] __initdata = {
 688         10000, 50000, 10000, 50000,
 689         10000, 50000, 10000, 50000,
 690         10000, 50000, 10000, 50000,
 691         10000, 50000, 10000, 50000,
 692         10000, 50000, 10000, 50000,
 693         10000, 50000, 10000, 50000,
 694         10000, 50000, 10000, 50000,
 695         10000, 50000, 10000, 50000,
 696         10000,
 697 };
 698 
 699 static struct timings_intervals tis[] __initdata = {
 700         { intervals0, ARRAY_SIZE(intervals0) },
 701         { intervals1, ARRAY_SIZE(intervals1) },
 702         { intervals2, ARRAY_SIZE(intervals2) },
 703         { intervals3, ARRAY_SIZE(intervals3) },
 704         { intervals4, ARRAY_SIZE(intervals4) },
 705 };
 706 
 707 static int __init irq_timings_test_next_index(struct timings_intervals *ti)
 708 {
 709         int _buffer[IRQ_TIMINGS_SIZE];
 710         int buffer[IRQ_TIMINGS_SIZE];
 711         int index, start, i, count, period_max;
 712 
 713         count = ti->count - 1;
 714 
 715         period_max = count > (3 * PREDICTION_PERIOD_MAX) ?
 716                 PREDICTION_PERIOD_MAX : count / 3;
 717 
 718         /*
 719          * Inject all values except the last one which will be used
 720          * to compare with the next index result.
 721          */
 722         pr_debug("index suite: ");
 723 
 724         for (i = 0; i < count; i++) {
 725                 index = irq_timings_interval_index(ti->intervals[i]);
 726                 _buffer[i & IRQ_TIMINGS_MASK] = index;
 727                 pr_cont("%d ", index);
 728         }
 729 
 730         start = count < IRQ_TIMINGS_SIZE ? 0 :
 731                 count & IRQ_TIMINGS_MASK;
 732 
 733         count = min_t(int, count, IRQ_TIMINGS_SIZE);
 734 
 735         for (i = 0; i < count; i++) {
 736                 int index = (start + i) & IRQ_TIMINGS_MASK;
 737                 buffer[i] = _buffer[index];
 738         }
 739 
 740         index = irq_timings_next_event_index(buffer, count, period_max);
 741         i = irq_timings_interval_index(ti->intervals[ti->count - 1]);
 742 
 743         if (index != i) {
 744                 pr_err("Expected (%d) and computed (%d) next indexes differ\n",
 745                        i, index);
 746                 return -EINVAL;
 747         }
 748 
 749         return 0;
 750 }
 751 
 752 static int __init irq_timings_next_index_selftest(void)
 753 {
 754         int i, ret;
 755 
 756         for (i = 0; i < ARRAY_SIZE(tis); i++) {
 757 
 758                 pr_info("---> Injecting intervals number #%d (count=%zd)\n",
 759                         i, tis[i].count);
 760 
 761                 ret = irq_timings_test_next_index(&tis[i]);
 762                 if (ret)
 763                         break;
 764         }
 765 
 766         return ret;
 767 }
 768 
 769 static int __init irq_timings_test_irqs(struct timings_intervals *ti)
 770 {
 771         struct irqt_stat __percpu *s;
 772         struct irqt_stat *irqs;
 773         int i, index, ret, irq = 0xACE5;
 774 
 775         ret = irq_timings_alloc(irq);
 776         if (ret) {
 777                 pr_err("Failed to allocate irq timings\n");
 778                 return ret;
 779         }
 780 
 781         s = idr_find(&irqt_stats, irq);
 782         if (!s) {
 783                 ret = -EIDRM;
 784                 goto out;
 785         }
 786 
 787         irqs = this_cpu_ptr(s);
 788 
 789         for (i = 0; i < ti->count; i++) {
 790 
 791                 index = irq_timings_interval_index(ti->intervals[i]);
 792                 pr_debug("%d: interval=%llu ema_index=%d\n",
 793                          i, ti->intervals[i], index);
 794 
 795                 __irq_timings_store(irq, irqs, ti->intervals[i]);
  796                 if (irqs->circ_timings[i & IRQ_TIMINGS_MASK] != index) {
  797                         pr_err("Failed to store in the circular buffer\n");
                              ret = -EBADSLT;
  798                         goto out;
  799                 }
 800         }
 801 
  802         if (irqs->count != ti->count) {
  803                 pr_err("Count differs\n");
                      ret = -EINVAL;
  804                 goto out;
  805         }
 806 
 807         ret = 0;
 808 out:
 809         irq_timings_free(irq);
 810 
 811         return ret;
 812 }
 813 
 814 static int __init irq_timings_irqs_selftest(void)
 815 {
 816         int i, ret;
 817 
 818         for (i = 0; i < ARRAY_SIZE(tis); i++) {
 819                 pr_info("---> Injecting intervals number #%d (count=%zd)\n",
 820                         i, tis[i].count);
 821                 ret = irq_timings_test_irqs(&tis[i]);
 822                 if (ret)
 823                         break;
 824         }
 825 
 826         return ret;
 827 }
 828 
 829 static int __init irq_timings_test_irqts(struct irq_timings *irqts,
 830                                          unsigned count)
 831 {
 832         int start = count >= IRQ_TIMINGS_SIZE ? count - IRQ_TIMINGS_SIZE : 0;
 833         int i, irq, oirq = 0xBEEF;
 834         u64 ots = 0xDEAD, ts;
 835 
 836         /*
 837          * Fill the circular buffer by using the dedicated function.
 838          */
 839         for (i = 0; i < count; i++) {
 840                 pr_debug("%d: index=%d, ts=%llX irq=%X\n",
 841                          i, i & IRQ_TIMINGS_MASK, ots + i, oirq + i);
 842 
 843                 irq_timings_push(ots + i, oirq + i);
 844         }
 845 
 846         /*
  847          * Compute the expected first element's values, whether or not
  848          * the index wrapped up.
 849          */
 850         ots += start;
 851         oirq += start;
 852 
 853         /*
  854          * Test that the circular buffer count is correct.
 855          */
 856         pr_debug("---> Checking timings array count (%d) is right\n", count);
 857         if (WARN_ON(irqts->count != count))
 858                 return -EINVAL;
 859 
 860         /*
  861          * Test the macro used to browse all the irqts.
 862          */
 863         pr_debug("---> Checking the for_each_irqts() macro\n");
 864         for_each_irqts(i, irqts) {
 865 
 866                 irq = irq_timing_decode(irqts->values[i], &ts);
 867 
 868                 pr_debug("index=%d, ts=%llX / %llX, irq=%X / %X\n",
 869                          i, ts, ots, irq, oirq);
 870 
 871                 if (WARN_ON(ts != ots || irq != oirq))
 872                         return -EINVAL;
 873 
 874                 ots++; oirq++;
 875         }
 876 
 877         /*
  878          * The circular buffer should have been flushed when browsed
 879          * with for_each_irqts
 880          */
 881         pr_debug("---> Checking timings array is empty after browsing it\n");
 882         if (WARN_ON(irqts->count))
 883                 return -EINVAL;
 884 
 885         return 0;
 886 }
 887 
 888 static int __init irq_timings_irqts_selftest(void)
 889 {
 890         struct irq_timings *irqts = this_cpu_ptr(&irq_timings);
 891         int i, ret;
 892 
 893         /*
  894          * Test the circular buffer with different numbers of
  895          * elements. The purpose is to test at the limits (empty, half
  896          * full, full, wrapped with the cursor at the boundaries,
  897          * wrapped several times, etc.).
 898          */
 899         int count[] = { 0,
 900                         IRQ_TIMINGS_SIZE >> 1,
 901                         IRQ_TIMINGS_SIZE,
 902                         IRQ_TIMINGS_SIZE + (IRQ_TIMINGS_SIZE >> 1),
 903                         2 * IRQ_TIMINGS_SIZE,
 904                         (2 * IRQ_TIMINGS_SIZE) + 3,
 905         };
 906 
 907         for (i = 0; i < ARRAY_SIZE(count); i++) {
 908 
 909                 pr_info("---> Checking the timings with %d/%d values\n",
 910                         count[i], IRQ_TIMINGS_SIZE);
 911 
 912                 ret = irq_timings_test_irqts(irqts, count[i]);
 913                 if (ret)
 914                         break;
 915         }
 916 
 917         return ret;
 918 }
 919 
 920 static int __init irq_timings_selftest(void)
 921 {
 922         int ret;
 923 
 924         pr_info("------------------- selftest start -----------------\n");
 925 
 926         /*
  927          * At this point, we don't expect any subsystem other than us to
  928          * use the irq timings, so it should not be enabled.
 929          */
 930         if (static_branch_unlikely(&irq_timing_enabled)) {
 931                 pr_warn("irq timings already initialized, skipping selftest\n");
 932                 return 0;
 933         }
 934 
 935         ret = irq_timings_irqts_selftest();
 936         if (ret)
 937                 goto out;
 938 
 939         ret = irq_timings_irqs_selftest();
 940         if (ret)
 941                 goto out;
 942 
 943         ret = irq_timings_next_index_selftest();
 944 out:
 945         pr_info("---------- selftest end with %s -----------\n",
 946                 ret ? "failure" : "success");
 947 
 948         return ret;
 949 }
 950 early_initcall(irq_timings_selftest);
 951 #endif
