This source file includes the following definitions:
- tick_get_tick_sched
- tick_do_update_jiffies64
- tick_init_jiffy_update
- tick_sched_do_timer
- tick_sched_handle
- check_tick_dependency
- can_stop_full_tick
- nohz_full_kick_func
- tick_nohz_full_kick
- tick_nohz_full_kick_cpu
- tick_nohz_full_kick_all
- tick_nohz_dep_set_all
- tick_nohz_dep_set
- tick_nohz_dep_clear
- tick_nohz_dep_set_cpu
- tick_nohz_dep_clear_cpu
- tick_nohz_dep_set_task
- tick_nohz_dep_clear_task
- tick_nohz_dep_set_signal
- tick_nohz_dep_clear_signal
- __tick_nohz_task_switch
- tick_nohz_full_setup
- tick_nohz_cpu_down
- tick_nohz_init
- setup_tick_nohz
- tick_nohz_tick_stopped
- tick_nohz_tick_stopped_cpu
- tick_nohz_update_jiffies
- update_ts_time_stats
- tick_nohz_stop_idle
- tick_nohz_start_idle
- get_cpu_idle_time_us
- get_cpu_iowait_time_us
- tick_nohz_restart
- local_timer_softirq_pending
- tick_nohz_next_event
- tick_nohz_stop_tick
- tick_nohz_retain_tick
- tick_nohz_stop_sched_tick
- tick_nohz_restart_sched_tick
- tick_nohz_full_update_tick
- can_stop_idle_tick
- __tick_nohz_idle_stop_tick
- tick_nohz_idle_stop_tick
- tick_nohz_idle_retain_tick
- tick_nohz_idle_enter
- tick_nohz_irq_exit
- tick_nohz_idle_got_tick
- tick_nohz_get_next_hrtimer
- tick_nohz_get_sleep_length
- tick_nohz_get_idle_calls_cpu
- tick_nohz_get_idle_calls
- tick_nohz_account_idle_ticks
- __tick_nohz_idle_restart_tick
- tick_nohz_idle_restart_tick
- tick_nohz_idle_exit
- tick_nohz_handler
- tick_nohz_activate
- tick_nohz_switch_to_nohz
- tick_nohz_irq_enter
- tick_nohz_switch_to_nohz
- tick_nohz_irq_enter
- tick_nohz_activate
- tick_irq_enter
- tick_sched_timer
- skew_tick
- tick_setup_sched_timer
- tick_cancel_sched_timer
- tick_clock_notify
- tick_oneshot_notify
- tick_check_oneshot_change
   1 // SPDX-License-Identifier: GPL-2.0
   2 /*
   3  *  Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de>
   4  *  Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar
   5  *  Copyright(C) 2006-2007  Timesys Corp., Thomas Gleixner
   6  *
   7  *  No idle tick implementation for low and high resolution timers
   8  *
   9  *  Started by: Thomas Gleixner and Ingo Molnar
  10  */
  11 #include <linux/cpu.h>
  12 #include <linux/err.h>
  13 #include <linux/hrtimer.h>
  14 #include <linux/interrupt.h>
  15 #include <linux/kernel_stat.h>
  16 #include <linux/percpu.h>
  17 #include <linux/nmi.h>
  18 #include <linux/profile.h>
  19 #include <linux/sched/signal.h>
  20 #include <linux/sched/clock.h>
  21 #include <linux/sched/stat.h>
  22 #include <linux/sched/nohz.h>
  23 #include <linux/module.h>
  24 #include <linux/irq_work.h>
  25 #include <linux/posix-timers.h>
  26 #include <linux/context_tracking.h>
  27 #include <linux/mm.h>
  28 
  29 #include <asm/irq_regs.h>
  30 
  31 #include "tick-internal.h"
  32 
  33 #include <trace/events/timer.h>
  34 
  35 /*
  36  * Per-CPU nohz control structure
  37  */
  38 static DEFINE_PER_CPU(struct tick_sched, tick_cpu_sched);
  39 
  40 struct tick_sched *tick_get_tick_sched(int cpu)
  41 {
  42         return &per_cpu(tick_cpu_sched, cpu);
  43 }
  44 
  45 #if defined(CONFIG_NO_HZ_COMMON) || defined(CONFIG_HIGH_RES_TIMERS)
  46 /*
  47  * The time when the last jiffy update happened. Protected by jiffies_lock.
  48  */
  49 static ktime_t last_jiffies_update;
  50 
  51 /*
  52  * Must be called with interrupts disabled !
  53  */
  54 static void tick_do_update_jiffies64(ktime_t now)
  55 {
  56         unsigned long ticks = 0;
  57         ktime_t delta;
  58 
  59         /*
  60          * Do a quick check without holding jiffies_lock. The READ_ONCE()
  61          * pairs with the update done later in this function.
  62          */
  63         delta = ktime_sub(now, READ_ONCE(last_jiffies_update));
  64         if (delta < tick_period)
  65                 return;
  66 
  67         /* Reevaluate with jiffies_lock held */
  68         write_seqlock(&jiffies_lock);
  69 
  70         delta = ktime_sub(now, last_jiffies_update);
  71         if (delta >= tick_period) {
  72 
  73                 delta = ktime_sub(delta, tick_period);
  74                 /* Pairs with the lockless read in this function. */
  75                 WRITE_ONCE(last_jiffies_update,
  76                            ktime_add(last_jiffies_update, tick_period));
  77 
  78                 /* Slow path for long timeouts */
  79                 if (unlikely(delta >= tick_period)) {
  80                         s64 incr = ktime_to_ns(tick_period);
  81 
  82                         ticks = ktime_divns(delta, incr);
  83 
  84                         /* Pairs with the lockless read in this function. */
  85                         WRITE_ONCE(last_jiffies_update,
  86                                    ktime_add_ns(last_jiffies_update,
  87                                                 incr * ticks));
  88                 }
  89                 do_timer(++ticks);
  90 
  91         /* Keep the tick_next_period variable up to date */
  92                 tick_next_period = ktime_add(last_jiffies_update, tick_period);
  93         } else {
  94                 write_sequnlock(&jiffies_lock);
  95                 return;
  96         }
  97         write_sequnlock(&jiffies_lock);
  98         update_wall_time();
  99 }
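     /*
      * Worked example for the catch-up path above (illustrative numbers,
      * assuming HZ=1000 so tick_period == 1ms): if the timekeeping CPU
      * slept for 5.5ms, delta == 5.5ms on entry. One period is consumed
      * unconditionally (delta becomes 4.5ms), the slow path folds in
      * ktime_divns(4.5ms, 1ms) == 4 further ticks, and do_timer(++ticks)
      * advances jiffies by 5 in one call, leaving the residual 0.5ms to
      * accumulate toward the next tick.
      */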
 100 
 101 /*
 102  * Initialize and retrieve the jiffies update.
 103  */
 104 static ktime_t tick_init_jiffy_update(void)
 105 {
 106         ktime_t period;
 107 
 108         write_seqlock(&jiffies_lock);
 109         /* Did we start the jiffies update yet ? */
 110         if (last_jiffies_update == 0)
 111                 last_jiffies_update = tick_next_period;
 112         period = last_jiffies_update;
 113         write_sequnlock(&jiffies_lock);
 114         return period;
 115 }
 116 
 117 static void tick_sched_do_timer(struct tick_sched *ts, ktime_t now)
 118 {
 119         int cpu = smp_processor_id();
 120 
 121 #ifdef CONFIG_NO_HZ_COMMON
 122         /*
 123          * Check if the do_timer duty was dropped. We don't care about
 124          * concurrency: This happens only when the CPU in charge went
 125          * into a long sleep. If two CPUs happen to assign themselves to
 126          * this duty, then the jiffies update is still serialized by
 127          * jiffies_lock.
 128          *
 129          * If nohz_full is enabled, this should not happen because the
 130          * tick_do_timer_cpu never relinquishes.
 131          */
 132         if (unlikely(tick_do_timer_cpu == TICK_DO_TIMER_NONE)) {
 133 #ifdef CONFIG_NO_HZ_FULL
 134                 WARN_ON(tick_nohz_full_running);
 135 #endif
 136                 tick_do_timer_cpu = cpu;
 137         }
 138 #endif
 139 
 140         /* Check if the jiffies need an update */
 141         if (tick_do_timer_cpu == cpu)
 142                 tick_do_update_jiffies64(now);
 143 
 144         if (ts->inidle)
 145                 ts->got_idle_tick = 1;
 146 }
 147 
 148 static void tick_sched_handle(struct tick_sched *ts, struct pt_regs *regs)
 149 {
 150 #ifdef CONFIG_NO_HZ_COMMON
 151         /*
 152          * When we are idle and the tick is stopped, we have to touch
 153          * the watchdog as we might not schedule for a really long
 154          * time. This happens on complete idle SMP systems while
 155          * waiting on the login prompt. We also increment the "start of
 156          * idle" jiffy stamp so the idle accounting adjustment we do
 157          * when we go busy again does not account too many ticks.
 158          */
 159         if (ts->tick_stopped) {
 160                 touch_softlockup_watchdog_sched();
 161                 if (is_idle_task(current))
 162                         ts->idle_jiffies++;
 163                 /*
 164                  * In case the current tick fired too early past its
 165                  * expected expiration, make sure we don't bypass the
 166                  * next clock reprogramming to the same deadline.
 167                  */
 168                 ts->next_tick = 0;
 169         }
 170 #endif
 171         update_process_times(user_mode(regs));
 172         profile_tick(CPU_PROFILING);
 173 }
 174 #endif
 175 
 176 #ifdef CONFIG_NO_HZ_FULL
 177 cpumask_var_t tick_nohz_full_mask;
 178 bool tick_nohz_full_running;
 179 static atomic_t tick_dep_mask;
 180 
 181 static bool check_tick_dependency(atomic_t *dep)
 182 {
 183         int val = atomic_read(dep);
 184 
 185         if (val & TICK_DEP_MASK_POSIX_TIMER) {
 186                 trace_tick_stop(0, TICK_DEP_MASK_POSIX_TIMER);
 187                 return true;
 188         }
 189 
 190         if (val & TICK_DEP_MASK_PERF_EVENTS) {
 191                 trace_tick_stop(0, TICK_DEP_MASK_PERF_EVENTS);
 192                 return true;
 193         }
 194 
 195         if (val & TICK_DEP_MASK_SCHED) {
 196                 trace_tick_stop(0, TICK_DEP_MASK_SCHED);
 197                 return true;
 198         }
 199 
 200         if (val & TICK_DEP_MASK_CLOCK_UNSTABLE) {
 201                 trace_tick_stop(0, TICK_DEP_MASK_CLOCK_UNSTABLE);
 202                 return true;
 203         }
 204 
 205         return false;
 206 }
 207 
 208 static bool can_stop_full_tick(int cpu, struct tick_sched *ts)
 209 {
 210         lockdep_assert_irqs_disabled();
 211 
 212         if (unlikely(!cpu_online(cpu)))
 213                 return false;
 214 
 215         if (check_tick_dependency(&tick_dep_mask))
 216                 return false;
 217 
 218         if (check_tick_dependency(&ts->tick_dep_mask))
 219                 return false;
 220 
 221         if (check_tick_dependency(&current->tick_dep_mask))
 222                 return false;
 223 
 224         if (check_tick_dependency(&current->signal->tick_dep_mask))
 225                 return false;
 226 
 227         return true;
 228 }
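     /*
      * The four checks above mirror the four dependency scopes: global,
      * per CPU, per task and per process (signal struct). As a sketch
      * (names from <linux/tick.h>, usage illustrative, not code from this
      * file), a subsystem that needs the tick on the current task would
      * bracket its work like:
      *
      *     tick_nohz_dep_set_task(current, TICK_DEP_BIT_POSIX_TIMER);
      *     ... section that relies on the tick ...
      *     tick_nohz_dep_clear_task(current, TICK_DEP_BIT_POSIX_TIMER);
      */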
 229 
 230 static void nohz_full_kick_func(struct irq_work *work)
 231 {
 232         /* Empty, the tick restart happens on tick_nohz_irq_exit() */
 233 }
 234 
 235 static DEFINE_PER_CPU(struct irq_work, nohz_full_kick_work) = {
 236         .func = nohz_full_kick_func,
 237 };
 238 
 239 /*
 240  * Kick this CPU if it's full dynticks in order to force it to
 241  * re-evaluate its dependency on the tick and restart it if necessary.
 242  * This kick, unlike tick_nohz_full_kick_cpu() and tick_nohz_full_kick_all(),
 243  * is NMI safe.
 244  */
 245 static void tick_nohz_full_kick(void)
 246 {
 247         if (!tick_nohz_full_cpu(smp_processor_id()))
 248                 return;
 249 
 250         irq_work_queue(this_cpu_ptr(&nohz_full_kick_work));
 251 }
 252 
 253 /*
 254  * Kick the CPU if it's full dynticks in order to force it to
 255  * re-evaluate its dependency on the tick and restart it if necessary.
 256  */
 257 void tick_nohz_full_kick_cpu(int cpu)
 258 {
 259         if (!tick_nohz_full_cpu(cpu))
 260                 return;
 261 
 262         irq_work_queue_on(&per_cpu(nohz_full_kick_work, cpu), cpu);
 263 }
 264 
 265 /*
 266  * Kick all full dynticks CPUs in order to force these to re-evaluate
 267  * their dependency on the tick and restart it if necessary.
 268  */
 269 static void tick_nohz_full_kick_all(void)
 270 {
 271         int cpu;
 272 
 273         if (!tick_nohz_full_running)
 274                 return;
 275 
 276         preempt_disable();
 277         for_each_cpu_and(cpu, tick_nohz_full_mask, cpu_online_mask)
 278                 tick_nohz_full_kick_cpu(cpu);
 279         preempt_enable();
 280 }
 281 
 282 static void tick_nohz_dep_set_all(atomic_t *dep,
 283                                   enum tick_dep_bits bit)
 284 {
 285         int prev;
 286 
 287         prev = atomic_fetch_or(BIT(bit), dep);
 288         if (!prev)
 289                 tick_nohz_full_kick_all();
 290 }
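     /*
      * Note the atomic_fetch_or() pattern above: only the caller that
      * flips the mask from empty to non-empty pays for the (relatively
      * expensive) kick; concurrent setters of further bits observe a
      * non-zero previous value and return without kicking.
      */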
 291 
 292 /*
 293  * Set a global tick dependency. Used by perf events that rely on freq and
 294  * unstable clocks.
 295  */
 296 void tick_nohz_dep_set(enum tick_dep_bits bit)
 297 {
 298         tick_nohz_dep_set_all(&tick_dep_mask, bit);
 299 }
 300 
 301 void tick_nohz_dep_clear(enum tick_dep_bits bit)
 302 {
 303         atomic_andnot(BIT(bit), &tick_dep_mask);
 304 }
 305 
 306 /*
 307  * Set per-CPU tick dependency. Used by scheduler and perf events in order to
 308  * manage event throttling.
 309  */
 310 void tick_nohz_dep_set_cpu(int cpu, enum tick_dep_bits bit)
 311 {
 312         int prev;
 313         struct tick_sched *ts;
 314 
 315         ts = per_cpu_ptr(&tick_cpu_sched, cpu);
 316 
 317         prev = atomic_fetch_or(BIT(bit), &ts->tick_dep_mask);
 318         if (!prev) {
 319                 preempt_disable();
 320                 /* Perf needs a local kick that is NMI safe */
 321                 if (cpu == smp_processor_id()) {
 322                         tick_nohz_full_kick();
 323                 } else {
 324                         /* Remote irq work is not NMI-safe */
 325                         if (!WARN_ON_ONCE(in_nmi()))
 326                                 tick_nohz_full_kick_cpu(cpu);
 327                 }
 328                 preempt_enable();
 329         }
 330 }
 331 
 332 void tick_nohz_dep_clear_cpu(int cpu, enum tick_dep_bits bit)
 333 {
 334         struct tick_sched *ts = per_cpu_ptr(&tick_cpu_sched, cpu);
 335 
 336         atomic_andnot(BIT(bit), &ts->tick_dep_mask);
 337 }
 338 
 339 /*
 340  * Set a per-task tick dependency. Posix CPU timers need this in order to
 341  * elapse per task timers.
 342  */
 343 void tick_nohz_dep_set_task(struct task_struct *tsk, enum tick_dep_bits bit)
 344 {
 345         /*
 346          * We could optimize this with just kicking the target running the task
 347          * if that noise matters for nohz full users.
 348          */
 349         tick_nohz_dep_set_all(&tsk->tick_dep_mask, bit);
 350 }
 351 
 352 void tick_nohz_dep_clear_task(struct task_struct *tsk, enum tick_dep_bits bit)
 353 {
 354         atomic_andnot(BIT(bit), &tsk->tick_dep_mask);
 355 }
 356 
 357 /*
 358  * Set a per-taskgroup tick dependency. Posix CPU timers need this in order to
 359  * elapse per process timers.
 360  */
 361 void tick_nohz_dep_set_signal(struct signal_struct *sig, enum tick_dep_bits bit)
 362 {
 363         tick_nohz_dep_set_all(&sig->tick_dep_mask, bit);
 364 }
 365 
 366 void tick_nohz_dep_clear_signal(struct signal_struct *sig, enum tick_dep_bits bit)
 367 {
 368         atomic_andnot(BIT(bit), &sig->tick_dep_mask);
 369 }
 370 
 371 /*
 372  * Re-evaluate the need for the tick as we switch the current task.
 373  * It might need the tick due to per task/process properties:
 374  * perf events, posix CPU timers, ...
 375  */
 376 void __tick_nohz_task_switch(void)
 377 {
 378         unsigned long flags;
 379         struct tick_sched *ts;
 380 
 381         local_irq_save(flags);
 382 
 383         if (!tick_nohz_full_cpu(smp_processor_id()))
 384                 goto out;
 385 
 386         ts = this_cpu_ptr(&tick_cpu_sched);
 387 
 388         if (ts->tick_stopped) {
 389                 if (atomic_read(&current->tick_dep_mask) ||
 390                     atomic_read(&current->signal->tick_dep_mask))
 391                         tick_nohz_full_kick();
 392         }
 393 out:
 394         local_irq_restore(flags);
 395 }
 396 
 397 /* Get the boot-time nohz CPU list from the kernel parameters. */
 398 void __init tick_nohz_full_setup(cpumask_var_t cpumask)
 399 {
 400         alloc_bootmem_cpumask_var(&tick_nohz_full_mask);
 401         cpumask_copy(tick_nohz_full_mask, cpumask);
 402         tick_nohz_full_running = true;
 403 }
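     /*
      * The cpumask handed in here comes from the "nohz_full=" boot
      * parameter. For example (illustrative command line), booting with
      * "nohz_full=1-7" keeps CPUs 1-7 tickless whenever possible, while
      * CPU 0 stays periodic and typically keeps the timekeeping duty.
      */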
 404 
 405 static int tick_nohz_cpu_down(unsigned int cpu)
 406 {
 407         /*
 408          * The tick_do_timer_cpu CPU handles housekeeping duty (unbound
 409          * timers, workqueues, timekeeping, ...) on behalf of full dynticks
 410          * CPUs. It must remain online when nohz full is enabled.
 411          */
 412         if (tick_nohz_full_running && tick_do_timer_cpu == cpu)
 413                 return -EBUSY;
 414         return 0;
 415 }
 416 
 417 void __init tick_nohz_init(void)
 418 {
 419         int cpu, ret;
 420 
 421         if (!tick_nohz_full_running)
 422                 return;
 423 
 424         /*
 425          * Full dynticks uses irq work to drive the tick rescheduling on safe
 426          * context. So make sure the tick stays on when the arch lacks irq
 427          * work self-IPI support, matching the warning below.
 428          */
 429         if (!arch_irq_work_has_interrupt()) {
 430                 pr_warn("NO_HZ: Can't run full dynticks because arch doesn't support irq work self-IPIs\n");
 431                 cpumask_clear(tick_nohz_full_mask);
 432                 tick_nohz_full_running = false;
 433                 return;
 434         }
 435 
 436         if (IS_ENABLED(CONFIG_PM_SLEEP_SMP) &&
 437                         !IS_ENABLED(CONFIG_PM_SLEEP_SMP_NONZERO_CPU)) {
 438                 cpu = smp_processor_id();
 439 
 440                 if (cpumask_test_cpu(cpu, tick_nohz_full_mask)) {
 441                         pr_warn("NO_HZ: Clearing %d from nohz_full range "
 442                                 "for timekeeping\n", cpu);
 443                         cpumask_clear_cpu(cpu, tick_nohz_full_mask);
 444                 }
 445         }
 446 
 447         for_each_cpu(cpu, tick_nohz_full_mask)
 448                 context_tracking_cpu_set(cpu);
 449 
 450         ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
 451                                         "kernel/nohz:predown", NULL,
 452                                         tick_nohz_cpu_down);
 453         WARN_ON(ret < 0);
 454         pr_info("NO_HZ: Full dynticks CPUs: %*pbl.\n",
 455                 cpumask_pr_args(tick_nohz_full_mask));
 456 }
 457 #endif
 458 
 459 /*
 460  * NOHZ - aka dynamic tick functionality
 461  */
 462 #ifdef CONFIG_NO_HZ_COMMON
 463 /*
 464  * NO HZ enabled ?
 465  */
 466 bool tick_nohz_enabled __read_mostly  = true;
 467 unsigned long tick_nohz_active  __read_mostly;
 468 
 469 
 470 /* Enable / Disable tickless mode */
 471 static int __init setup_tick_nohz(char *str)
 472 {
 473         return (kstrtobool(str, &tick_nohz_enabled) == 0);
 474 }
 475 
 476 __setup("nohz=", setup_tick_nohz);
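     /*
      * Hence "nohz=off" on the kernel command line disables dynticks
      * entirely; any string kstrtobool() understands ("0"/"1", "y"/"n",
      * "on"/"off") is accepted.
      */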
 477 
 478 bool tick_nohz_tick_stopped(void)
 479 {
 480         struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
 481 
 482         return ts->tick_stopped;
 483 }
 484 
 485 bool tick_nohz_tick_stopped_cpu(int cpu)
 486 {
 487         struct tick_sched *ts = per_cpu_ptr(&tick_cpu_sched, cpu);
 488 
 489         return ts->tick_stopped;
 490 }
 491 
 492 /*
 493  * tick_nohz_update_jiffies - update jiffies when idle was interrupted
 494  *
 495  * Called from interrupt entry when the CPU was idle
 496  *
 497  * In case the sched_tick was stopped on this CPU, we have to check if jiffies
 498  * must be updated. Otherwise an interrupt handler could use a stale jiffy
 499  * value. We do this unconditionally on any CPU, as we don't know whether the
 500  * CPU, which has the update task assigned, is in a long sleep.
 501  */
 502 static void tick_nohz_update_jiffies(ktime_t now)
 503 {
 504         unsigned long flags;
 505 
 506         __this_cpu_write(tick_cpu_sched.idle_waketime, now);
 507 
 508         local_irq_save(flags);
 509         tick_do_update_jiffies64(now);
 510         local_irq_restore(flags);
 511 
 512         touch_softlockup_watchdog_sched();
 513 }
 514 
 515 /*
 516  * Updates the per-CPU time idle statistics counters
 517  */
 518 static void
 519 update_ts_time_stats(int cpu, struct tick_sched *ts, ktime_t now, u64 *last_update_time)
 520 {
 521         ktime_t delta;
 522 
 523         if (ts->idle_active) {
 524                 delta = ktime_sub(now, ts->idle_entrytime);
 525                 if (nr_iowait_cpu(cpu) > 0)
 526                         ts->iowait_sleeptime = ktime_add(ts->iowait_sleeptime, delta);
 527                 else
 528                         ts->idle_sleeptime = ktime_add(ts->idle_sleeptime, delta);
 529                 ts->idle_entrytime = now;
 530         }
 531 
 532         if (last_update_time)
 533                 *last_update_time = ktime_to_us(now);
 534 
 535 }
 536 
 537 static void tick_nohz_stop_idle(struct tick_sched *ts, ktime_t now)
 538 {
 539         update_ts_time_stats(smp_processor_id(), ts, now, NULL);
 540         ts->idle_active = 0;
 541 
 542         sched_clock_idle_wakeup_event();
 543 }
 544 
 545 static void tick_nohz_start_idle(struct tick_sched *ts)
 546 {
 547         ts->idle_entrytime = ktime_get();
 548         ts->idle_active = 1;
 549         sched_clock_idle_sleep_event();
 550 }
 551 
 552 /**
 553  * get_cpu_idle_time_us - get the total idle time of a CPU
 554  * @cpu: CPU number to query
 555  * @last_update_time: variable to store update time in. Do not update
 556  * counters if NULL.
 557  *
 558  * Return the cumulative idle time (since boot) for a given
 559  * CPU, in microseconds.
 560  *
 561  * This time is measured via accounting rather than sampling,
 562  * and is as accurate as ktime_get() is.
 563  *
 564  * This function returns -1 if NOHZ is not enabled.
 565  */
 566 u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time)
 567 {
 568         struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
 569         ktime_t now, idle;
 570 
 571         if (!tick_nohz_active)
 572                 return -1;
 573 
 574         now = ktime_get();
 575         if (last_update_time) {
 576                 update_ts_time_stats(cpu, ts, now, last_update_time);
 577                 idle = ts->idle_sleeptime;
 578         } else {
 579                 if (ts->idle_active && !nr_iowait_cpu(cpu)) {
 580                         ktime_t delta = ktime_sub(now, ts->idle_entrytime);
 581 
 582                         idle = ktime_add(ts->idle_sleeptime, delta);
 583                 } else {
 584                         idle = ts->idle_sleeptime;
 585                 }
 586         }
 587 
 588         return ktime_to_us(idle);
 589 
 590 }
 591 EXPORT_SYMBOL_GPL(get_cpu_idle_time_us);
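     /*
      * Sketch of a typical consumer of this export (modeled on cpufreq
      * governors, not code from this file): sample twice and diff both
      * values to derive the busy share of an interval, in microseconds:
      *
      *     u64 wall0, wall1, idle0, idle1;
      *
      *     idle0 = get_cpu_idle_time_us(cpu, &wall0);
      *     ... measurement interval elapses ...
      *     idle1 = get_cpu_idle_time_us(cpu, &wall1);
      *     busy_us = (wall1 - wall0) - (idle1 - idle0);
      */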
 592 
 593 /**
 594  * get_cpu_iowait_time_us - get the total iowait time of a CPU
 595  * @cpu: CPU number to query
 596  * @last_update_time: variable to store update time in. Do not update
 597  * counters if NULL.
 598  *
 599  * Return the cumulative iowait time (since boot) for a given
 600  * CPU, in microseconds.
 601  *
 602  * This time is measured via accounting rather than sampling,
 603  * and is as accurate as ktime_get() is.
 604  *
 605  * This function returns -1 if NOHZ is not enabled.
 606  */
 607 u64 get_cpu_iowait_time_us(int cpu, u64 *last_update_time)
 608 {
 609         struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
 610         ktime_t now, iowait;
 611 
 612         if (!tick_nohz_active)
 613                 return -1;
 614 
 615         now = ktime_get();
 616         if (last_update_time) {
 617                 update_ts_time_stats(cpu, ts, now, last_update_time);
 618                 iowait = ts->iowait_sleeptime;
 619         } else {
 620                 if (ts->idle_active && nr_iowait_cpu(cpu) > 0) {
 621                         ktime_t delta = ktime_sub(now, ts->idle_entrytime);
 622 
 623                         iowait = ktime_add(ts->iowait_sleeptime, delta);
 624                 } else {
 625                         iowait = ts->iowait_sleeptime;
 626                 }
 627         }
 628 
 629         return ktime_to_us(iowait);
 630 }
 631 EXPORT_SYMBOL_GPL(get_cpu_iowait_time_us);
 632 
 633 static void tick_nohz_restart(struct tick_sched *ts, ktime_t now)
 634 {
 635         hrtimer_cancel(&ts->sched_timer);
 636         hrtimer_set_expires(&ts->sched_timer, ts->last_tick);
 637 
 638         /* Forward the time to expire in the future */
 639         hrtimer_forward(&ts->sched_timer, now, tick_period);
 640 
 641         if (ts->nohz_mode == NOHZ_MODE_HIGHRES) {
 642                 hrtimer_start_expires(&ts->sched_timer,
 643                                       HRTIMER_MODE_ABS_PINNED_HARD);
 644         } else {
 645                 tick_program_event(hrtimer_get_expires(&ts->sched_timer), 1);
 646         }
 647 
 648         /*
 649          * Reset to make sure the next tick stop doesn't get fooled by a
 650          * past cached clock deadline.
 651          */
 652         ts->next_tick = 0;
 653 }
 654 
 655 static inline bool local_timer_softirq_pending(void)
 656 {
 657         return local_softirq_pending() & BIT(TIMER_SOFTIRQ);
 658 }
 659 
 660 static ktime_t tick_nohz_next_event(struct tick_sched *ts, int cpu)
 661 {
 662         u64 basemono, next_tick, next_tmr, next_rcu, delta, expires;
 663         unsigned long basejiff;
 664         unsigned int seq;
 665 
 666         /* Read jiffies and the time when jiffies were updated last */
 667         do {
 668                 seq = read_seqbegin(&jiffies_lock);
 669                 basemono = last_jiffies_update;
 670                 basejiff = jiffies;
 671         } while (read_seqretry(&jiffies_lock, seq));
 672         ts->last_jiffies = basejiff;
 673         ts->timer_expires_base = basemono;
 674 
 675         /*
 676          * Keep the periodic tick when RCU, the architecture or irq_work
 677          * requests it.
 678          * Aside of that, check whether the local timer softirq is
 679          * pending. If so, it is a bad idea to call get_next_timer_interrupt(),
 680          * because there is an already expired timer, so it will request
 681          * immediate expiry, which rearms the hardware timer with a
 682          * minimal delta and brings us back to this place immediately.
 683          * Lather, rinse and repeat...
 684          */
 685         if (rcu_needs_cpu(basemono, &next_rcu) || arch_needs_cpu() ||
 686             irq_work_needs_cpu() || local_timer_softirq_pending()) {
 687                 next_tick = basemono + TICK_NSEC;
 688         } else {
 689                 /*
 690                  * Get the next pending timer. If high resolution
 691                  * timers are enabled, this only takes the timer wheel
 692                  * timers into account. If high resolution timers are
 693                  * disabled, this also looks at the next expiring
 694                  * hrtimer.
 695                  */
 696                 next_tmr = get_next_timer_interrupt(basejiff, basemono);
 697                 ts->next_timer = next_tmr;
 698                 /* Take the next RCU event into account */
 699                 next_tick = next_rcu < next_tmr ? next_rcu : next_tmr;
 700         }
 701 
 702         /*
 703          * If the tick is due in the next period, keep it ticking or
 704          * force prod the timer.
 705          */
 706         delta = next_tick - basemono;
 707         if (delta <= (u64)TICK_NSEC) {
 708                 /*
 709                  * Tell the timer code that the base is not idle, i.e. undo
 710                  * the effect of get_next_timer_interrupt():
 711                  */
 712                 timer_clear_idle();
 713                 /*
 714                  * We've not stopped the tick yet, and there's a timer in the
 715                  * next period, so no point in stopping it either, bail.
 716                  */
 717                 if (!ts->tick_stopped) {
 718                         ts->timer_expires = 0;
 719                         goto out;
 720                 }
 721         }
 722 
 723         /*
 724          * If this CPU is the one which had the do_timer() duty last, we limit
 725          * the sleep time to the timekeeping max_deferment value.
 726          * Otherwise we can sleep as long as we want.
 727          */
 728         delta = timekeeping_max_deferment();
 729         if (cpu != tick_do_timer_cpu &&
 730             (tick_do_timer_cpu != TICK_DO_TIMER_NONE || !ts->do_timer_last))
 731                 delta = KTIME_MAX;
 732 
 733         /* Calculate the next expiry time */
 734         if (delta < (KTIME_MAX - basemono))
 735                 expires = basemono + delta;
 736         else
 737                 expires = KTIME_MAX;
 738 
 739         ts->timer_expires = min_t(u64, expires, next_tick);
 740 
 741 out:
 742         return ts->timer_expires;
 743 }
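     /*
      * Net effect of the above: unless the next event is within one tick
      * (in which case 0 is returned and the tick is retained),
      * ts->timer_expires is the earlier of the next timer/RCU event and
      * basemono + timekeeping_max_deferment(). With illustrative numbers:
      * even with no timer queued for 10s, a 2s deferment limit means the
      * timekeeping CPU still wakes up after ~2s so the timekeeping core
      * is updated before the clocksource limit is exceeded.
      */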
 744 
 745 static void tick_nohz_stop_tick(struct tick_sched *ts, int cpu)
 746 {
 747         struct clock_event_device *dev = __this_cpu_read(tick_cpu_device.evtdev);
 748         u64 basemono = ts->timer_expires_base;
 749         u64 expires = ts->timer_expires;
 750         ktime_t tick = expires;
 751 
 752         /* Make sure we won't be trying to stop it twice in a row. */
 753         ts->timer_expires_base = 0;
 754 
 755         /*
 756          * If this CPU is the one which updates jiffies, then give up
 757          * the assignment and let it be taken by the CPU which runs
 758          * the tick timer next, which might be this CPU as well. If we
 759          * don't drop this here, the jiffies might be stale and
 760          * do_timer() never invoked. Keep track of the fact that it
 761          * was the one which had the do_timer() duty last.
 762          */
 763         if (cpu == tick_do_timer_cpu) {
 764                 tick_do_timer_cpu = TICK_DO_TIMER_NONE;
 765                 ts->do_timer_last = 1;
 766         } else if (tick_do_timer_cpu != TICK_DO_TIMER_NONE) {
 767                 ts->do_timer_last = 0;
 768         }
 769 
 770         /* Skip reprogramming of the event if it hasn't changed */
 771         if (ts->tick_stopped && (expires == ts->next_tick)) {
 772                 /* Sanity check: make sure the clockevent is actually programmed */
 773                 if (tick == KTIME_MAX || ts->next_tick == hrtimer_get_expires(&ts->sched_timer))
 774                         return;
 775 
 776                 WARN_ON_ONCE(1);
 777                 printk_once("basemono: %llu ts->next_tick: %llu dev->next_event: %llu timer->active: %d timer->expires: %llu\n",
 778                             basemono, ts->next_tick, dev->next_event,
 779                             hrtimer_active(&ts->sched_timer), hrtimer_get_expires(&ts->sched_timer));
 780         }
 781 
 782         /*
 783          * tick_nohz_stop_tick() can be called several times before
 784          * tick_nohz_restart_sched_tick() is called. This happens when
 785          * interrupts arrive which do not cause a reschedule. In the
 786          * first call we save the current tick time, so we can restart
 787          * the scheduler tick in tick_nohz_restart_sched_tick().
 788          */
 789         if (!ts->tick_stopped) {
 790                 calc_load_nohz_start();
 791                 quiet_vmstat();
 792 
 793                 ts->last_tick = hrtimer_get_expires(&ts->sched_timer);
 794                 ts->tick_stopped = 1;
 795                 trace_tick_stop(1, TICK_DEP_MASK_NONE);
 796         }
 797 
 798         ts->next_tick = tick;
 799 
 800         /*
 801          * If the expiration time == KTIME_MAX, then we simply stop
 802          * the tick timer.
 803          */
 804         if (unlikely(expires == KTIME_MAX)) {
 805                 if (ts->nohz_mode == NOHZ_MODE_HIGHRES)
 806                         hrtimer_cancel(&ts->sched_timer);
 807                 return;
 808         }
 809 
 810         if (ts->nohz_mode == NOHZ_MODE_HIGHRES) {
 811                 hrtimer_start(&ts->sched_timer, tick,
 812                               HRTIMER_MODE_ABS_PINNED_HARD);
 813         } else {
 814                 hrtimer_set_expires(&ts->sched_timer, tick);
 815                 tick_program_event(tick, 1);
 816         }
 817 }
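     /*
      * The reprogramming above is mode dependent: in NOHZ_MODE_HIGHRES
      * the sched_timer hrtimer itself carries the tick and is moved to
      * the new deadline; in low resolution mode the clockevent device is
      * programmed directly and the hrtimer merely caches the expiry.
      */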
 818 
 819 static void tick_nohz_retain_tick(struct tick_sched *ts)
 820 {
 821         ts->timer_expires_base = 0;
 822 }
 823 
 824 #ifdef CONFIG_NO_HZ_FULL
 825 static void tick_nohz_stop_sched_tick(struct tick_sched *ts, int cpu)
 826 {
 827         if (tick_nohz_next_event(ts, cpu))
 828                 tick_nohz_stop_tick(ts, cpu);
 829         else
 830                 tick_nohz_retain_tick(ts);
 831 }
 832 #endif /* CONFIG_NO_HZ_FULL */
 833 
 834 static void tick_nohz_restart_sched_tick(struct tick_sched *ts, ktime_t now)
 835 {
 836         /* Update jiffies first */
 837         tick_do_update_jiffies64(now);
 838 
 839         /*
 840          * Clear the timer idle flag, so we avoid IPIs on remote queueing and
 841          * the clock forward checks in the enqueue path:
 842          */
 843         timer_clear_idle();
 844 
 845         calc_load_nohz_stop();
 846         touch_softlockup_watchdog_sched();
 847         /*
 848          * Cancel the scheduled timer and restore the tick
 849          */
 850         ts->tick_stopped  = 0;
 851         ts->idle_exittime = now;
 852 
 853         tick_nohz_restart(ts, now);
 854 }
 855 
 856 static void tick_nohz_full_update_tick(struct tick_sched *ts)
 857 {
 858 #ifdef CONFIG_NO_HZ_FULL
 859         int cpu = smp_processor_id();
 860 
 861         if (!tick_nohz_full_cpu(cpu))
 862                 return;
 863 
 864         if (!ts->tick_stopped && ts->nohz_mode == NOHZ_MODE_INACTIVE)
 865                 return;
 866 
 867         if (can_stop_full_tick(cpu, ts))
 868                 tick_nohz_stop_sched_tick(ts, cpu);
 869         else if (ts->tick_stopped)
 870                 tick_nohz_restart_sched_tick(ts, ktime_get());
 871 #endif
 872 }
 873 
 874 static bool can_stop_idle_tick(int cpu, struct tick_sched *ts)
 875 {
 876         /*
 877          * If this CPU is offline and it is the one which updates
 878          * jiffies, then give up the assignment and let it be taken by
 879          * the CPU which runs the tick timer next. If we don't drop
 880          * this here, the jiffies might be stale and do_timer() never
 881          * gets invoked.
 882          */
 883         if (unlikely(!cpu_online(cpu))) {
 884                 if (cpu == tick_do_timer_cpu)
 885                         tick_do_timer_cpu = TICK_DO_TIMER_NONE;
 886                 /*
 887                  * Make sure the CPU doesn't get fooled by an obsolete tick
 888                  * deadline if it comes back online later.
 889                  */
 890                 ts->next_tick = 0;
 891                 return false;
 892         }
 893 
 894         if (unlikely(ts->nohz_mode == NOHZ_MODE_INACTIVE))
 895                 return false;
 896 
 897         if (need_resched())
 898                 return false;
 899 
 900         if (unlikely(local_softirq_pending())) {
 901                 static int ratelimit;
 902 
 903                 if (ratelimit < 10 &&
 904                     (local_softirq_pending() & SOFTIRQ_STOP_IDLE_MASK)) {
 905                         pr_warn("NOHZ: local_softirq_pending %02x\n",
 906                                 (unsigned int) local_softirq_pending());
 907                         ratelimit++;
 908                 }
 909                 return false;
 910         }
 911 
 912         if (tick_nohz_full_enabled()) {
 913                 /*
 914                  * Keep the tick alive to guarantee timekeeping progression
 915                  * if there are full dynticks CPUs around.
 916                  */
 917                 if (tick_do_timer_cpu == cpu)
 918                         return false;
 919                 /*
 920                  * Boot safety: make sure the timekeeping duty has been
 921                  * assigned before entering dyntick-idle mode, i.e. while
 922                  * tick_do_timer_cpu is still TICK_DO_TIMER_BOOT.
 923                  */
 924                 if (unlikely(tick_do_timer_cpu == TICK_DO_TIMER_BOOT))
 925                         return false;
 926 
 927                 /* Should not happen for nohz-full */
 928                 if (WARN_ON_ONCE(tick_do_timer_cpu == TICK_DO_TIMER_NONE))
 929                         return false;
 930         }
 931 
 932         return true;
 933 }
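     /*
      * To summarize the veto conditions above: an offline CPU, nohz not
      * yet active, a pending reschedule, pending softirqs, or (with
      * nohz_full) this CPU owning or still awaiting the timekeeping duty
      * all keep the idle tick running.
      */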
 934 
 935 static void __tick_nohz_idle_stop_tick(struct tick_sched *ts)
 936 {
 937         ktime_t expires;
 938         int cpu = smp_processor_id();
 939 
 940         /*
 941          * If tick_nohz_get_sleep_length() ran tick_nohz_next_event(), the
 942          * tick timer expiration time is known already.
 943          */
 944         if (ts->timer_expires_base)
 945                 expires = ts->timer_expires;
 946         else if (can_stop_idle_tick(cpu, ts))
 947                 expires = tick_nohz_next_event(ts, cpu);
 948         else
 949                 return;
 950 
 951         ts->idle_calls++;
 952 
 953         if (expires > 0LL) {
 954                 int was_stopped = ts->tick_stopped;
 955 
 956                 tick_nohz_stop_tick(ts, cpu);
 957 
 958                 ts->idle_sleeps++;
 959                 ts->idle_expires = expires;
 960 
 961                 if (!was_stopped && ts->tick_stopped) {
 962                         ts->idle_jiffies = ts->last_jiffies;
 963                         nohz_balance_enter_idle(cpu);
 964                 }
 965         } else {
 966                 tick_nohz_retain_tick(ts);
 967         }
 968 }
 969 
 970 /**
 971  * tick_nohz_idle_stop_tick - stop the idle tick from the idle task
 972  *
 973  * When the next event is more than a tick into the future, stop the idle tick.
 974  */
 975 void tick_nohz_idle_stop_tick(void)
 976 {
 977         __tick_nohz_idle_stop_tick(this_cpu_ptr(&tick_cpu_sched));
 978 }
 979 
 980 void tick_nohz_idle_retain_tick(void)
 981 {
 982         tick_nohz_retain_tick(this_cpu_ptr(&tick_cpu_sched));
 983         /*
 984          * Undo the effect of get_next_timer_interrupt() called from
 985          * tick_nohz_next_event().
 986          */
 987         timer_clear_idle();
 988 }
 989 
 990 /**
 991  * tick_nohz_idle_enter - prepare for entering idle on the current CPU
 992  *
 993  * Called when we start the idle loop.
 994  */
 995 void tick_nohz_idle_enter(void)
 996 {
 997         struct tick_sched *ts;
 998 
 999         lockdep_assert_irqs_enabled();
1000 
1001         local_irq_disable();
1002 
1003         ts = this_cpu_ptr(&tick_cpu_sched);
1004 
1005         WARN_ON_ONCE(ts->timer_expires_base);
1006 
1007         ts->inidle = 1;
1008         tick_nohz_start_idle(ts);
1009 
1010         local_irq_enable();
1011 }
1012 
1013 /**
1014  * tick_nohz_irq_exit - update next tick event from interrupt exit
1015  *
1016  * When an interrupt fires while we are idle and it doesn't cause
1017  * a reschedule, it may still add, modify or delete a timer, enqueue
1018  * an RCU callback, etc...
1019  * So we need to re-calculate and reprogram the next tick event.
1020  */
1021 void tick_nohz_irq_exit(void)
1022 {
1023         struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
1024 
1025         if (ts->inidle)
1026                 tick_nohz_start_idle(ts);
1027         else
1028                 tick_nohz_full_update_tick(ts);
1029 }
1030 
1031 /**
1032  * tick_nohz_idle_got_tick - Check whether or not the tick handler has run
1033  */
1034 bool tick_nohz_idle_got_tick(void)
1035 {
1036         struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
1037 
1038         if (ts->got_idle_tick) {
1039                 ts->got_idle_tick = 0;
1040                 return true;
1041         }
1042         return false;
1043 }
1044 
1045 /**
1046  * tick_nohz_get_next_hrtimer - return the next expiration time for the hrtimer
1047  * or the tick, whichever expires first. Note that, if the tick has been
1048  * stopped, it returns the next hrtimer.
1049  *
1050  * Called from power state control code with interrupts disabled.
1051  */
1052 ktime_t tick_nohz_get_next_hrtimer(void)
1053 {
1054         return __this_cpu_read(tick_cpu_device.evtdev)->next_event;
1055 }
1056 
1057 /**
1058  * tick_nohz_get_sleep_length - return the expected length of the current sleep
1059  * @delta_next: duration until the next event if the tick cannot be stopped
1060  *
1061  * Called from power state control code with interrupts disabled.
1062  */
1063 ktime_t tick_nohz_get_sleep_length(ktime_t *delta_next)
1064 {
1065         struct clock_event_device *dev = __this_cpu_read(tick_cpu_device.evtdev);
1066         struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
1067         int cpu = smp_processor_id();
1068         /*
1069          * The idle entry time is expected to be a sufficient approximation of
1070          * the current time at this point.
1071          */
1072         ktime_t now = ts->idle_entrytime;
1073         ktime_t next_event;
1074 
1075         WARN_ON_ONCE(!ts->inidle);
1076 
1077         *delta_next = ktime_sub(dev->next_event, now);
1078 
1079         if (!can_stop_idle_tick(cpu, ts))
1080                 return *delta_next;
1081 
1082         next_event = tick_nohz_next_event(ts, cpu);
1083         if (!next_event)
1084                 return *delta_next;
1085 
1086         /*
1087          * If the next highres timer to expire is earlier than next_event, the
1088          * idle governor needs to know that.
1089          */
1090         next_event = min_t(u64, next_event,
1091                            hrtimer_next_event_without(&ts->sched_timer));
1092 
1093         return ktime_sub(next_event, now);
1094 }
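     /*
      * Sketch of the intended caller (a cpuidle governor such as menu;
      * illustrative, not code from this file):
      *
      *     ktime_t delta_next;
      *     s64 sleep_ns = ktime_to_ns(tick_nohz_get_sleep_length(&delta_next));
      *
      * The governor then picks the deepest idle state whose target
      * residency fits within sleep_ns, falling back to delta_next as the
      * bound when it decides not to stop the tick.
      */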
1095 
1096 /**
1097  * tick_nohz_get_idle_calls_cpu - return the current idle calls counter value
1098  * for a particular CPU.
1099  *
1100  * Called from the schedutil frequency scaling governor in scheduler context.
1101  */
1102 unsigned long tick_nohz_get_idle_calls_cpu(int cpu)
1103 {
1104         struct tick_sched *ts = tick_get_tick_sched(cpu);
1105 
1106         return ts->idle_calls;
1107 }
1108 
1109 /**
1110  * tick_nohz_get_idle_calls - return the current idle calls counter value
1111  *
1112  * Called from the schedutil frequency scaling governor in scheduler context.
1113  */
1114 unsigned long tick_nohz_get_idle_calls(void)
1115 {
1116         struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
1117 
1118         return ts->idle_calls;
1119 }
1120 
1121 static void tick_nohz_account_idle_ticks(struct tick_sched *ts)
1122 {
1123 #ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
1124         unsigned long ticks;
1125 
1126         if (vtime_accounting_cpu_enabled())
1127                 return;
1128         /*
1129          * We stopped the tick in idle. update_process_times() would miss the
1130          * time we slept, as it only does a single tick of accounting.
1131          * Enforce that this time is accounted to idle !
1132          */
1133         ticks = jiffies - ts->idle_jiffies;
1134         /*
1135          * We might be one off. Do not randomly account a huge number of ticks!
1136          */
1137         if (ticks && ticks < LONG_MAX)
1138                 account_idle_ticks(ticks);
1139 #endif
1140 }
1141 
1142 static void __tick_nohz_idle_restart_tick(struct tick_sched *ts, ktime_t now)
1143 {
1144         tick_nohz_restart_sched_tick(ts, now);
1145         tick_nohz_account_idle_ticks(ts);
1146 }
1147 
1148 void tick_nohz_idle_restart_tick(void)
1149 {
1150         struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
1151 
1152         if (ts->tick_stopped)
1153                 __tick_nohz_idle_restart_tick(ts, ktime_get());
1154 }
1155 
1156 /**
1157  * tick_nohz_idle_exit - restart the idle tick from the idle task
1158  *
1159  * Restart the idle tick when the CPU is woken up from idle.
1160  * This also exits the RCU extended quiescent state. The CPU
1161  * can use RCU again after this function is called.
1162  */
1163 void tick_nohz_idle_exit(void)
1164 {
1165         struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
1166         bool idle_active, tick_stopped;
1167         ktime_t now;
1168 
1169         local_irq_disable();
1170 
1171         WARN_ON_ONCE(!ts->inidle);
1172         WARN_ON_ONCE(ts->timer_expires_base);
1173 
1174         ts->inidle = 0;
1175         idle_active = ts->idle_active;
1176         tick_stopped = ts->tick_stopped;
1177 
1178         if (idle_active || tick_stopped)
1179                 now = ktime_get();
1180 
1181         if (idle_active)
1182                 tick_nohz_stop_idle(ts, now);
1183 
1184         if (tick_stopped)
1185                 __tick_nohz_idle_restart_tick(ts, now);
1186 
1187         local_irq_enable();
1188 }
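     /*
      * Expected ordering from the idle task, as a simplified sketch
      * modeled on do_idle() in kernel/sched/idle.c (not code from this
      * file):
      *
      *     tick_nohz_idle_enter();
      *     while (!need_resched()) {
      *             tick_nohz_idle_stop_tick();  (or ..._retain_tick())
      *             ... enter the arch idle state ...
      *     }
      *     tick_nohz_idle_exit();
      */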
1189 
1190 /*
1191  * The nohz low res interrupt handler
1192  */
1193 static void tick_nohz_handler(struct clock_event_device *dev)
1194 {
1195         struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
1196         struct pt_regs *regs = get_irq_regs();
1197         ktime_t now = ktime_get();
1198 
1199         dev->next_event = KTIME_MAX;
1200 
1201         tick_sched_do_timer(ts, now);
1202         tick_sched_handle(ts, regs);
1203 
1204         /* No need to reprogram if we are running tickless */
1205         if (unlikely(ts->tick_stopped))
1206                 return;
1207 
1208         hrtimer_forward(&ts->sched_timer, now, tick_period);
1209         tick_program_event(hrtimer_get_expires(&ts->sched_timer), 1);
1210 }
1211 
1212 static inline void tick_nohz_activate(struct tick_sched *ts, int mode)
1213 {
1214         if (!tick_nohz_enabled)
1215                 return;
1216         ts->nohz_mode = mode;
1217         /* One update is enough */
1218         if (!test_and_set_bit(0, &tick_nohz_active))
1219                 timers_update_nohz();
1220 }
1221 
1222 /**
1223  * tick_nohz_switch_to_nohz - switch to NOHZ mode
1224  */
1225 static void tick_nohz_switch_to_nohz(void)
1226 {
1227         struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
1228         ktime_t next;
1229 
1230         if (!tick_nohz_enabled)
1231                 return;
1232 
1233         if (tick_switch_to_oneshot(tick_nohz_handler))
1234                 return;
1235 
1236         /*
1237          * Recycle the hrtimer in ts, so we can share the
1238          * hrtimer_forward() with the highres code.
1239          */
1240         hrtimer_init(&ts->sched_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_HARD);
1241         /* Get the next period */
1242         next = tick_init_jiffy_update();
1243 
1244         hrtimer_set_expires(&ts->sched_timer, next);
1245         hrtimer_forward_now(&ts->sched_timer, tick_period);
1246         tick_program_event(hrtimer_get_expires(&ts->sched_timer), 1);
1247         tick_nohz_activate(ts, NOHZ_MODE_LOWRES);
1248 }
1249 
1250 static inline void tick_nohz_irq_enter(void)
1251 {
1252         struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
1253         ktime_t now;
1254 
1255         if (!ts->idle_active && !ts->tick_stopped)
1256                 return;
1257         now = ktime_get();
1258         if (ts->idle_active)
1259                 tick_nohz_stop_idle(ts, now);
1260         if (ts->tick_stopped)
1261                 tick_nohz_update_jiffies(now);
1262 }
1263 
1264 #else
1265 
1266 static inline void tick_nohz_switch_to_nohz(void) { }
1267 static inline void tick_nohz_irq_enter(void) { }
1268 static inline void tick_nohz_activate(struct tick_sched *ts, int mode) { }
1269 
1270 #endif /* CONFIG_NO_HZ_COMMON */
1271 
1272 /*
1273  * Called from irq_enter() to notify about the possible interruption of idle()
1274  */
1275 void tick_irq_enter(void)
1276 {
1277         tick_check_oneshot_broadcast_this_cpu();
1278         tick_nohz_irq_enter();
1279 }
1280 
1281 /*
1282  * High resolution timer specific code
1283  */
1284 #ifdef CONFIG_HIGH_RES_TIMERS
1285 /*
1286  * We rearm the timer until we get disabled by the idle code.
1287  * Called with interrupts disabled.
1288  */
1289 static enum hrtimer_restart tick_sched_timer(struct hrtimer *timer)
1290 {
1291         struct tick_sched *ts =
1292                 container_of(timer, struct tick_sched, sched_timer);
1293         struct pt_regs *regs = get_irq_regs();
1294         ktime_t now = ktime_get();
1295 
1296         tick_sched_do_timer(ts, now);
1297 
1298         /*
1299          * Do not call when we are not in IRQ context and have
1300          * no valid 'regs' pointer.
1301          */
1302         if (regs)
1303                 tick_sched_handle(ts, regs);
1304         else
1305                 ts->next_tick = 0;
1306 
1307         /* No need to reprogram if we are in idle or full dynticks mode */
1308         if (unlikely(ts->tick_stopped))
1309                 return HRTIMER_NORESTART;
1310 
1311         hrtimer_forward(timer, now, tick_period);
1312 
1313         return HRTIMER_RESTART;
1314 }
1315 
1316 static int sched_skew_tick;
1317 
1318 static int __init skew_tick(char *str)
1319 {
1320         get_option(&str, &sched_skew_tick);
1321 
1322         return 0;
1323 }
1324 early_param("skew_tick", skew_tick);
1325 
1326 /**
1327  * tick_setup_sched_timer - setup the tick emulation timer
1328  */
1329 void tick_setup_sched_timer(void)
1330 {
1331         struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
1332         ktime_t now = ktime_get();
1333 
1334         /*
1335          * Emulate tick processing via per-CPU hrtimers:
1336          */
1337         hrtimer_init(&ts->sched_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_HARD);
1338         ts->sched_timer.function = tick_sched_timer;
1339 
1340         /* Get the next period (per-CPU) */
1341         hrtimer_set_expires(&ts->sched_timer, tick_init_jiffy_update());
1342 
1343         /* Offset the tick to avert jiffies_lock contention. */
1344         if (sched_skew_tick) {
1345                 u64 offset = ktime_to_ns(tick_period) >> 1;
1346                 do_div(offset, num_possible_cpus());
1347                 offset *= smp_processor_id();
1348                 hrtimer_add_expires_ns(&ts->sched_timer, offset);
1349         }
1350 
1351         hrtimer_forward(&ts->sched_timer, now, tick_period);
1352         hrtimer_start_expires(&ts->sched_timer, HRTIMER_MODE_ABS_PINNED_HARD);
1353         tick_nohz_activate(ts, NOHZ_MODE_HIGHRES);
1354 }
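     /*
      * Worked example for the skew above (illustrative numbers): with
      * "skew_tick=1", HZ=250 (tick_period == 4ms) and 4 possible CPUs,
      * the per-CPU stride is (4ms / 2) / 4 = 0.5ms, so CPUs 0..3 tick at
      * +0, +0.5ms, +1ms and +1.5ms respectively, spreading out the
      * jiffies_lock contention.
      */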
1355 #endif /* CONFIG_HIGH_RES_TIMERS */
1356 
1357 #if defined CONFIG_NO_HZ_COMMON || defined CONFIG_HIGH_RES_TIMERS
1358 void tick_cancel_sched_timer(int cpu)
1359 {
1360         struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
1361 
1362 # ifdef CONFIG_HIGH_RES_TIMERS
1363         if (ts->sched_timer.base)
1364                 hrtimer_cancel(&ts->sched_timer);
1365 # endif
1366 
1367         memset(ts, 0, sizeof(*ts));
1368 }
1369 #endif
1370 
1371 /*
1372  * Async notification about clocksource changes
1373  */
1374 void tick_clock_notify(void)
1375 {
1376         int cpu;
1377 
1378         for_each_possible_cpu(cpu)
1379                 set_bit(0, &per_cpu(tick_cpu_sched, cpu).check_clocks);
1380 }
1381 
1382 /*
1383  * Async notification about clock event changes
1384  */
1385 void tick_oneshot_notify(void)
1386 {
1387         struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
1388 
1389         set_bit(0, &ts->check_clocks);
1390 }
1391 
1392 /*
1393  * Check if a change happened which makes oneshot possible.
1394  *
1395  * Called cyclically from the hrtimer softirq (driven by the timer
1396  * softirq). allow_nohz signals that we can switch into low-res NOHZ
1397  * mode, because high resolution timers are disabled (either at compile
1398  * or run time). Called with interrupts disabled.
1399  */
1400 int tick_check_oneshot_change(int allow_nohz)
1401 {
1402         struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
1403 
1404         if (!test_and_clear_bit(0, &ts->check_clocks))
1405                 return 0;
1406 
1407         if (ts->nohz_mode != NOHZ_MODE_INACTIVE)
1408                 return 0;
1409 
1410         if (!timekeeping_valid_for_hres() || !tick_is_oneshot_available())
1411                 return 0;
1412 
1413         if (!allow_nohz)
1414                 return 1;
1415 
1416         tick_nohz_switch_to_nohz();
1417         return 0;
1418 }