Lines matching refs:cpu
(The leading number on each line is the source line; the trailing "argument"/"local" annotation records whether cpu is a parameter or a local variable of the named function.)

44 struct tick_sched *tick_get_tick_sched(int cpu)  in tick_get_tick_sched()  argument
46 return &per_cpu(tick_cpu_sched, cpu); in tick_get_tick_sched()
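
The accessor at line 44 is a thin wrapper over the per-CPU variable tick_cpu_sched. A minimal sketch of that pattern, assuming a kernel context; my_stats and my_get_stats below are hypothetical stand-ins, since tick_cpu_sched itself is static to this file:

	#include <linux/percpu.h>

	struct my_stats {
		unsigned long ticks;
	};
	static DEFINE_PER_CPU(struct my_stats, my_stats);

	/* Return the given CPU's instance, as tick_get_tick_sched()
	 * does for tick_cpu_sched. */
	static struct my_stats *my_get_stats(int cpu)
	{
		return &per_cpu(my_stats, cpu);
	}
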
114 int cpu = smp_processor_id(); in tick_sched_do_timer() local
125 && !tick_nohz_full_cpu(cpu)) in tick_sched_do_timer()
126 tick_do_timer_cpu = cpu; in tick_sched_do_timer()
130 if (tick_do_timer_cpu == cpu) in tick_sched_do_timer()
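
Lines 114-130 are the timekeeping handoff in tick_sched_do_timer(): if the do_timer duty is unowned, a CPU that is not in the nohz_full set claims it, and the owner then advances jiffies. A condensed sketch of that logic as it reads in the kernel vintage this listing reflects (tick_do_timer_cpu and TICK_DO_TIMER_NONE come from tick-internal.h):

	int cpu = smp_processor_id();

	/* Duty was dropped (the owner slept); claim it unless this
	 * CPU is nohz_full and must be left tickless. */
	if (unlikely(tick_do_timer_cpu == TICK_DO_TIMER_NONE)
	    && !tick_nohz_full_cpu(cpu))
		tick_do_timer_cpu = cpu;

	/* Only the owning CPU updates jiffies. */
	if (tick_do_timer_cpu == cpu)
		tick_do_update_jiffies64(now);
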
245 void tick_nohz_full_kick_cpu(int cpu) in tick_nohz_full_kick_cpu() argument
247 if (!tick_nohz_full_cpu(cpu)) in tick_nohz_full_kick_cpu()
250 irq_work_queue_on(&per_cpu(nohz_full_kick_work, cpu), cpu); in tick_nohz_full_kick_cpu()
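
tick_nohz_full_kick_cpu() (line 245) pokes a remote nohz_full CPU by queueing an irq_work on it, which raises an IPI and makes the target re-evaluate its tick. A hedged sketch of the same irq_work_queue_on() pattern with a hypothetical handler, using the initializer style of this kernel vintage:

	#include <linux/irq_work.h>
	#include <linux/percpu.h>

	static void my_kick_fn(struct irq_work *work)
	{
		/* Runs in hard-irq context on the targeted CPU. */
	}

	static DEFINE_PER_CPU(struct irq_work, my_kick_work) = {
		.func = my_kick_fn,
	};

	static void my_kick_cpu(int cpu)
	{
		irq_work_queue_on(&per_cpu(my_kick_work, cpu), cpu);
	}
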
314 unsigned int cpu = (unsigned long)hcpu; in tick_nohz_cpu_down_callback() local
322 if (tick_nohz_full_running && tick_do_timer_cpu == cpu) in tick_nohz_cpu_down_callback()
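
The hotplug callback at lines 314-322 vetoes offlining the CPU that currently carries the timekeeping duty while nohz_full is running, since the remaining full-dynticks CPUs depend on it for jiffies progression. In the notifier-based hotplug scheme of this era, that is roughly:

	if (tick_nohz_full_running && tick_do_timer_cpu == cpu)
		return NOTIFY_BAD;	/* refuse CPU_DOWN_PREPARE */
	return NOTIFY_OK;
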
347 int cpu; in tick_nohz_init() local
375 cpu = smp_processor_id(); in tick_nohz_init()
377 if (cpumask_test_cpu(cpu, tick_nohz_full_mask)) { in tick_nohz_init()
378 pr_warning("NO_HZ: Clearing %d from nohz_full range for timekeeping\n", cpu); in tick_nohz_init()
379 cpumask_clear_cpu(cpu, tick_nohz_full_mask); in tick_nohz_init()
385 for_each_cpu(cpu, tick_nohz_full_mask) in tick_nohz_init()
386 context_tracking_cpu_set(cpu); in tick_nohz_init()
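
tick_nohz_init() (lines 347-386) makes sure at least one CPU can keep time: the CPU running the init code is dropped from tick_nohz_full_mask (with the pr_warning at line 378), and every CPU left in the mask gets context tracking enabled so user/kernel transitions can be accounted without a tick. Condensed from the lines above:

	cpu = smp_processor_id();
	if (cpumask_test_cpu(cpu, tick_nohz_full_mask)) {
		pr_warning("NO_HZ: Clearing %d from nohz_full range for timekeeping\n", cpu);
		cpumask_clear_cpu(cpu, tick_nohz_full_mask);
	}

	for_each_cpu(cpu, tick_nohz_full_mask)
		context_tracking_cpu_set(cpu);
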
451 update_ts_time_stats(int cpu, struct tick_sched *ts, ktime_t now, u64 *last_update_time) in update_ts_time_stats() argument
457 if (nr_iowait_cpu(cpu) > 0) in update_ts_time_stats()
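
update_ts_time_stats() (lines 451-457) is where the idle/iowait split happens: time spent with the tick stopped is charged to iowait_sleeptime when the CPU has tasks blocked on I/O (nr_iowait_cpu(cpu) > 0) and to idle_sleeptime otherwise. A condensed sketch of the accounting branch:

	if (ts->idle_active) {
		delta = ktime_sub(now, ts->idle_entrytime);
		if (nr_iowait_cpu(cpu) > 0)
			ts->iowait_sleeptime = ktime_add(ts->iowait_sleeptime, delta);
		else
			ts->idle_sleeptime = ktime_add(ts->idle_sleeptime, delta);
		ts->idle_entrytime = now;
	}
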
501 u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time) in get_cpu_idle_time_us() argument
503 struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu); in get_cpu_idle_time_us()
511 update_ts_time_stats(cpu, ts, now, last_update_time); in get_cpu_idle_time_us()
514 if (ts->idle_active && !nr_iowait_cpu(cpu)) { in get_cpu_idle_time_us()
542 u64 get_cpu_iowait_time_us(int cpu, u64 *last_update_time) in get_cpu_iowait_time_us() argument
544 struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu); in get_cpu_iowait_time_us()
552 update_ts_time_stats(cpu, ts, now, last_update_time); in get_cpu_iowait_time_us()
555 if (ts->idle_active && nr_iowait_cpu(cpu) > 0) { in get_cpu_iowait_time_us()
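
get_cpu_idle_time_us() and get_cpu_iowait_time_us() (lines 501-555) are the exported readers of those two counters; cpufreq governors are their classic callers. A hedged usage sketch from a hypothetical module:

	#include <linux/tick.h>
	#include <linux/printk.h>

	static void report_cpu_times(int cpu)
	{
		u64 wall_us;	/* set to the current time, in us */
		u64 idle_us = get_cpu_idle_time_us(cpu, &wall_us);
		u64 iowait_us = get_cpu_iowait_time_us(cpu, &wall_us);

		/* Both return (u64)-1 when NO_HZ stats are unavailable. */
		if (idle_us == (u64)-1 || iowait_us == (u64)-1)
			return;

		pr_info("cpu%d: idle %llu us, iowait %llu us (wall %llu us)\n",
			cpu, idle_us, iowait_us, wall_us);
	}
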
569 ktime_t now, int cpu) in tick_nohz_stop_sched_tick() argument
623 if (cpu == tick_do_timer_cpu) { in tick_nohz_stop_sched_tick()
678 nohz_balance_enter_idle(cpu); in tick_nohz_stop_sched_tick()
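
Inside tick_nohz_stop_sched_tick() (line 623), a CPU that owns the timekeeping duty drops it before going tickless, so whichever CPU ticks next can pick it up; it also remembers that it was the last owner, which bounds its sleep by the timekeeping max deferment. Roughly:

	if (cpu == tick_do_timer_cpu) {
		tick_do_timer_cpu = TICK_DO_TIMER_NONE;
		ts->do_timer_last = 1;
	}

Line 678 then calls nohz_balance_enter_idle(cpu), registering the CPU with the scheduler's nohz idle-balancing machinery before the tick is actually stopped.
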
723 int cpu = smp_processor_id(); in tick_nohz_full_stop_tick() local
725 if (!tick_nohz_full_cpu(cpu) || is_idle_task(current)) in tick_nohz_full_stop_tick()
734 tick_nohz_stop_sched_tick(ts, ktime_get(), cpu); in tick_nohz_full_stop_tick()
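
tick_nohz_full_stop_tick() (lines 723-734) is the full-dynticks counterpart of the idle path: it only acts on a nohz_full CPU that is running a real task, since an idle CPU stops its tick through __tick_nohz_idle_enter() instead. Condensed, with the intermediate can-stop checks elided:

	int cpu = smp_processor_id();

	if (!tick_nohz_full_cpu(cpu) || is_idle_task(current))
		return;

	/* ...further checks that the tick may really be stopped... */
	tick_nohz_stop_sched_tick(ts, ktime_get(), cpu);
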
738 static bool can_stop_idle_tick(int cpu, struct tick_sched *ts) in can_stop_idle_tick() argument
747 if (unlikely(!cpu_online(cpu))) { in can_stop_idle_tick()
748 if (cpu == tick_do_timer_cpu) in can_stop_idle_tick()
761 if (unlikely(local_softirq_pending() && cpu_online(cpu))) { in can_stop_idle_tick()
778 if (tick_do_timer_cpu == cpu) in can_stop_idle_tick()
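
can_stop_idle_tick() (lines 738-778) collects the veto conditions for the idle path: an offline CPU gives up the duty and bails, pending softirqs keep the tick alive, and when nohz_full is enabled the timekeeping CPU is never allowed to stop. Roughly:

	if (unlikely(!cpu_online(cpu))) {
		if (cpu == tick_do_timer_cpu)
			tick_do_timer_cpu = TICK_DO_TIMER_NONE;
		return false;
	}

	if (unlikely(local_softirq_pending() && cpu_online(cpu)))
		return false;	/* (with a ratelimited warning) */

	if (tick_nohz_full_enabled() && tick_do_timer_cpu == cpu)
		return false;	/* keep timekeeping running */
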
794 int cpu = smp_processor_id(); in __tick_nohz_idle_enter() local
798 if (can_stop_idle_tick(cpu, ts)) { in __tick_nohz_idle_enter()
803 expires = tick_nohz_stop_sched_tick(ts, now, cpu); in __tick_nohz_idle_enter()
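
__tick_nohz_idle_enter() (lines 794-803) ties the idle path together: if can_stop_idle_tick() agrees, the tick is stopped and the returned expiry records when the next tick, if any, is due (which in turn feeds the idle governor's sleep-length estimate). Skeleton:

	int cpu = smp_processor_id();

	if (can_stop_idle_tick(cpu, ts)) {
		/* ...idle-entry bookkeeping elided... */
		expires = tick_nohz_stop_sched_tick(ts, now, cpu);
		/* ...expires records the next tick, if any... */
	}
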
1190 void tick_cancel_sched_timer(int cpu) in tick_cancel_sched_timer() argument
1192 struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu); in tick_cancel_sched_timer()
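
tick_cancel_sched_timer() (line 1190) runs when a CPU goes down and tears down that CPU's tick state; with high-resolution timers the per-CPU sched_timer hrtimer is cancelled first. In the vintage this listing reflects, roughly:

	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);

	if (ts->sched_timer.base)	/* under CONFIG_HIGH_RES_TIMERS */
		hrtimer_cancel(&ts->sched_timer);

	memset(ts, 0, sizeof(*ts));
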
1208 int cpu; in tick_clock_notify() local
1210 for_each_possible_cpu(cpu) in tick_clock_notify()
1211 set_bit(0, &per_cpu(tick_cpu_sched, cpu).check_clocks); in tick_clock_notify()
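
tick_clock_notify() (lines 1208-1211) broadcasts a "re-check your clocks" request by setting bit 0 of check_clocks on every possible CPU; each CPU later consumes the flag with test_and_clear_bit() when it re-evaluates oneshot/highres capability. The consumer side is roughly:

	/* e.g. in tick_check_oneshot_change() */
	if (!test_and_clear_bit(0, &ts->check_clocks))
		return 0;
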