/linux-4.1.27/arch/x86/kernel/apic/ |
D | x2apic_cluster.c |
    31   unsigned int cpu, this_cpu;   in __x2apic_send_IPI_mask() local
    39   this_cpu = smp_processor_id();   in __x2apic_send_IPI_mask()
    59   if (apic_dest == APIC_DEST_ALLINC || i != this_cpu)   in __x2apic_send_IPI_mask()
    133  unsigned int this_cpu = smp_processor_id();   in init_x2apic_ldr() local
    136  per_cpu(x86_cpu_to_logical_apicid, this_cpu) = apic_read(APIC_LDR);   in init_x2apic_ldr()
    138  cpumask_set_cpu(this_cpu, per_cpu(cpus_in_cluster, this_cpu));   in init_x2apic_ldr()
    140  if (x2apic_cluster(this_cpu) != x2apic_cluster(cpu))   in init_x2apic_ldr()
    142  cpumask_set_cpu(this_cpu, per_cpu(cpus_in_cluster, cpu));   in init_x2apic_ldr()
    143  cpumask_set_cpu(cpu, per_cpu(cpus_in_cluster, this_cpu));   in init_x2apic_ldr()
    153  unsigned int this_cpu = (unsigned long)hcpu;   in update_clusterinfo() local
    [all …]

D | ipi.c |
    42   unsigned int this_cpu = smp_processor_id();   in default_send_IPI_mask_allbutself_phys() local
    50   if (query_cpu == this_cpu)   in default_send_IPI_mask_allbutself_phys()
    85   unsigned int this_cpu = smp_processor_id();   in default_send_IPI_mask_allbutself_logical() local
    91   if (query_cpu == this_cpu)   in default_send_IPI_mask_allbutself_logical()

D | apic_numachip.c |
    123  unsigned int this_cpu = smp_processor_id();   in numachip_send_IPI_mask_allbutself() local
    127  if (cpu != this_cpu)   in numachip_send_IPI_mask_allbutself()
    134  unsigned int this_cpu = smp_processor_id();   in numachip_send_IPI_allbutself() local
    138  if (cpu != this_cpu)   in numachip_send_IPI_allbutself()

D | x2apic_phys.c |
    41   unsigned long this_cpu;   in __x2apic_send_IPI_mask() local
    48   this_cpu = smp_processor_id();   in __x2apic_send_IPI_mask()
    50   if (apic_dest == APIC_DEST_ALLBUT && this_cpu == query_cpu)   in __x2apic_send_IPI_mask()

D | hw_nmi.c |
    61   int this_cpu = get_cpu();   in arch_trigger_all_cpu_backtrace() local
    74   cpumask_clear_cpu(this_cpu, to_cpumask(backtrace_mask));   in arch_trigger_all_cpu_backtrace()

D | x2apic_uv_x.c |
    275  unsigned int this_cpu = smp_processor_id();   in uv_send_IPI_mask_allbutself() local
    279  if (cpu != this_cpu)   in uv_send_IPI_mask_allbutself()
    286  unsigned int this_cpu = smp_processor_id();   in uv_send_IPI_allbutself() local
    290  if (cpu != this_cpu)   in uv_send_IPI_allbutself()

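Every APIC flavour above repeats the same shape: latch smp_processor_id() into this_cpu once, then walk the destination mask and skip the sender whenever the destination mode is "all but self". A minimal sketch of that pattern (illustrative only; the __example_* helper is hypothetical and this is not the code of any one file listed):

    /* hypothetical low-level helper: program the ICR for one APIC id */
    static void __example_send_ipi_dest(unsigned int apicid, int vector);

    static void example_send_ipi_mask_allbutself(const struct cpumask *mask,
                                                 int vector)
    {
            unsigned int query_cpu;
            unsigned int this_cpu = smp_processor_id();
            unsigned long flags;

            /* IRQs stay off so this_cpu cannot change while we iterate. */
            local_irq_save(flags);
            for_each_cpu(query_cpu, mask) {
                    if (query_cpu == this_cpu)
                            continue;       /* "allbutself": never IPI the sender */
                    __example_send_ipi_dest(per_cpu(x86_cpu_to_apicid, query_cpu),
                                            vector);
            }
            local_irq_restore(flags);
    }
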
/linux-4.1.27/arch/parisc/kernel/ |
D | smp.c |
    123  int this_cpu = smp_processor_id();   in ipi_interrupt() local
    124  struct cpuinfo_parisc *p = &per_cpu(cpu_data, this_cpu);   in ipi_interrupt()
    129  spinlock_t *lock = &per_cpu(ipi_lock, this_cpu);   in ipi_interrupt()
    147  smp_debug(100, KERN_DEBUG "CPU%d IPI_NOP\n", this_cpu);   in ipi_interrupt()
    151  smp_debug(100, KERN_DEBUG "CPU%d IPI_RESCHEDULE\n", this_cpu);   in ipi_interrupt()
    157  smp_debug(100, KERN_DEBUG "CPU%d IPI_CALL_FUNC\n", this_cpu);   in ipi_interrupt()
    162  smp_debug(100, KERN_DEBUG "CPU%d IPI_CPU_START\n", this_cpu);   in ipi_interrupt()
    166  smp_debug(100, KERN_DEBUG "CPU%d IPI_CPU_STOP\n", this_cpu);   in ipi_interrupt()
    171  smp_debug(100, KERN_DEBUG "CPU%d is alive!\n", this_cpu);   in ipi_interrupt()
    176  this_cpu, which);   in ipi_interrupt()

/linux-4.1.27/lib/ |
D | smp_processor_id.c |
    13   int this_cpu = raw_smp_processor_id();   in check_preemption_disabled() local
    25   if (cpumask_equal(tsk_cpus_allowed(current), cpumask_of(this_cpu)))   in check_preemption_disabled()
    51   return this_cpu;   in check_preemption_disabled()

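The lib/smp_processor_id.c hits are the debug side of the same API: smp_processor_id() is only trustworthy while the caller cannot migrate, so the checked variant reads raw_smp_processor_id() and then proves migration is impossible before returning it. A condensed sketch of that check (paraphrased and renamed, not the verbatim kernel function):

    static unsigned int check_preemption_disabled_sketch(const char *what)
    {
            int this_cpu = raw_smp_processor_id();

            if (likely(preempt_count()))
                    goto out;               /* preemption off: we cannot migrate */

            if (irqs_disabled())
                    goto out;               /* hard IRQs off also pins us */

            /* A task allowed on exactly one CPU cannot migrate either. */
            if (cpumask_equal(tsk_cpus_allowed(current), cpumask_of(this_cpu)))
                    goto out;

            printk(KERN_ERR "BUG: using %s in preemptible code\n", what);
            dump_stack();
    out:
            return this_cpu;
    }
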
/linux-4.1.27/kernel/trace/ |
D | trace_clock.c |
    93   int this_cpu;   in trace_clock_global() local
    98   this_cpu = raw_smp_processor_id();   in trace_clock_global()
    99   now = sched_clock_cpu(this_cpu);   in trace_clock_global()

/linux-4.1.27/arch/alpha/kernel/ |
D | smp.c |
    525  int this_cpu = smp_processor_id();   in handle_ipi() local
    526  unsigned long *pending_ipis = &ipi_data[this_cpu].bits;   in handle_ipi()
    531  this_cpu, *pending_ipis, regs->pc));   in handle_ipi()
    558  this_cpu, which);   in handle_ipi()
    566  cpu_data[this_cpu].ipi_count++;   in handle_ipi()
    657  int cpu, this_cpu = smp_processor_id();   in flush_tlb_mm() local
    659  if (!cpu_online(cpu) || cpu == this_cpu)   in flush_tlb_mm()
    706  int cpu, this_cpu = smp_processor_id();   in flush_tlb_page() local
    708  if (!cpu_online(cpu) || cpu == this_cpu)   in flush_tlb_page()
    762  int cpu, this_cpu = smp_processor_id();   in flush_icache_user_range() local
    [all …]

/linux-4.1.27/arch/x86/lib/ |
D | msr-smp.c |
    10   int this_cpu = raw_smp_processor_id();   in __rdmsr_on_cpu() local
    13   reg = per_cpu_ptr(rv->msrs, this_cpu);   in __rdmsr_on_cpu()
    24   int this_cpu = raw_smp_processor_id();   in __wrmsr_on_cpu() local
    27   reg = per_cpu_ptr(rv->msrs, this_cpu);   in __wrmsr_on_cpu()
    102  int this_cpu;   in __rwmsr_on_cpus() local
    109  this_cpu = get_cpu();   in __rwmsr_on_cpus()
    111  if (cpumask_test_cpu(this_cpu, mask))   in __rwmsr_on_cpus()

/linux-4.1.27/arch/sparc/kernel/ |
D | process_64.c |
    195  int this_cpu)   in __global_reg_self() argument
    201  rp = &global_cpu_snapshot[this_cpu].reg;   in __global_reg_self()
    247  int this_cpu, cpu;   in arch_trigger_all_cpu_backtrace() local
    254  this_cpu = raw_smp_processor_id();   in arch_trigger_all_cpu_backtrace()
    259  __global_reg_self(tp, regs, this_cpu);   in arch_trigger_all_cpu_backtrace()
    266  if (!include_self && cpu == this_cpu)   in arch_trigger_all_cpu_backtrace()
    275  (cpu == this_cpu ? '*' : ' '), cpu,   in arch_trigger_all_cpu_backtrace()
    312  static void __global_pmu_self(int this_cpu)   in __global_pmu_self() argument
    320  pp = &global_cpu_snapshot[this_cpu].pmu;   in __global_pmu_self()
    346  int this_cpu, cpu;   in pmu_snapshot_all_cpus() local
    [all …]

D | smp_64.c |
    623  int retries, this_cpu, prev_sent, i, saw_cpu_error;   in hypervisor_xcall_deliver() local
    627  this_cpu = smp_processor_id();   in hypervisor_xcall_deliver()
    710  this_cpu, saw_cpu_error - 1);   in hypervisor_xcall_deliver()
    716  this_cpu, retries);   in hypervisor_xcall_deliver()
    721  this_cpu, status);   in hypervisor_xcall_deliver()
    724  this_cpu, cnt, tb->cpu_list_pa, tb->cpu_mondo_block_pa);   in hypervisor_xcall_deliver()
    727  printk(KERN_CRIT "CPU[%d]: CPU list [ ", this_cpu);   in hypervisor_xcall_deliver()
    738  int this_cpu, i, cnt;   in xcall_deliver() local
    755  this_cpu = smp_processor_id();   in xcall_deliver()
    756  tb = &trap_block[this_cpu];   in xcall_deliver()
    [all …]

D | nmi.c |
    71   int this_cpu = smp_processor_id();   in die_nmi() local
    78   panic("Watchdog detected hard LOCKUP on cpu %d", this_cpu);   in die_nmi()
    80   WARN(1, "Watchdog detected hard LOCKUP on cpu %d", this_cpu);   in die_nmi()

D | chmc.c |
    591  unsigned long ret, this_cpu;   in chmc_read_mcreg() local
    595  this_cpu = real_hard_smp_processor_id();   in chmc_read_mcreg()
    597  if (p->portid == this_cpu) {   in chmc_read_mcreg()

D | entry.h |
    246  void sun4v_register_mondo_queues(int this_cpu);

D | irq_64.c |
    995  void notrace sun4v_register_mondo_queues(int this_cpu)   in sun4v_register_mondo_queues() argument
    997  struct trap_per_cpu *tb = &trap_block[this_cpu];   in sun4v_register_mondo_queues()

/linux-4.1.27/arch/x86/kernel/cpu/ |
D | common.c |
    92   static const struct cpu_dev *this_cpu = &default_cpu;   variable
    372  if (!this_cpu)   in table_lookup_model()
    375  info = this_cpu->legacy_models;   in table_lookup_model()
    472  if (this_cpu->legacy_cache_size)   in cpu_detect_cache_sizes()
    473  l2size = this_cpu->legacy_cache_size(c, l2size);   in cpu_detect_cache_sizes()
    496  if (this_cpu->c_detect_tlb)   in cpu_detect_tlb()
    497  this_cpu->c_detect_tlb(c);   in cpu_detect_tlb()
    572  this_cpu = cpu_devs[i];   in get_cpu_vendor()
    573  c->x86_vendor = this_cpu->c_x86_vendor;   in get_cpu_vendor()
    583  this_cpu = &default_cpu;   in get_cpu_vendor()
    [all …]

/linux-4.1.27/arch/s390/kernel/ |
D | machine_kexec.c |
    58   int cpu, this_cpu;   in setup_regs() local
    62   this_cpu = smp_find_processor_id(stap());   in setup_regs()
    63   add_elf_notes(this_cpu);   in setup_regs()
    65   if (cpu == this_cpu)   in setup_regs()

/linux-4.1.27/arch/blackfin/kernel/ |
D | nmi.c |
    146  unsigned int this_cpu = smp_processor_id();   in check_nmi_wdt_touched() local
    151  if (!atomic_read(&nmi_touched[this_cpu]))   in check_nmi_wdt_touched()
    154  atomic_set(&nmi_touched[this_cpu], 0);   in check_nmi_wdt_touched()
    156  cpumask_clear_cpu(this_cpu, &mask);   in check_nmi_wdt_touched()

/linux-4.1.27/Documentation/arm/ |
D | vlocks.txt |
    35   bool vlock_trylock(int this_cpu)
    38   currently_voting[this_cpu] = 1;
    41   currently_voting[this_cpu] = 0;
    46   last_vote = this_cpu;
    47   currently_voting[this_cpu] = 0;
    56   if (last_vote == this_cpu)
    99   my_town = towns[(this_cpu >> 4) & 0xf];
    100  I_won = vlock_trylock(my_town, this_cpu & 0xf);
    103  my_state = states[(this_cpu >> 8) & 0xf];
    104  I_won = vlock_lock(my_state, this_cpu & 0xf));
    [all …]

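The vlocks.txt hits quote the voting-based mutex arm uses for "first man" election during low-level CPU bring-up; there this_cpu is simply the voter's index into a currently_voting[] array. A compact C rendering of the algorithm the document describes (the document gives it as pseudocode; NR_VOTERS and the VOTE_NONE sentinel here are illustrative):

    #include <linux/types.h>

    #define NR_VOTERS   16
    #define VOTE_NONE   (-1)

    static volatile int currently_voting[NR_VOTERS];
    static volatile int last_vote = VOTE_NONE;

    /* Returns true when this_cpu wins the election, false otherwise. */
    static bool vlock_trylock_sketch(int this_cpu)
    {
            int i;

            currently_voting[this_cpu] = 1;         /* announce that we are voting */

            if (last_vote != VOTE_NONE) {           /* somebody already proposed a winner */
                    currently_voting[this_cpu] = 0;
                    return false;
            }

            last_vote = this_cpu;                   /* propose ourselves */
            currently_voting[this_cpu] = 0;

            /* Wait until nobody is still in the middle of voting ... */
            for (i = 0; i < NR_VOTERS; i++)
                    while (currently_voting[i])
                            ;

            /* ... then whatever proposal survived is the winner. */
            return last_vote == this_cpu;
    }

The in-tree implementation (arch/arm/common/vlock.S) adds the memory-ordering and cache maintenance this plain-C form glosses over; the later matches (99-104) are the document's nested variant of the same idea, where bit-fields of this_cpu select successively larger voting domains.
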
/linux-4.1.27/kernel/ |
D | smp.c |
    276  int this_cpu;   in smp_call_function_single() local
    283  this_cpu = get_cpu();   in smp_call_function_single()
    291  WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled()   in smp_call_function_single()
    408  int cpu, next_cpu, this_cpu = smp_processor_id();   in smp_call_function_many() local
    416  WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled()   in smp_call_function_many()
    421  if (cpu == this_cpu)   in smp_call_function_many()
    430  if (next_cpu == this_cpu)   in smp_call_function_many()
    442  cpumask_clear_cpu(this_cpu, cfd->cpumask);   in smp_call_function_many()

D | watchdog.c |
    293  int this_cpu = smp_processor_id();   in watchdog_overflow_callback() local
    301  this_cpu);   in watchdog_overflow_callback()
    304  this_cpu);   in watchdog_overflow_callback()

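In kernel/smp.c the same exclusion happens one layer up: smp_call_function_many() runs a function on every online CPU in the mask except the caller, so the calling CPU is cleared out of the call mask before any IPIs are sent (on_each_cpu_mask() is the variant that additionally runs func locally). A rough sketch of that flow with the call_single_data queuing left out (the function name is illustrative):

    static void example_call_function_many(const struct cpumask *mask,
                                           smp_call_func_t func, void *info)
    {
            int this_cpu = smp_processor_id();
            cpumask_var_t callmask;

            /* Cross-calls from irqs-off context can deadlock, hence the WARN
             * in the real code. */
            WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled());

            if (!zalloc_cpumask_var(&callmask, GFP_ATOMIC))
                    return;

            cpumask_and(callmask, mask, cpu_online_mask);
            cpumask_clear_cpu(this_cpu, callmask);  /* never IPI ourselves */

            /* The real code queues a csd carrying (func, info) for each CPU
             * left in callmask before kicking them. */
            arch_send_call_function_ipi_mask(callmask);

            free_cpumask_var(callmask);
    }
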
/linux-4.1.27/arch/arm/common/ |
D | bL_switcher.c |
    151  unsigned int mpidr, this_cpu, that_cpu;   in bL_switch_to() local
    157  this_cpu = smp_processor_id();   in bL_switch_to()
    161  BUG_ON(cpu_logical_map(this_cpu) != ob_mpidr);   in bL_switch_to()
    166  that_cpu = bL_switcher_cpu_pairing[this_cpu];   in bL_switch_to()
    172  this_cpu, ob_mpidr, ib_mpidr);   in bL_switch_to()
    174  this_cpu = smp_processor_id();   in bL_switch_to()
    182  ipi_nr = register_ipi_completion(&inbound_alive, this_cpu);   in bL_switch_to()
    229  cpu_logical_map(this_cpu) = ib_mpidr;   in bL_switch_to()
    239  pr_debug("after switch: CPU %d MPIDR %#x\n", this_cpu, mpidr);   in bL_switch_to()

/linux-4.1.27/Documentation/ |
D | this_cpu_ops.txt |
    1    this_cpu operations
    4    this_cpu operations are a way of optimizing access to per cpu
    10   this_cpu operations add a per cpu variable offset to the processor
    37   The main use of the this_cpu operations has been to optimize counter
    40   The following this_cpu() operations with implied preemption protection
    61   Inner working of this_cpu operations
    87   Consider the following this_cpu operation:
    111  after the this_cpu instruction is executed. In general this means that
    159  cpu variable. Most this_cpu operations take a cpu variable.
    197  if we do not make use of this_cpu ops later to manipulate fields:
    [all …]

D | local_ops.txt |
    12   Please use the this_cpu operations instead unless there is really a special purpose.
    13   Most uses of local_t in the kernel have been replaced by this_cpu operations.
    14   this_cpu operations combine the relocation with the local_t like semantics in

D | 00-INDEX |
    439  - List rationale behind and the way to use this_cpu operations.

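Documentation/this_cpu_ops.txt is the one place above that explains the naming rather than just using it: a this_cpu operation folds the per-CPU offset calculation and the access into a single preemption-safe step. A small example of the difference, assuming a per-CPU counter (the variable and function names are made up for illustration):

    #include <linux/percpu.h>

    DEFINE_PER_CPU(unsigned long, widget_count);

    /* this_cpu_inc() relocates to the executing CPU's copy and increments it
     * without a separate preempt_disable()/enable() pair around the access. */
    static void widget_seen(void)
    {
            this_cpu_inc(widget_count);
    }

    /* Open-coded equivalent: pin ourselves to a CPU, touch that CPU's copy,
     * then allow preemption again. */
    static void widget_seen_open_coded(void)
    {
            unsigned long *p;

            p = &get_cpu_var(widget_count);     /* preempt_disable() + address */
            (*p)++;
            put_cpu_var(widget_count);          /* preempt_enable() */
    }
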
/linux-4.1.27/init/ |
D | calibrate.c |
    278  int this_cpu = smp_processor_id();   in calibrate_delay() local
    280  if (per_cpu(cpu_loops_per_jiffy, this_cpu)) {   in calibrate_delay()
    281  lpj = per_cpu(cpu_loops_per_jiffy, this_cpu);   in calibrate_delay()
    305  per_cpu(cpu_loops_per_jiffy, this_cpu) = lpj;   in calibrate_delay()

/linux-4.1.27/arch/x86/kernel/ |
D | irq.c |
    292  unsigned int this_cpu, vector, this_count, count;   in check_irq_vectors_for_cpu_disable() local
    296  this_cpu = smp_processor_id();   in check_irq_vectors_for_cpu_disable()
    298  cpumask_clear_cpu(this_cpu, &online_new);   in check_irq_vectors_for_cpu_disable()
    310  cpumask_clear_cpu(this_cpu, &affinity_new);   in check_irq_vectors_for_cpu_disable()
    339  if (cpu == this_cpu)   in check_irq_vectors_for_cpu_disable()
    357  this_cpu, this_count, count);   in check_irq_vectors_for_cpu_disable()

/linux-4.1.27/arch/x86/kernel/cpu/mcheck/ |
D | therm_throt.c |
    154  unsigned int this_cpu = smp_processor_id();   in therm_throt_process() local
    157  struct thermal_state *pstate = &per_cpu(thermal_state, this_cpu);   in therm_throt_process()
    194  this_cpu,   in therm_throt_process()
    202  this_cpu,   in therm_throt_process()
    213  unsigned int this_cpu = smp_processor_id();   in thresh_event_valid() local
    214  struct thermal_state *pstate = &per_cpu(thermal_state, this_cpu);   in thresh_event_valid()

/linux-4.1.27/arch/arm/kernel/ |
D | smp_tlb.c |
    126  int this_cpu;   in broadcast_tlb_mm_a15_erratum() local
    132  this_cpu = get_cpu();   in broadcast_tlb_mm_a15_erratum()
    133  a15_erratum_get_cpumask(this_cpu, mm, &mask);   in broadcast_tlb_mm_a15_erratum()

/linux-4.1.27/drivers/cpuidle/ |
D | cpuidle-ux500.c |
    29   int this_cpu = smp_processor_id();   in ux500_enter_idle() local
    52   if (!prcmu_is_cpu_in_wfi(this_cpu ? 0 : 1))   in ux500_enter_idle()

D | coupled.c |
    334  static void cpuidle_coupled_poke_others(int this_cpu,   in cpuidle_coupled_poke_others() argument
    340  if (cpu != this_cpu && cpu_online(cpu))   in cpuidle_coupled_poke_others()

/linux-4.1.27/arch/arm/include/asm/ |
D | mmu_context.h |
    32   void a15_erratum_get_cpumask(int this_cpu, struct mm_struct *mm,
    35   static inline void a15_erratum_get_cpumask(int this_cpu, struct mm_struct *mm,   in a15_erratum_get_cpumask() argument

D | mcpm.h |
    280  bool __mcpm_outbound_enter_critical(unsigned int this_cpu, unsigned int cluster);

/linux-4.1.27/arch/blackfin/mach-common/ |
D | dpmc.c |
    87   unsigned int this_cpu = smp_processor_id();   in bfin_wakeup_cpu() local
    91   cpumask_clear_cpu(this_cpu, &mask);   in bfin_wakeup_cpu()

/linux-4.1.27/kernel/sched/ |
D | deadline.c |
    1239  int this_cpu = smp_processor_id();   in find_later_rq() local
    1277  if (!cpumask_test_cpu(this_cpu, later_mask))   in find_later_rq()
    1278  this_cpu = -1;   in find_later_rq()
    1288  if (this_cpu != -1 &&   in find_later_rq()
    1289  cpumask_test_cpu(this_cpu, sched_domain_span(sd))) {   in find_later_rq()
    1291  return this_cpu;   in find_later_rq()
    1311  if (this_cpu != -1)   in find_later_rq()
    1312  return this_cpu;   in find_later_rq()
    1478  int this_cpu = this_rq->cpu, ret = 0, cpu;   in pull_dl_task() local
    1493  if (this_cpu == cpu)   in pull_dl_task()
    [all …]

D | rt.c |
    1552  int this_cpu = smp_processor_id();   in find_lowest_rq() local
    1580  if (!cpumask_test_cpu(this_cpu, lowest_mask))   in find_lowest_rq()
    1581  this_cpu = -1; /* Skip this_cpu opt if not among lowest */   in find_lowest_rq()
    1592  if (this_cpu != -1 &&   in find_lowest_rq()
    1593  cpumask_test_cpu(this_cpu, sched_domain_span(sd))) {   in find_lowest_rq()
    1595  return this_cpu;   in find_lowest_rq()
    1613  if (this_cpu != -1)   in find_lowest_rq()
    1614  return this_cpu;   in find_lowest_rq()
    1893  int this_cpu;   in try_to_push_tasks() local
    1896  this_cpu = rt_rq->push_cpu;   in try_to_push_tasks()
    [all …]

D | fair.c |
    4568  int idx, this_cpu, prev_cpu;   in wake_affine() local
    4581  this_cpu = smp_processor_id();   in wake_affine()
    4584  this_load = target_load(this_cpu, idx);   in wake_affine()
    4595  this_load += effective_load(tg, this_cpu, -weight, -weight);   in wake_affine()
    4615  prev_eff_load *= capacity_of(this_cpu);   in wake_affine()
    4619  effective_load(tg, this_cpu, weight, weight);   in wake_affine()
    4643  int this_cpu, int sd_flag)   in find_idlest_group() argument
    4663  local_group = cpumask_test_cpu(this_cpu,   in find_idlest_group()
    4699  find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)   in find_idlest_cpu() argument
    4704  int least_loaded_cpu = this_cpu;   in find_idlest_cpu()
    [all …]

D | debug.c |
    657  unsigned int this_cpu = raw_smp_processor_id();   in proc_sched_show_task() local
    660  t0 = cpu_clock(this_cpu);   in proc_sched_show_task()
    661  t1 = cpu_clock(this_cpu);   in proc_sched_show_task()

D | core.c |
    1415  int this_cpu = smp_processor_id();   in ttwu_stat() local
    1417  if (cpu == this_cpu) {   in ttwu_stat()
    1425  for_each_domain(this_cpu, sd) {   in ttwu_stat()
    1617  bool cpus_share_cache(int this_cpu, int that_cpu)   in cpus_share_cache() argument
    1619  return per_cpu(sd_llc_id, this_cpu) == per_cpu(sd_llc_id, that_cpu);   in cpus_share_cache()

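In kernel/sched/rt.c and deadline.c the variable carries a small placement heuristic: when picking a CPU to push a task to, the current CPU is preferred if it is among the candidates (preempting locally is cheaper than IPIing a remote CPU), and otherwise it is set to -1 so the rest of the search ignores it. Reduced to the lines matched above, the logic looks roughly like this (condensed sketch, not the full find_lowest_rq()):

    static int find_candidate_cpu_sketch(struct task_struct *task,
                                         struct cpumask *lowest_mask)
    {
            struct sched_domain *sd;
            int this_cpu = smp_processor_id();
            int cpu = task_cpu(task);

            if (!cpumask_test_cpu(this_cpu, lowest_mask))
                    this_cpu = -1;          /* skip the this_cpu optimization */

            rcu_read_lock();
            for_each_domain(cpu, sd) {
                    int best_cpu;

                    /* Preempting this_cpu is cheaper than IPIing a remote CPU. */
                    if (this_cpu != -1 &&
                        cpumask_test_cpu(this_cpu, sched_domain_span(sd))) {
                            rcu_read_unlock();
                            return this_cpu;
                    }

                    best_cpu = cpumask_first_and(lowest_mask,
                                                 sched_domain_span(sd));
                    if (best_cpu < nr_cpu_ids) {
                            rcu_read_unlock();
                            return best_cpu;
                    }
            }
            rcu_read_unlock();

            /* No domain-local candidate: fall back to this_cpu, then anything. */
            if (this_cpu != -1)
                    return this_cpu;

            cpu = cpumask_any(lowest_mask);
            return cpu < nr_cpu_ids ? cpu : -1;
    }
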
/linux-4.1.27/arch/ia64/kernel/ |
D | smp.c |
    99   int this_cpu = get_cpu();   in handle_IPI() local
    129  this_cpu, which);   in handle_IPI()

D | perfmon.c |
    5263  int this_cpu = smp_processor_id();   in pfm_overflow_handler() local
    5298  pfm_stats[this_cpu].pfm_smpl_handler_calls++;   in pfm_overflow_handler()
    5321  pfm_stats[this_cpu].pfm_smpl_handler_cycles += end_cycles - start_cycles;   in pfm_overflow_handler()
    5463  int this_cpu = smp_processor_id();   in pfm_do_interrupt_handler() local
    5466  pfm_stats[this_cpu].pfm_ovfl_intr_count++;   in pfm_do_interrupt_handler()
    5498  pfm_stats[this_cpu].pfm_spurious_ovfl_intr_count++;   in pfm_do_interrupt_handler()
    5510  this_cpu, task_pid_nr(task));   in pfm_do_interrupt_handler()
    5515  this_cpu,   in pfm_do_interrupt_handler()
    5526  int this_cpu;   in pfm_interrupt_handler() local
    5530  this_cpu = get_cpu();   in pfm_interrupt_handler()
    [all …]

D | process.c |
    215  unsigned int this_cpu = smp_processor_id();   in play_dead() local
    223  ia64_jump_to_sal(&sal_boot_rendez_state[this_cpu]);   in play_dead()

/linux-4.1.27/tools/perf/ |
D | builtin-sched.c |
    1313  int cpu, this_cpu = sample->cpu;   in map_switch_event() local
    1315  BUG_ON(this_cpu >= MAX_CPUS || this_cpu < 0);   in map_switch_event()
    1317  if (this_cpu > sched->max_cpu)   in map_switch_event()
    1318  sched->max_cpu = this_cpu;   in map_switch_event()
    1320  timestamp0 = sched->cpu_last_switched[this_cpu];   in map_switch_event()
    1321  sched->cpu_last_switched[this_cpu] = timestamp;   in map_switch_event()
    1334  sched->curr_thread[this_cpu] = sched_in;   in map_switch_event()
    1365  if (cpu != this_cpu)   in map_switch_event()
    1393  int this_cpu = sample->cpu, err = 0;   in process_sched_switch_event() local
    1397  if (sched->curr_pid[this_cpu] != (u32)-1) {   in process_sched_switch_event()
    [all …]

/linux-4.1.27/arch/arm/mm/ |
D | context.c |
    54   void a15_erratum_get_cpumask(int this_cpu, struct mm_struct *mm,   in a15_erratum_get_cpumask() argument
    64   if (cpu == this_cpu)   in a15_erratum_get_cpumask()

/linux-4.1.27/kernel/time/ |
D | hrtimer.c |
    202  int this_cpu = smp_processor_id();   in switch_hrtimer_base() local
    228  if (cpu != this_cpu && hrtimer_check_target(timer, new_base)) {   in switch_hrtimer_base()
    229  cpu = this_cpu;   in switch_hrtimer_base()
    237  if (cpu != this_cpu && hrtimer_check_target(timer, new_base)) {   in switch_hrtimer_base()
    238  cpu = this_cpu;   in switch_hrtimer_base()

/linux-4.1.27/arch/sparc/mm/ |
D | init_64.c |
    222  static inline void set_dcache_dirty(struct page *page, int this_cpu)   in set_dcache_dirty() argument
    224  unsigned long mask = this_cpu;   in set_dcache_dirty()
    291  int this_cpu = get_cpu();   in flush_dcache() local
    296  if (cpu == this_cpu)   in flush_dcache()
    373  int this_cpu;   in flush_dcache_page() local
    385  this_cpu = get_cpu();   in flush_dcache_page()
    393  if (dirty_cpu == this_cpu)   in flush_dcache_page()
    397  set_dcache_dirty(page, this_cpu);   in flush_dcache_page()

/linux-4.1.27/drivers/cpufreq/ |
D | acpi-cpufreq.c |
    321  int this_cpu;   in drv_write() local
    323  this_cpu = get_cpu();   in drv_write()
    324  if (cpumask_test_cpu(this_cpu, cmd->mask))   in drv_write()

/linux-4.1.27/arch/x86/xen/ |
D | smp.c |
    655  unsigned int this_cpu = smp_processor_id();   in xen_send_IPI_mask_allbutself() local
    662  if (this_cpu == cpu)   in xen_send_IPI_mask_allbutself()

/linux-4.1.27/tools/perf/bench/ |
D | numa.c |
    1148  int this_cpu;   in worker_thread() local
    1157  this_cpu = g->threads[task_nr].curr_cpu;   in worker_thread()
    1158  if (this_cpu < g->p.nr_cpus/2)   in worker_thread()

/linux-4.1.27/kernel/printk/ |
D | printk.c |
    1620  int this_cpu;   in vprintk_emit() local
    1636  this_cpu = smp_processor_id();   in vprintk_emit()
    1641  if (unlikely(logbuf_cpu == this_cpu)) {   in vprintk_emit()
    1659  logbuf_cpu = this_cpu;   in vprintk_emit()

/linux-4.1.27/fs/ |
D | eventpoll.c |
    508  int this_cpu = get_cpu();   in ep_poll_safewake() local
    511  ep_poll_wakeup_proc, NULL, wq, (void *) (long) this_cpu);   in ep_poll_safewake()

/linux-4.1.27/tools/power/x86/turbostat/ |
D | turbostat.c |
    1458  int this_cpu;   in get_cpu_position_in_core() local
    1472  fscanf(filep, "%d", &this_cpu);   in get_cpu_position_in_core()
    1473  if (this_cpu == cpu) {   in get_cpu_position_in_core()

/linux-4.1.27/include/linux/ |
D | sched.h |
    1047  bool cpus_share_cache(int this_cpu, int that_cpu);
    1092  static inline bool cpus_share_cache(int this_cpu, int that_cpu)   in cpus_share_cache() argument
