cpu               121 arch/alpha/include/asm/mmu_context.h __get_new_mm_context(struct mm_struct *mm, long cpu)
cpu               123 arch/alpha/include/asm/mmu_context.h 	unsigned long asn = cpu_last_asn(cpu);
cpu               131 arch/alpha/include/asm/mmu_context.h 	cpu_last_asn(cpu) = next;
cpu               142 arch/alpha/include/asm/mmu_context.h 	long cpu = smp_processor_id();
cpu               145 arch/alpha/include/asm/mmu_context.h 	cpu_data[cpu].asn_lock = 1;
cpu               148 arch/alpha/include/asm/mmu_context.h 	asn = cpu_last_asn(cpu);
cpu               149 arch/alpha/include/asm/mmu_context.h 	mmc = next_mm->context[cpu];
cpu               151 arch/alpha/include/asm/mmu_context.h 		mmc = __get_new_mm_context(next_mm, cpu);
cpu               152 arch/alpha/include/asm/mmu_context.h 		next_mm->context[cpu] = mmc;
cpu               156 arch/alpha/include/asm/mmu_context.h 		cpu_data[cpu].need_new_asn = 1;
cpu               190 arch/alpha/include/asm/mmu_context.h 	int cpu = smp_processor_id();				\
cpu               191 arch/alpha/include/asm/mmu_context.h 	cpu_data[cpu].asn_lock = 0;				\
cpu               193 arch/alpha/include/asm/mmu_context.h 	if (cpu_data[cpu].need_new_asn) {			\
cpu               195 arch/alpha/include/asm/mmu_context.h 		cpu_data[cpu].need_new_asn = 0;			\
cpu               196 arch/alpha/include/asm/mmu_context.h 		if (!mm->context[cpu])			\
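The arch/alpha/include/asm/mmu_context.h lines above are the per-CPU address-space-number (ASN) allocator: each CPU remembers the last ASN it handed out in cpu_last_asn(cpu), every mm caches its ASN per CPU in mm->context[cpu], and when the hardware ASN range is exhausted the TLB is flushed and a new generation (kept in the bits above the ASN field) begins, retiring all cached values at once. The following is a minimal userspace model of that pattern, not the kernel code itself; the constants and the flush_tlb() stub are illustrative.

#include <stdio.h>

/* Illustrative sizes only; the real widths come from the Alpha PALcode. */
#define ASN_BITS	8
#define ASN_MASK	((1UL << ASN_BITS) - 1)
#define MAX_ASN		ASN_MASK
#define FIRST_VERSION	(1UL << ASN_BITS)	/* generation lives above the ASN bits */

static unsigned long cpu_last_asn = FIRST_VERSION;	/* per CPU in the kernel */

static void flush_tlb(void) { /* stands in for tbiap()/imb() */ }

/* Same shape as __get_new_mm_context(): bump the counter, and start a
 * new generation (after flushing the TLB) when the ASN field wraps. */
static unsigned long get_new_mm_context(void)
{
	unsigned long asn = cpu_last_asn;
	unsigned long next = asn + 1;

	if ((asn & ASN_MASK) >= MAX_ASN) {
		flush_tlb();
		next = (asn & ~ASN_MASK) + FIRST_VERSION;
	}
	cpu_last_asn = next;
	return next;
}

int main(void)
{
	unsigned long mm_context = 0;	/* mm->context[cpu] analogue, 0 = never assigned */

	/* switch_mm-style check: reuse the cached ASN only if its
	 * generation bits match the CPU's current generation. */
	if ((mm_context ^ cpu_last_asn) & ~ASN_MASK)
		mm_context = get_new_mm_context();

	printf("asn=%lu generation=%lu\n",
	       mm_context & ASN_MASK, mm_context >> ASN_BITS);
	return 0;
}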
cpu                44 arch/alpha/include/asm/smp.h #define raw_smp_processor_id()	(current_thread_info()->cpu)
cpu                48 arch/alpha/include/asm/smp.h extern void arch_send_call_function_single_ipi(int cpu);
cpu                54 arch/alpha/include/asm/smp.h #define smp_call_function_on_cpu(func,info,wait,cpu)    ({ 0; })
cpu                23 arch/alpha/include/asm/thread_info.h 	unsigned		cpu;		/* current CPU */
cpu                11 arch/alpha/include/asm/topology.h static inline int cpu_to_node(int cpu)
cpu                18 arch/alpha/include/asm/topology.h 	node = alpha_mv.cpuid_to_nid(cpu);
cpu                31 arch/alpha/include/asm/topology.h 	int cpu;
cpu                38 arch/alpha/include/asm/topology.h 	for_each_online_cpu(cpu) {
cpu                39 arch/alpha/include/asm/topology.h 		if (cpu_to_node(cpu) == node)
cpu                40 arch/alpha/include/asm/topology.h 			cpumask_set_cpu(cpu, node_to_cpumask_map[node]);
cpu                19 arch/alpha/kernel/asm-offsets.c 	DEFINE(TI_CPU, offsetof(struct thread_info, cpu));
cpu                10 arch/alpha/kernel/bugs.c 	struct percpu_struct *cpu;
cpu                13 arch/alpha/kernel/bugs.c         cpu = (struct percpu_struct *)((char *)hwrpb + hwrpb->processor_offset);
cpu                14 arch/alpha/kernel/bugs.c         cputype = cpu->type & 0xffffffff;
cpu               206 arch/alpha/kernel/core_irongate.c 	struct percpu_struct *cpu;
cpu               209 arch/alpha/kernel/core_irongate.c 	cpu = (struct percpu_struct*)((char*)hwrpb + hwrpb->processor_offset);
cpu               210 arch/alpha/kernel/core_irongate.c 	pal_rev = cpu->pal_revision & 0xffff;
cpu               211 arch/alpha/kernel/core_irongate.c 	pal_var = (cpu->pal_revision >> 16) & 0xff;
cpu                92 arch/alpha/kernel/core_mcpcia.c 	unsigned int stat0, value, cpu;
cpu                94 arch/alpha/kernel/core_mcpcia.c 	cpu = smp_processor_id();
cpu               110 arch/alpha/kernel/core_mcpcia.c 	mcheck_expected(cpu) = 1;
cpu               111 arch/alpha/kernel/core_mcpcia.c 	mcheck_taken(cpu) = 0;
cpu               112 arch/alpha/kernel/core_mcpcia.c 	mcheck_extra(cpu) = mid;
cpu               120 arch/alpha/kernel/core_mcpcia.c 	if (mcheck_taken(cpu)) {
cpu               121 arch/alpha/kernel/core_mcpcia.c 		mcheck_taken(cpu) = 0;
cpu               125 arch/alpha/kernel/core_mcpcia.c 	mcheck_expected(cpu) = 0;
cpu               140 arch/alpha/kernel/core_mcpcia.c 	unsigned int stat0, cpu;
cpu               142 arch/alpha/kernel/core_mcpcia.c 	cpu = smp_processor_id();
cpu               153 arch/alpha/kernel/core_mcpcia.c 	mcheck_expected(cpu) = 1;
cpu               154 arch/alpha/kernel/core_mcpcia.c 	mcheck_extra(cpu) = mid;
cpu               162 arch/alpha/kernel/core_mcpcia.c 	mcheck_expected(cpu) = 0;
cpu               256 arch/alpha/kernel/core_mcpcia.c 	int cpu = smp_processor_id();
cpu               267 arch/alpha/kernel/core_mcpcia.c 	mcheck_expected(cpu) = 2;	/* indicates probing */
cpu               268 arch/alpha/kernel/core_mcpcia.c 	mcheck_taken(cpu) = 0;
cpu               269 arch/alpha/kernel/core_mcpcia.c 	mcheck_extra(cpu) = mid;
cpu               277 arch/alpha/kernel/core_mcpcia.c 	if (mcheck_taken(cpu)) {
cpu               278 arch/alpha/kernel/core_mcpcia.c 		mcheck_taken(cpu) = 0;
cpu               282 arch/alpha/kernel/core_mcpcia.c 	mcheck_expected(cpu) = 0;
cpu               579 arch/alpha/kernel/core_mcpcia.c 	unsigned int cpu = smp_processor_id();
cpu               583 arch/alpha/kernel/core_mcpcia.c 	expected = mcheck_expected(cpu);
cpu               600 arch/alpha/kernel/core_mcpcia.c 		mcpcia_pci_clr_err(mcheck_extra(cpu));
cpu               183 arch/alpha/kernel/core_t2.c 	unsigned int value, cpu, taken;
cpu               186 arch/alpha/kernel/core_t2.c 	cpu = smp_processor_id();
cpu               199 arch/alpha/kernel/core_t2.c 	mcheck_expected(cpu) = 1;
cpu               200 arch/alpha/kernel/core_t2.c 	mcheck_taken(cpu) = 0;
cpu               201 arch/alpha/kernel/core_t2.c 	t2_mcheck_any_expected |= (1 << cpu);
cpu               215 arch/alpha/kernel/core_t2.c 	if ((taken = mcheck_taken(cpu))) {
cpu               216 arch/alpha/kernel/core_t2.c 		mcheck_taken(cpu) = 0;
cpu               217 arch/alpha/kernel/core_t2.c 		t2_mcheck_last_taken |= (1 << cpu);
cpu               221 arch/alpha/kernel/core_t2.c 	mcheck_expected(cpu) = 0;
cpu               237 arch/alpha/kernel/core_t2.c 	unsigned int cpu, taken;
cpu               240 arch/alpha/kernel/core_t2.c 	cpu = smp_processor_id();
cpu               251 arch/alpha/kernel/core_t2.c 	mcheck_expected(cpu) = 1;
cpu               252 arch/alpha/kernel/core_t2.c 	mcheck_taken(cpu) = 0;
cpu               253 arch/alpha/kernel/core_t2.c 	t2_mcheck_any_expected |= (1 << cpu);
cpu               267 arch/alpha/kernel/core_t2.c 	if ((taken = mcheck_taken(cpu))) {
cpu               268 arch/alpha/kernel/core_t2.c 		mcheck_taken(cpu) = 0;
cpu               269 arch/alpha/kernel/core_t2.c 		t2_mcheck_last_taken |= (1 << cpu);
cpu               272 arch/alpha/kernel/core_t2.c 	mcheck_expected(cpu) = 0;
cpu               526 arch/alpha/kernel/core_t2.c t2_clear_errors(int cpu)
cpu               530 arch/alpha/kernel/core_t2.c 	cpu_regs = (struct sable_cpu_csr *)T2_CPUn_BASE(cpu);
cpu               559 arch/alpha/kernel/core_t2.c 	int cpu = smp_processor_id();
cpu               568 arch/alpha/kernel/core_t2.c 	t2_clear_errors(cpu);
cpu               576 arch/alpha/kernel/core_t2.c 	if (!mcheck_expected(cpu) && t2_mcheck_any_expected) {
cpu               587 arch/alpha/kernel/core_t2.c 			       " code 0x%x\n", cpu, t2_mcheck_any_expected,
cpu               594 arch/alpha/kernel/core_t2.c 	if (!mcheck_expected(cpu) && !t2_mcheck_any_expected) {
cpu               595 arch/alpha/kernel/core_t2.c 		if (t2_mcheck_last_taken & (1 << cpu)) {
cpu               600 arch/alpha/kernel/core_t2.c 			       cpu, t2_mcheck_last_taken,
cpu               617 arch/alpha/kernel/core_t2.c 		       (mcheck_expected(cpu) ? "EX" : "UN"), cpu,
cpu               623 arch/alpha/kernel/core_t2.c 	process_mcheck_info(vector, la_ptr, "T2", mcheck_expected(cpu));
cpu               204 arch/alpha/kernel/core_tsunami.c 	int cpu = smp_processor_id();
cpu               207 arch/alpha/kernel/core_tsunami.c 	mcheck_taken(cpu) = 0;
cpu               208 arch/alpha/kernel/core_tsunami.c 	mcheck_expected(cpu) = 1;
cpu               212 arch/alpha/kernel/core_tsunami.c 	mcheck_expected(cpu) = 0;
cpu               213 arch/alpha/kernel/core_tsunami.c 	probe_result = !mcheck_taken(cpu);
cpu               214 arch/alpha/kernel/core_tsunami.c 	mcheck_taken(cpu) = 0;
cpu               242 arch/alpha/kernel/err_common.c cdl_process_console_data_log(int cpu, struct percpu_struct *pcpu)
cpu               250 arch/alpha/kernel/err_common.c 	       err_print_prefix, cpu);
cpu               260 arch/alpha/kernel/err_common.c 	       err_print_prefix, err, cpu);
cpu               267 arch/alpha/kernel/err_common.c 	unsigned long cpu;
cpu               269 arch/alpha/kernel/err_common.c 	for (cpu = 0; cpu < hwrpb->nr_processors; cpu++) {
cpu               272 arch/alpha/kernel/err_common.c 			 + cpu * hwrpb->processor_size);
cpu               274 arch/alpha/kernel/err_common.c 			cdl_process_console_data_log(cpu, pcpu);
cpu                49 arch/alpha/kernel/irq.c 	int cpu = last_cpu + 1;
cpu                58 arch/alpha/kernel/irq.c 	while (!cpu_possible(cpu) ||
cpu                59 arch/alpha/kernel/irq.c 	       !cpumask_test_cpu(cpu, irq_default_affinity))
cpu                60 arch/alpha/kernel/irq.c 		cpu = (cpu < (NR_CPUS-1) ? cpu + 1 : 0);
cpu                61 arch/alpha/kernel/irq.c 	last_cpu = cpu;
cpu                63 arch/alpha/kernel/irq.c 	cpumask_copy(irq_data_get_affinity_mask(data), cpumask_of(cpu));
cpu                64 arch/alpha/kernel/irq.c 	chip->irq_set_affinity(data, cpumask_of(cpu), false);
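The arch/alpha/kernel/irq.c lines above spread device interrupts round-robin: starting just after the last CPU used, the loop scans forward (wrapping before NR_CPUS) until it finds a CPU that is both possible and set in irq_default_affinity, then repoints the IRQ's affinity mask at that single CPU. A standalone model of the selection loop, with the cpumasks reduced to plain bit masks and the irq_chip callback dropped, might look like this (like the original it assumes at least one eligible CPU exists):

#include <stdio.h>

#define NR_CPUS 8

static unsigned int possible_mask = 0x0f;	/* CPUs 0-3 exist               */
static unsigned int allowed_mask  = 0x0a;	/* default affinity: CPUs 1 and 3 */
static int last_cpu = -1;			/* rotates like irq.c's static   */

/* Pick the next CPU after last_cpu that is both possible and allowed,
 * wrapping around, mirroring the while loop in alpha's irq.c. */
static int next_irq_cpu(void)
{
	int cpu = last_cpu + 1;

	while (!(possible_mask & (1u << cpu)) ||
	       !(allowed_mask  & (1u << cpu)))
		cpu = (cpu < NR_CPUS - 1) ? cpu + 1 : 0;

	last_cpu = cpu;
	return cpu;
}

int main(void)
{
	for (int irq = 0; irq < 4; irq++)
		printf("irq %d -> cpu %d\n", irq, next_irq_cpu());
	return 0;
}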
cpu               147 arch/alpha/kernel/irq_alpha.c 		int cpu = smp_processor_id();
cpu               148 arch/alpha/kernel/irq_alpha.c 		mcheck_expected(cpu) = 0;
cpu               149 arch/alpha/kernel/irq_alpha.c 		mcheck_taken(cpu) = 1;
cpu               765 arch/alpha/kernel/osf_sys.c 	struct percpu_struct *cpu;
cpu               797 arch/alpha/kernel/osf_sys.c 		cpu = (struct percpu_struct*)
cpu               799 arch/alpha/kernel/osf_sys.c 		w = cpu->type;
cpu               582 arch/alpha/kernel/perf_event.c 	struct percpu_struct *cpu;
cpu               586 arch/alpha/kernel/perf_event.c 	cpu = (struct percpu_struct *)((char *)hwrpb + hwrpb->processor_offset);
cpu               587 arch/alpha/kernel/perf_event.c 	cputype = cpu->type & 0xffffffff;
cpu               781 arch/alpha/kernel/perf_event.c 	int cpu;
cpu               788 arch/alpha/kernel/perf_event.c 	cpu = smp_processor_id();
cpu               794 arch/alpha/kernel/perf_event.c 	pr_info("CPU#%d: PCTR0[%06x] PCTR1[%06x]\n", cpu, pcr0, pcr1);
cpu               206 arch/alpha/kernel/proto.h #define mcheck_expected(cpu)	(cpu_data[cpu].mcheck_expected)
cpu               207 arch/alpha/kernel/proto.h #define mcheck_taken(cpu)	(cpu_data[cpu].mcheck_taken)
cpu               208 arch/alpha/kernel/proto.h #define mcheck_extra(cpu)	(cpu_data[cpu].mcheck_extra)
cpu               217 arch/alpha/kernel/proto.h #define mcheck_expected(cpu)	(*((void)(cpu), &__mcheck_info.expected))
cpu               218 arch/alpha/kernel/proto.h #define mcheck_taken(cpu)	(*((void)(cpu), &__mcheck_info.taken))
cpu               219 arch/alpha/kernel/proto.h #define mcheck_extra(cpu)	(*((void)(cpu), &__mcheck_info.extra))
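The proto.h lines above define mcheck_expected()/mcheck_taken()/mcheck_extra() against per-CPU cpu_data on SMP and against a single __mcheck_info on UP. They back the probing pattern visible in the core_mcpcia.c, core_t2.c and core_tsunami.c lines: before issuing a PCI configuration access that may not decode, the caller sets mcheck_expected(cpu) and clears mcheck_taken(cpu), performs the access, then checks mcheck_taken(cpu), which the machine-check handler (irq_alpha.c above) sets when an expected machine check arrives. A compact single-CPU model of that handshake, with the real MMIO access replaced by a stub, is sketched below.

#include <stdio.h>
#include <stdbool.h>

/* Stand-ins for cpu_data[cpu].mcheck_* on one CPU. */
static int mcheck_expected;
static int mcheck_taken;

/* What irq_alpha.c does when a machine check arrives while one is
 * expected: remember it happened and suppress the usual fatal path. */
static void machine_check_handler(void)
{
	if (mcheck_expected) {
		mcheck_expected = 0;
		mcheck_taken = 1;
	}
	/* an unexpected machine check would be reported here */
}

/* Probe-style config read: returns true and *value on success,
 * false if the access machine-checked (device absent). */
static bool probe_read(unsigned long addr, unsigned int *value, bool present)
{
	mcheck_expected = 1;
	mcheck_taken = 0;

	/* the real code issues the MMIO read here; we fake the outcome */
	if (present)
		*value = 0x11021102;		/* pretend vendor/device ID */
	else
		machine_check_handler();	/* access faulted            */

	mcheck_expected = 0;
	if (mcheck_taken) {
		mcheck_taken = 0;
		return false;
	}
	return true;
}

int main(void)
{
	unsigned int v;
	printf("slot 0: %s\n", probe_read(0x0, &v, true)  ? "found" : "empty");
	printf("slot 1: %s\n", probe_read(0x8, &v, false) ? "found" : "empty");
	return 0;
}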
cpu               423 arch/alpha/kernel/setup.c 		struct cpu *p = kzalloc(sizeof(*p), GFP_KERNEL);
cpu               439 arch/alpha/kernel/setup.c 	struct percpu_struct *cpu;
cpu               561 arch/alpha/kernel/setup.c 	cpu = (struct percpu_struct*)((char*)hwrpb + hwrpb->processor_offset);
cpu               564 arch/alpha/kernel/setup.c 		     cpu->type, &type_name, &var_name);
cpu               570 arch/alpha/kernel/setup.c 				 cpu->type);
cpu               640 arch/alpha/kernel/setup.c 	determine_cpu_caches(cpu->type);
cpu               742 arch/alpha/kernel/setup.c get_sysvec(unsigned long type, unsigned long variation, unsigned long cpu)
cpu               875 arch/alpha/kernel/setup.c 		cpu &= 0xffffffff; /* make it usable */
cpu               887 arch/alpha/kernel/setup.c 			if (vec == &eb164_mv && cpu == EV56_CPU)
cpu               912 arch/alpha/kernel/setup.c 			if (cpu == EV5_CPU || cpu == EV56_CPU)
cpu               918 arch/alpha/kernel/setup.c 			if (cpu == EV5_CPU || cpu == EV56_CPU)
cpu               924 arch/alpha/kernel/setup.c 			if (cpu == EV5_CPU || cpu == EV56_CPU)
cpu               989 arch/alpha/kernel/setup.c get_sysnames(unsigned long type, unsigned long variation, unsigned long cpu,
cpu              1018 arch/alpha/kernel/setup.c 	cpu &= 0xffffffff; /* make it usable */
cpu              1029 arch/alpha/kernel/setup.c 		if (eb164_indices[member] == 0 && cpu == EV56_CPU)
cpu              1105 arch/alpha/kernel/setup.c 	struct percpu_struct *cpu;
cpu              1110 arch/alpha/kernel/setup.c 		cpu = (struct percpu_struct *)
cpu              1112 arch/alpha/kernel/setup.c 		if ((cpu->flags & 0x1cc) == 0x1cc)
cpu              1144 arch/alpha/kernel/setup.c 	struct percpu_struct *cpu = slot;
cpu              1152 arch/alpha/kernel/setup.c 	cpu_index = (unsigned) (cpu->type - 1);
cpu              1158 arch/alpha/kernel/setup.c 		     cpu->type, &systype_name, &sysvariation_name);
cpu              1160 arch/alpha/kernel/setup.c 	nr_processors = get_nr_processors(cpu, hwrpb->nr_processors);
cpu              1187 arch/alpha/kernel/setup.c 		       cpu_name, cpu->variation, cpu->revision,
cpu              1188 arch/alpha/kernel/setup.c 		       (char*)cpu->serial_no,
cpu               202 arch/alpha/kernel/smp.c 	struct percpu_struct *cpu;
cpu               207 arch/alpha/kernel/smp.c 	cpu = (struct percpu_struct *)
cpu               218 arch/alpha/kernel/smp.c 	*(unsigned int *)&cpu->ipc_buffer[0] = len;
cpu               219 arch/alpha/kernel/smp.c 	cp1 = (char *) &cpu->ipc_buffer[1];
cpu               243 arch/alpha/kernel/smp.c 	struct percpu_struct *cpu;
cpu               256 arch/alpha/kernel/smp.c 		cpu = (struct percpu_struct *)
cpu               263 arch/alpha/kernel/smp.c 		      mycpu, i, cpu->halt_reason, cpu->flags));
cpu               265 arch/alpha/kernel/smp.c 		cnt = cpu->ipc_buffer[0] >> 32;
cpu               269 arch/alpha/kernel/smp.c 			cp1 = (char *) &cpu->ipc_buffer[1];
cpu               294 arch/alpha/kernel/smp.c 	struct percpu_struct *cpu;
cpu               298 arch/alpha/kernel/smp.c 	cpu = (struct percpu_struct *)
cpu               302 arch/alpha/kernel/smp.c 	hwpcb = (struct pcb_struct *) cpu->hwpcb;
cpu               337 arch/alpha/kernel/smp.c 	cpu->flags |= 0x22;	/* turn on Context Valid and Restart Capable */
cpu               338 arch/alpha/kernel/smp.c 	cpu->flags &= ~1;	/* turn off Bootstrap In Progress */
cpu               346 arch/alpha/kernel/smp.c 		if (cpu->flags & 1)
cpu               405 arch/alpha/kernel/smp.c 	struct percpu_struct *cpubase, *cpu;
cpu               424 arch/alpha/kernel/smp.c 			cpu = (struct percpu_struct *)
cpu               426 arch/alpha/kernel/smp.c 			if ((cpu->flags & 0x1cc) == 0x1cc) {
cpu               430 arch/alpha/kernel/smp.c 				cpu->pal_revision = boot_cpu_palrev;
cpu               434 arch/alpha/kernel/smp.c 			      i, cpu->flags, cpu->type));
cpu               436 arch/alpha/kernel/smp.c 			      i, cpu->pal_revision));
cpu               455 arch/alpha/kernel/smp.c 	current_thread_info()->cpu = boot_cpuid;
cpu               479 arch/alpha/kernel/smp.c __cpu_up(unsigned int cpu, struct task_struct *tidle)
cpu               481 arch/alpha/kernel/smp.c 	smp_boot_one_cpu(cpu, tidle);
cpu               483 arch/alpha/kernel/smp.c 	return cpu_online(cpu) ? 0 : -ENOSYS;
cpu               489 arch/alpha/kernel/smp.c 	int cpu;
cpu               492 arch/alpha/kernel/smp.c 	for(cpu = 0; cpu < NR_CPUS; cpu++) 
cpu               493 arch/alpha/kernel/smp.c 		if (cpu_online(cpu))
cpu               494 arch/alpha/kernel/smp.c 			bogosum += cpu_data[cpu].loops_per_jiffy;
cpu               574 arch/alpha/kernel/smp.c smp_send_reschedule(int cpu)
cpu               577 arch/alpha/kernel/smp.c 	if (cpu == hard_smp_processor_id())
cpu               581 arch/alpha/kernel/smp.c 	send_ipi_message(cpumask_of(cpu), IPI_RESCHEDULE);
cpu               602 arch/alpha/kernel/smp.c void arch_send_call_function_single_ipi(int cpu)
cpu               604 arch/alpha/kernel/smp.c 	send_ipi_message(cpumask_of(cpu), IPI_CALL_FUNC);
cpu               655 arch/alpha/kernel/smp.c 			int cpu, this_cpu = smp_processor_id();
cpu               656 arch/alpha/kernel/smp.c 			for (cpu = 0; cpu < NR_CPUS; cpu++) {
cpu               657 arch/alpha/kernel/smp.c 				if (!cpu_online(cpu) || cpu == this_cpu)
cpu               659 arch/alpha/kernel/smp.c 				if (mm->context[cpu])
cpu               660 arch/alpha/kernel/smp.c 					mm->context[cpu] = 0;
cpu               702 arch/alpha/kernel/smp.c 			int cpu, this_cpu = smp_processor_id();
cpu               703 arch/alpha/kernel/smp.c 			for (cpu = 0; cpu < NR_CPUS; cpu++) {
cpu               704 arch/alpha/kernel/smp.c 				if (!cpu_online(cpu) || cpu == this_cpu)
cpu               706 arch/alpha/kernel/smp.c 				if (mm->context[cpu])
cpu               707 arch/alpha/kernel/smp.c 					mm->context[cpu] = 0;
cpu               756 arch/alpha/kernel/smp.c 			int cpu, this_cpu = smp_processor_id();
cpu               757 arch/alpha/kernel/smp.c 			for (cpu = 0; cpu < NR_CPUS; cpu++) {
cpu               758 arch/alpha/kernel/smp.c 				if (!cpu_online(cpu) || cpu == this_cpu)
cpu               760 arch/alpha/kernel/smp.c 				if (mm->context[cpu])
cpu               761 arch/alpha/kernel/smp.c 					mm->context[cpu] = 0;
cpu               139 arch/alpha/kernel/sys_dp264.c 	int cpu;
cpu               141 arch/alpha/kernel/sys_dp264.c 	for (cpu = 0; cpu < 4; cpu++) {
cpu               142 arch/alpha/kernel/sys_dp264.c 		unsigned long aff = cpu_irq_affinity[cpu];
cpu               143 arch/alpha/kernel/sys_dp264.c 		if (cpumask_test_cpu(cpu, &affinity))
cpu               147 arch/alpha/kernel/sys_dp264.c 		cpu_irq_affinity[cpu] = aff;
cpu               131 arch/alpha/kernel/sys_sx164.c 	struct percpu_struct *cpu = (struct percpu_struct*)
cpu               136 arch/alpha/kernel/sys_sx164.c 	    && (cpu->pal_revision & 0xffff) <= 0x117) {
cpu               138 arch/alpha/kernel/sys_titan.c 	int cpu;
cpu               140 arch/alpha/kernel/sys_titan.c 	for (cpu = 0; cpu < 4; cpu++) {
cpu               141 arch/alpha/kernel/sys_titan.c 		if (cpumask_test_cpu(cpu, &affinity))
cpu               142 arch/alpha/kernel/sys_titan.c 			titan_cpu_irq_affinity[cpu] |= 1UL << irq;
cpu               144 arch/alpha/kernel/sys_titan.c 			titan_cpu_irq_affinity[cpu] &= ~(1UL << irq);
cpu                93 arch/alpha/kernel/time.c 	int cpu = smp_processor_id();
cpu                94 arch/alpha/kernel/time.c 	struct clock_event_device *ce = &per_cpu(cpu_ce, cpu);
cpu               118 arch/alpha/kernel/time.c 	int cpu = smp_processor_id();
cpu               119 arch/alpha/kernel/time.c 	struct clock_event_device *ce = &per_cpu(cpu_ce, cpu);
cpu               125 arch/alpha/kernel/time.c 		.cpumask = cpumask_of(cpu),
cpu               175 arch/alpha/kernel/time.c 	int cpu = smp_processor_id();
cpu               176 arch/alpha/kernel/time.c 	struct clock_event_device *ce = &per_cpu(cpu_ce, cpu);
cpu               185 arch/alpha/kernel/time.c 	int cpu = smp_processor_id();
cpu               186 arch/alpha/kernel/time.c 	struct clock_event_device *ce = &per_cpu(cpu_ce, cpu);
cpu               192 arch/alpha/kernel/time.c 		.cpumask = cpumask_of(cpu),
cpu               313 arch/alpha/kernel/time.c 	struct percpu_struct *cpu;
cpu               316 arch/alpha/kernel/time.c 	cpu = (struct percpu_struct *)((char*)hwrpb + hwrpb->processor_offset);
cpu               317 arch/alpha/kernel/time.c 	index = cpu->type & 0xffffffff;
cpu                48 arch/arc/include/asm/mmu_context.h #define asid_mm(mm, cpu)	mm->context.asid[cpu]
cpu                49 arch/arc/include/asm/mmu_context.h #define hw_pid(mm, cpu)		(asid_mm(mm, cpu) & MM_CTXT_ASID_MASK)
cpu                52 arch/arc/include/asm/mmu_context.h #define asid_cpu(cpu)		per_cpu(asid_cache, cpu)
cpu                60 arch/arc/include/asm/mmu_context.h 	const unsigned int cpu = smp_processor_id();
cpu                75 arch/arc/include/asm/mmu_context.h 	if (!((asid_mm(mm, cpu) ^ asid_cpu(cpu)) & MM_CTXT_CYCLE_MASK))
cpu                79 arch/arc/include/asm/mmu_context.h 	if (unlikely(!(++asid_cpu(cpu) & MM_CTXT_ASID_MASK))) {
cpu                88 arch/arc/include/asm/mmu_context.h 		if (!asid_cpu(cpu))
cpu                89 arch/arc/include/asm/mmu_context.h 			asid_cpu(cpu) = MM_CTXT_FIRST_CYCLE;
cpu                93 arch/arc/include/asm/mmu_context.h 	asid_mm(mm, cpu) = asid_cpu(cpu);
cpu                96 arch/arc/include/asm/mmu_context.h 	write_aux_reg(ARC_REG_PID, hw_pid(mm, cpu) | MMU_ENABLE);
cpu               132 arch/arc/include/asm/mmu_context.h 	const int cpu = smp_processor_id();
cpu               145 arch/arc/include/asm/mmu_context.h 	cpumask_set_cpu(cpu, mm_cpumask(next));
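The arc mmu_context.h lines above implement the same idea with an allocation "cycle" folded into the per-CPU asid_cache: the low MM_CTXT_ASID_MASK bits are the hardware PID eventually written to ARC_REG_PID (together with MMU_ENABLE), the high bits are the cycle, and an mm's cached asid[cpu] is reused only if its cycle bits match the CPU's current cycle; when the low bits wrap, the TLB is flushed and the cycle advances, retiring every outstanding ASID at once. Below is a userspace sketch of the allocator with illustrative constants, not the ARC definitions.

#include <stdio.h>

/* Illustrative layout: low byte = hardware ASID, rest = allocation cycle. */
#define ASID_MASK	0xffUL
#define FIRST_CYCLE	(ASID_MASK + 1)
#define CYCLE_MASK	(~ASID_MASK)
#define NO_ASID		0UL		/* MM_CTXT_NO_ASID analogue: never matches a live cycle */

static unsigned long asid_cache = FIRST_CYCLE;	/* per-CPU asid_cache analogue */

static void local_flush_tlb_all(void) { /* stub for the real TLB flush */ }

/* Mirrors get_new_mmu_context(): keep the mm's ASID if it is from the
 * current cycle, otherwise hand out the next one, flushing on wrap. */
static unsigned long get_mmu_context(unsigned long mm_asid)
{
	if (!((mm_asid ^ asid_cache) & CYCLE_MASK))
		return mm_asid;			/* still from the current cycle */

	if (!(++asid_cache & ASID_MASK)) {	/* low bits wrapped             */
		local_flush_tlb_all();
		if (!asid_cache)
			asid_cache = FIRST_CYCLE;	/* full counter wrapped  */
	}
	return asid_cache;
}

int main(void)
{
	unsigned long mm = NO_ASID;

	mm = get_mmu_context(mm);
	printf("hw pid %lu, cycle %lu\n", mm & ASID_MASK, mm >> 8);
	return 0;
}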
cpu                15 arch/arc/include/asm/smp.h #define raw_smp_processor_id() (current_thread_info()->cpu)
cpu                23 arch/arc/include/asm/smp.h extern void arch_send_call_function_single_ipi(int cpu);
cpu                39 arch/arc/include/asm/smp.h extern int smp_ipi_irq_setup(int cpu, irq_hw_number_t hwirq);
cpu                57 arch/arc/include/asm/smp.h 	void		(*init_per_cpu)(int cpu);
cpu                58 arch/arc/include/asm/smp.h 	void		(*cpu_kick)(int cpu, unsigned long pc);
cpu                59 arch/arc/include/asm/smp.h 	void		(*ipi_send)(int cpu);
cpu                44 arch/arc/include/asm/thread_info.h 	__u32 cpu;			/* current CPU */
cpu                57 arch/arc/include/asm/thread_info.h 	.cpu        = 0,			\
cpu                26 arch/arc/kernel/mcip.c static void mcip_update_gfrc_halt_mask(int cpu)
cpu                45 arch/arc/kernel/mcip.c 	gfrc_halt_mask |= BIT(cpu);
cpu                51 arch/arc/kernel/mcip.c static void mcip_update_debug_halt_mask(int cpu)
cpu                66 arch/arc/kernel/mcip.c 	mcip_mask |= BIT(cpu);
cpu                79 arch/arc/kernel/mcip.c static void mcip_setup_per_cpu(int cpu)
cpu                85 arch/arc/kernel/mcip.c 	smp_ipi_irq_setup(cpu, IPI_IRQ);
cpu                86 arch/arc/kernel/mcip.c 	smp_ipi_irq_setup(cpu, SOFTIRQ_IRQ);
cpu                90 arch/arc/kernel/mcip.c 		mcip_update_gfrc_halt_mask(cpu);
cpu                94 arch/arc/kernel/mcip.c 		mcip_update_debug_halt_mask(cpu);
cpu                97 arch/arc/kernel/mcip.c static void mcip_ipi_send(int cpu)
cpu               103 arch/arc/kernel/mcip.c 	if (unlikely(cpu == raw_smp_processor_id())) {
cpu               116 arch/arc/kernel/mcip.c 	__mcip_cmd(CMD_INTRPT_READ_STATUS, cpu);
cpu               119 arch/arc/kernel/mcip.c 		__mcip_cmd(CMD_INTRPT_GENERATE_IRQ, cpu);
cpu               126 arch/arc/kernel/mcip.c 	unsigned int cpu, c;
cpu               139 arch/arc/kernel/mcip.c 	cpu = read_aux_reg(ARC_REG_MCIP_READBACK);	/* 1,2,4,8... */
cpu               147 arch/arc/kernel/mcip.c 		c = __ffs(cpu);			/* 0,1,2,3 */
cpu               149 arch/arc/kernel/mcip.c 		cpu &= ~(1U << c);
cpu               150 arch/arc/kernel/mcip.c 	} while (cpu);
cpu                66 arch/arc/kernel/setup.c static void read_decode_ccm_bcr(struct cpuinfo_arc *cpu)
cpu                74 arch/arc/kernel/setup.c 			cpu->iccm.sz = 4096 << iccm.sz;	/* 8K to 512K */
cpu                75 arch/arc/kernel/setup.c 			cpu->iccm.base_addr = iccm.base << 16;
cpu                81 arch/arc/kernel/setup.c 			cpu->dccm.sz = 2048 << dccm.sz;	/* 2K to 256K */
cpu                84 arch/arc/kernel/setup.c 			cpu->dccm.base_addr = base & ~0xF;
cpu                93 arch/arc/kernel/setup.c 			cpu->iccm.sz = 256 << iccm.sz00;	/* 512B to 16M */
cpu                95 arch/arc/kernel/setup.c 				cpu->iccm.sz <<= iccm.sz01;
cpu                98 arch/arc/kernel/setup.c 			cpu->iccm.base_addr = region & 0xF0000000;
cpu               103 arch/arc/kernel/setup.c 			cpu->dccm.sz = 256 << dccm.sz0;
cpu               105 arch/arc/kernel/setup.c 				cpu->dccm.sz <<= dccm.sz1;
cpu               108 arch/arc/kernel/setup.c 			cpu->dccm.base_addr = region & 0xF0000000;
cpu               113 arch/arc/kernel/setup.c static void decode_arc_core(struct cpuinfo_arc *cpu)
cpu               124 arch/arc/kernel/setup.c 	if (cpu->core.family < 0x54) { /* includes arc700 */
cpu               127 arch/arc/kernel/setup.c 			if (cpu->core.family == tbl->id) {
cpu               128 arch/arc/kernel/setup.c 				cpu->release = tbl->str;
cpu               134 arch/arc/kernel/setup.c 			cpu->name = "ARC700";
cpu               136 arch/arc/kernel/setup.c 			cpu->name = "HS38";
cpu               138 arch/arc/kernel/setup.c 			cpu->name = cpu->release = "Unknown";
cpu               152 arch/arc/kernel/setup.c 		cpu->name = "HS48";
cpu               153 arch/arc/kernel/setup.c 		cpu->extn.dual = 1;
cpu               156 arch/arc/kernel/setup.c 		cpu->name = "HS38";
cpu               161 arch/arc/kernel/setup.c 			cpu->release = tbl->str;
cpu               171 arch/arc/kernel/setup.c 	struct cpuinfo_arc *cpu = &cpuinfo_arc700[smp_processor_id()];
cpu               175 arch/arc/kernel/setup.c 	FIX_PTR(cpu);
cpu               177 arch/arc/kernel/setup.c 	READ_BCR(AUX_IDENTITY, cpu->core);
cpu               178 arch/arc/kernel/setup.c 	decode_arc_core(cpu);
cpu               181 arch/arc/kernel/setup.c 	cpu->extn.timer0 = timer.t0;
cpu               182 arch/arc/kernel/setup.c 	cpu->extn.timer1 = timer.t1;
cpu               183 arch/arc/kernel/setup.c 	cpu->extn.rtc = timer.rtc;
cpu               185 arch/arc/kernel/setup.c 	cpu->vec_base = read_aux_reg(AUX_INTR_VEC_BASE);
cpu               187 arch/arc/kernel/setup.c 	READ_BCR(ARC_REG_MUL_BCR, cpu->extn_mpy);
cpu               190 arch/arc/kernel/setup.c 	read_decode_ccm_bcr(cpu);
cpu               201 arch/arc/kernel/setup.c 		cpu->extn.fpu_sp = sp.ver ? 1 : 0;
cpu               202 arch/arc/kernel/setup.c 		cpu->extn.fpu_dp = dp.ver ? 1 : 0;
cpu               205 arch/arc/kernel/setup.c 		cpu->bpu.ver = bpu.ver;
cpu               206 arch/arc/kernel/setup.c 		cpu->bpu.full = bpu.fam ? 1 : 0;
cpu               208 arch/arc/kernel/setup.c 			cpu->bpu.num_cache = 256 << (bpu.ent - 1);
cpu               209 arch/arc/kernel/setup.c 			cpu->bpu.num_pred = 256 << (bpu.ent - 1);
cpu               216 arch/arc/kernel/setup.c 		cpu->extn.fpu_sp = spdp.sp ? 1 : 0;
cpu               217 arch/arc/kernel/setup.c 		cpu->extn.fpu_dp = spdp.dp ? 1 : 0;
cpu               220 arch/arc/kernel/setup.c 		cpu->bpu.ver = bpu.ver;
cpu               221 arch/arc/kernel/setup.c 		cpu->bpu.full = bpu.ft;
cpu               222 arch/arc/kernel/setup.c 		cpu->bpu.num_cache = 256 << bpu.bce;
cpu               223 arch/arc/kernel/setup.c 		cpu->bpu.num_pred = 2048 << bpu.pte;
cpu               224 arch/arc/kernel/setup.c 		cpu->bpu.ret_stk = 4 << bpu.rse;
cpu               227 arch/arc/kernel/setup.c 		if (cpu->extn.dual) {
cpu               231 arch/arc/kernel/setup.c 			cpu->extn.dual_enb = !(exec_ctrl & 1);
cpu               237 arch/arc/kernel/setup.c 		cpu->extn.ap_num = 2 << ap.num;
cpu               238 arch/arc/kernel/setup.c 		cpu->extn.ap_full = !ap.min;
cpu               242 arch/arc/kernel/setup.c 	cpu->extn.smart = bcr.ver ? 1 : 0;
cpu               245 arch/arc/kernel/setup.c 	cpu->extn.rtt = bcr.ver ? 1 : 0;
cpu               252 arch/arc/kernel/setup.c 			cpu->isa.atomic = IS_ENABLED(CONFIG_ARC_HAS_LLSC);
cpu               256 arch/arc/kernel/setup.c 			cpu->isa.atomic = bcr.info & 1;
cpu               259 arch/arc/kernel/setup.c 		cpu->isa.be = IS_ENABLED(CONFIG_CPU_BIG_ENDIAN);
cpu               262 arch/arc/kernel/setup.c 		if (unlikely(cpu->core.family < 0x34 || cpu->mmu.ver < 3))
cpu               263 arch/arc/kernel/setup.c 			cpu->name = "ARC750";
cpu               265 arch/arc/kernel/setup.c 		cpu->isa = isa;
cpu               271 arch/arc/kernel/setup.c 	struct cpuinfo_arc *cpu = &cpuinfo_arc700[cpu_id];
cpu               272 arch/arc/kernel/setup.c 	struct bcr_identity *core = &cpu->core;
cpu               276 arch/arc/kernel/setup.c 	FIX_PTR(cpu);
cpu               283 arch/arc/kernel/setup.c 		       cpu_id, cpu->name, cpu->release,
cpu               285 arch/arc/kernel/setup.c 		       IS_AVAIL1(cpu->isa.be, "[Big-Endian]"),
cpu               286 arch/arc/kernel/setup.c 		       IS_AVAIL3(cpu->extn.dual, cpu->extn.dual_enb, " Dual-Issue "));
cpu               289 arch/arc/kernel/setup.c 		       IS_AVAIL1(cpu->extn.timer0, "Timer0 "),
cpu               290 arch/arc/kernel/setup.c 		       IS_AVAIL1(cpu->extn.timer1, "Timer1 "),
cpu               291 arch/arc/kernel/setup.c 		       IS_AVAIL2(cpu->extn.rtc, "RTC [UP 64-bit] ", CONFIG_ARC_TIMERS_64BIT),
cpu               292 arch/arc/kernel/setup.c 		       IS_AVAIL2(cpu->extn.gfrc, "GFRC [SMP 64-bit] ", CONFIG_ARC_TIMERS_64BIT));
cpu               294 arch/arc/kernel/setup.c 	if (cpu->extn_mpy.ver) {
cpu               301 arch/arc/kernel/setup.c 			if (cpu->extn_mpy.dsp)	/* OPT 7-9 */
cpu               302 arch/arc/kernel/setup.c 				opt = cpu->extn_mpy.dsp + 6;
cpu               309 arch/arc/kernel/setup.c 		       IS_AVAIL2(cpu->isa.atomic, "atomic ", CONFIG_ARC_HAS_LLSC),
cpu               310 arch/arc/kernel/setup.c 		       IS_AVAIL2(cpu->isa.ldd, "ll64 ", CONFIG_ARC_HAS_LL64),
cpu               311 arch/arc/kernel/setup.c 		       IS_AVAIL2(cpu->isa.unalign, "unalign ", CONFIG_ARC_USE_UNALIGNED_MEM_ACCESS),
cpu               312 arch/arc/kernel/setup.c 		       IS_AVAIL1(cpu->extn_mpy.ver, mpy_opt),
cpu               313 arch/arc/kernel/setup.c 		       IS_AVAIL1(cpu->isa.div_rem, "div_rem "));
cpu               315 arch/arc/kernel/setup.c 	if (cpu->bpu.ver) {
cpu               318 arch/arc/kernel/setup.c 			      IS_AVAIL1(cpu->bpu.full, "full"),
cpu               319 arch/arc/kernel/setup.c 			      IS_AVAIL1(!cpu->bpu.full, "partial"),
cpu               320 arch/arc/kernel/setup.c 			      cpu->bpu.num_cache, cpu->bpu.num_pred, cpu->bpu.ret_stk);
cpu               344 arch/arc/kernel/setup.c 	struct cpuinfo_arc *cpu = &cpuinfo_arc700[cpu_id];
cpu               346 arch/arc/kernel/setup.c 	FIX_PTR(cpu);
cpu               348 arch/arc/kernel/setup.c 	n += scnprintf(buf + n, len - n, "Vector Table\t: %#x\n", cpu->vec_base);
cpu               350 arch/arc/kernel/setup.c 	if (cpu->extn.fpu_sp || cpu->extn.fpu_dp)
cpu               352 arch/arc/kernel/setup.c 			       IS_AVAIL1(cpu->extn.fpu_sp, "SP "),
cpu               353 arch/arc/kernel/setup.c 			       IS_AVAIL1(cpu->extn.fpu_dp, "DP "));
cpu               355 arch/arc/kernel/setup.c 	if (cpu->extn.ap_num | cpu->extn.smart | cpu->extn.rtt) {
cpu               357 arch/arc/kernel/setup.c 			       IS_AVAIL1(cpu->extn.smart, "smaRT "),
cpu               358 arch/arc/kernel/setup.c 			       IS_AVAIL1(cpu->extn.rtt, "RTT "));
cpu               359 arch/arc/kernel/setup.c 		if (cpu->extn.ap_num) {
cpu               361 arch/arc/kernel/setup.c 				       cpu->extn.ap_num,
cpu               362 arch/arc/kernel/setup.c 				       cpu->extn.ap_full ? "full":"min");
cpu               367 arch/arc/kernel/setup.c 	if (cpu->dccm.sz || cpu->iccm.sz)
cpu               369 arch/arc/kernel/setup.c 			       cpu->dccm.base_addr, TO_KB(cpu->dccm.sz),
cpu               370 arch/arc/kernel/setup.c 			       cpu->iccm.base_addr, TO_KB(cpu->iccm.sz));
cpu               395 arch/arc/kernel/setup.c 	struct cpuinfo_arc *cpu = &cpuinfo_arc700[smp_processor_id()];
cpu               399 arch/arc/kernel/setup.c 	if (!cpu->extn.timer0)
cpu               402 arch/arc/kernel/setup.c 	if (!cpu->extn.timer1)
cpu               410 arch/arc/kernel/setup.c 	if ((unsigned int)__arc_dccm_base != cpu->dccm.base_addr)
cpu               413 arch/arc/kernel/setup.c 	if (CONFIG_ARC_DCCM_SZ * SZ_1K != cpu->dccm.sz)
cpu               418 arch/arc/kernel/setup.c 	if (CONFIG_ARC_ICCM_SZ * SZ_1K != cpu->iccm.sz)
cpu               433 arch/arc/kernel/setup.c 		present = cpu->extn.fpu_dp;
cpu               439 arch/arc/kernel/setup.c 		present = cpu->extn_mpy.dsp | cpu->extn.fpu_sp | cpu->extn.fpu_dp;
cpu               690 arch/arc/kernel/setup.c static DEFINE_PER_CPU(struct cpu, cpu_topology);
cpu               694 arch/arc/kernel/setup.c 	int cpu;
cpu               696 arch/arc/kernel/setup.c 	for_each_present_cpu(cpu)
cpu               697 arch/arc/kernel/setup.c 	    register_cpu(&per_cpu(cpu_topology, cpu), cpu);
cpu               137 arch/arc/kernel/smp.c static void arc_default_smp_cpu_kick(int cpu, unsigned long pc)
cpu               139 arch/arc/kernel/smp.c 	BUG_ON(cpu == 0);
cpu               141 arch/arc/kernel/smp.c 	__boot_write(wake_flag, cpu);
cpu               144 arch/arc/kernel/smp.c void arc_platform_smp_wait_to_boot(int cpu)
cpu               150 arch/arc/kernel/smp.c 	while (__boot_read(wake_flag) != cpu)
cpu               169 arch/arc/kernel/smp.c 	unsigned int cpu = smp_processor_id();
cpu               177 arch/arc/kernel/smp.c 	cpumask_set_cpu(cpu, mm_cpumask(mm));
cpu               181 arch/arc/kernel/smp.c 		plat_smp_ops.init_per_cpu(cpu);
cpu               184 arch/arc/kernel/smp.c 		machine_desc->init_per_cpu(cpu);
cpu               186 arch/arc/kernel/smp.c 	notify_cpu_starting(cpu);
cpu               187 arch/arc/kernel/smp.c 	set_cpu_online(cpu, true);
cpu               189 arch/arc/kernel/smp.c 	pr_info("## CPU%u LIVE ##: Executing Code...\n", cpu);
cpu               206 arch/arc/kernel/smp.c int __cpu_up(unsigned int cpu, struct task_struct *idle)
cpu               212 arch/arc/kernel/smp.c 	pr_info("Idle Task [%d] %p", cpu, idle);
cpu               213 arch/arc/kernel/smp.c 	pr_info("Trying to bring up CPU%u ...\n", cpu);
cpu               216 arch/arc/kernel/smp.c 		plat_smp_ops.cpu_kick(cpu,
cpu               219 arch/arc/kernel/smp.c 		arc_default_smp_cpu_kick(cpu, (unsigned long)NULL);
cpu               224 arch/arc/kernel/smp.c 		if (cpu_online(cpu))
cpu               228 arch/arc/kernel/smp.c 	if (!cpu_online(cpu)) {
cpu               229 arch/arc/kernel/smp.c 		pr_info("Timeout: CPU%u FAILED to comeup !!!\n", cpu);
cpu               265 arch/arc/kernel/smp.c static void ipi_send_msg_one(int cpu, enum ipi_msg_type msg)
cpu               267 arch/arc/kernel/smp.c 	unsigned long __percpu *ipi_data_ptr = per_cpu_ptr(&ipi_data, cpu);
cpu               271 arch/arc/kernel/smp.c 	pr_debug("%d Sending msg [%d] to %d\n", smp_processor_id(), msg, cpu);
cpu               293 arch/arc/kernel/smp.c 		plat_smp_ops.ipi_send(cpu);
cpu               300 arch/arc/kernel/smp.c 	unsigned int cpu;
cpu               302 arch/arc/kernel/smp.c 	for_each_cpu(cpu, callmap)
cpu               303 arch/arc/kernel/smp.c 		ipi_send_msg_one(cpu, msg);
cpu               306 arch/arc/kernel/smp.c void smp_send_reschedule(int cpu)
cpu               308 arch/arc/kernel/smp.c 	ipi_send_msg_one(cpu, IPI_RESCHEDULE);
cpu               319 arch/arc/kernel/smp.c void arch_send_call_function_single_ipi(int cpu)
cpu               321 arch/arc/kernel/smp.c 	ipi_send_msg_one(cpu, IPI_CALL_FUNC);
cpu               404 arch/arc/kernel/smp.c int smp_ipi_irq_setup(int cpu, irq_hw_number_t hwirq)
cpu               406 arch/arc/kernel/smp.c 	int *dev = per_cpu_ptr(&ipi_dev, cpu);
cpu               413 arch/arc/kernel/smp.c 	if (!cpu) {
cpu                75 arch/arc/mm/cache.c static void read_decode_cache_bcr_arcv2(int cpu)
cpu                77 arch/arc/mm/cache.c 	struct cpuinfo_arc_cache *p_slc = &cpuinfo_arc700[cpu].slc;
cpu               133 arch/arc/mm/cache.c 	if (cpuinfo_arc700[cpu].core.family > 0x51) {
cpu               137 arch/arc/mm/cache.c 		if (cpuinfo_arc700[cpu].core.family > 0x52)
cpu               145 arch/arc/mm/cache.c 	unsigned int cpu = smp_processor_id();
cpu               154 arch/arc/mm/cache.c 	p_ic = &cpuinfo_arc700[cpu].icache;
cpu               173 arch/arc/mm/cache.c 	p_dc = &cpuinfo_arc700[cpu].dcache;
cpu               195 arch/arc/mm/cache.c                 read_decode_cache_bcr_arcv2(cpu);
cpu              1218 arch/arc/mm/cache.c 	unsigned int __maybe_unused cpu = smp_processor_id();
cpu              1221 arch/arc/mm/cache.c 		struct cpuinfo_arc_cache *ic = &cpuinfo_arc700[cpu].icache;
cpu              1241 arch/arc/mm/cache.c 		struct cpuinfo_arc_cache *dc = &cpuinfo_arc700[cpu].dcache;
cpu              1302 arch/arc/mm/cache.c 	unsigned int __maybe_unused cpu = smp_processor_id();
cpu              1307 arch/arc/mm/cache.c 	if (!cpu)
cpu               327 arch/arc/mm/tlb.c 	const unsigned int cpu = smp_processor_id();
cpu               351 arch/arc/mm/tlb.c 	if (asid_mm(vma->vm_mm, cpu) != MM_CTXT_NO_ASID) {
cpu               353 arch/arc/mm/tlb.c 			tlb_entry_erase(start | hw_pid(vma->vm_mm, cpu));
cpu               400 arch/arc/mm/tlb.c 	const unsigned int cpu = smp_processor_id();
cpu               408 arch/arc/mm/tlb.c 	if (asid_mm(vma->vm_mm, cpu) != MM_CTXT_NO_ASID) {
cpu               409 arch/arc/mm/tlb.c 		tlb_entry_erase((page & PAGE_MASK) | hw_pid(vma->vm_mm, cpu));
cpu               703 arch/arc/mm/tlb.c 	unsigned int cpu;
cpu               708 arch/arc/mm/tlb.c 	cpu = smp_processor_id();
cpu               710 arch/arc/mm/tlb.c 	if (likely(asid_mm(vma->vm_mm, cpu) != MM_CTXT_NO_ASID)) {
cpu               711 arch/arc/mm/tlb.c 		unsigned int asid = hw_pid(vma->vm_mm, cpu);
cpu                11 arch/arc/plat-eznps/include/plat/mtm.h static inline void *nps_mtm_reg_addr(u32 cpu, u32 reg)
cpu                16 arch/arc/plat-eznps/include/plat/mtm.h 	gid.value = cpu;
cpu                20 arch/arc/plat-eznps/include/plat/mtm.h 	return nps_host_reg(cpu, blkid, reg);
cpu                24 arch/arc/plat-eznps/include/plat/mtm.h #define NPS_CPU_TO_THREAD_NUM(cpu) \
cpu                25 arch/arc/plat-eznps/include/plat/mtm.h 	({ struct global_id gid; gid.value = cpu; gid.thread; })
cpu                28 arch/arc/plat-eznps/include/plat/mtm.h #define MTM_CFG(cpu)			nps_mtm_reg_addr(cpu, 0x81)
cpu                29 arch/arc/plat-eznps/include/plat/mtm.h #define MTM_THR_INIT(cpu)		nps_mtm_reg_addr(cpu, 0x92)
cpu                30 arch/arc/plat-eznps/include/plat/mtm.h #define MTM_THR_INIT_STS(cpu)		nps_mtm_reg_addr(cpu, 0x93)
cpu                36 arch/arc/plat-eznps/include/plat/mtm.h void mtm_enable_core(unsigned int cpu);
cpu                37 arch/arc/plat-eznps/include/plat/mtm.h int mtm_enable_thread(int cpu);
cpu                43 arch/arc/plat-eznps/include/plat/mtm.h #define mtm_enable_core(cpu)
cpu                44 arch/arc/plat-eznps/include/plat/mtm.h #define mtm_enable_thread(cpu) 1
cpu                45 arch/arc/plat-eznps/include/plat/mtm.h #define NPS_CPU_TO_THREAD_NUM(cpu) 0
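In the plat-eznps mtm.h lines above the logical cpu number doubles as a hardware global id: NPS_CPU_TO_THREAD_NUM() loads it into a struct global_id union and reads back its thread bit-field, and nps_mtm_reg_addr() decodes it the same way to locate the per-core MTM register block. A stripped-down illustration of that union-decode idiom follows; the field widths and the 0x0123 example value are invented for the sketch (and the bit-field order assumes a little-endian GCC layout), they are not the NPS definitions.

#include <stdio.h>
#include <stdint.h>

/* Hypothetical layout: 4 thread bits, 4 core bits, 8 cluster bits. */
struct global_id {
	union {
		struct {
			uint32_t thread  : 4;
			uint32_t core    : 4;
			uint32_t cluster : 8;
			uint32_t rsvd    : 16;
		};
		uint32_t value;
	};
};

/* Same idiom as NPS_CPU_TO_THREAD_NUM(): view the cpu number through
 * the union and pull out one field (GNU statement expression). */
#define CPU_TO_THREAD_NUM(cpu) \
	({ struct global_id gid; gid.value = (cpu); gid.thread; })

int main(void)
{
	uint32_t cpu = 0x0123;	/* cluster 1, core 2, thread 3 in this layout */

	printf("cpu %#x -> cluster %u core %u thread %u\n",
	       cpu, (unsigned)((cpu >> 8) & 0xff),
	       (unsigned)((cpu >> 4) & 0xf), CPU_TO_THREAD_NUM(cpu));
	return 0;
}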
cpu                31 arch/arc/plat-eznps/mtm.c static void mtm_init_nat(int cpu)
cpu                38 arch/arc/plat-eznps/mtm.c 	for (i = 0, t = cpu; i < NPS_NUM_HW_THREADS; i++, t++)
cpu                47 arch/arc/plat-eznps/mtm.c 	mtm_cfg.value = ioread32be(MTM_CFG(cpu));
cpu                49 arch/arc/plat-eznps/mtm.c 	iowrite32be(mtm_cfg.value, MTM_CFG(cpu));
cpu                52 arch/arc/plat-eznps/mtm.c static void mtm_init_thread(int cpu)
cpu                60 arch/arc/plat-eznps/mtm.c 	iowrite32be(thr_init.value, MTM_THR_INIT(cpu));
cpu                61 arch/arc/plat-eznps/mtm.c 	thr_init.thr_id = NPS_CPU_TO_THREAD_NUM(cpu);
cpu                63 arch/arc/plat-eznps/mtm.c 	iowrite32be(thr_init.value, MTM_THR_INIT(cpu));
cpu                67 arch/arc/plat-eznps/mtm.c 		thr_init_sts.value = ioread32be(MTM_THR_INIT_STS(cpu));
cpu                72 arch/arc/plat-eznps/mtm.c 				pr_warn("Failed to thread init cpu %u\n", cpu);
cpu                76 arch/arc/plat-eznps/mtm.c 		pr_warn("Wrong thread id in thread init for cpu %u\n", cpu);
cpu                81 arch/arc/plat-eznps/mtm.c 		pr_warn("Got thread init timeout for cpu %u\n", cpu);
cpu                84 arch/arc/plat-eznps/mtm.c int mtm_enable_thread(int cpu)
cpu                88 arch/arc/plat-eznps/mtm.c 	if (NPS_CPU_TO_THREAD_NUM(cpu) == 0)
cpu                92 arch/arc/plat-eznps/mtm.c 	mtm_cfg.value = ioread32be(MTM_CFG(cpu));
cpu                93 arch/arc/plat-eznps/mtm.c 	mtm_cfg.ten |= (1 << (NPS_CPU_TO_THREAD_NUM(cpu)));
cpu                94 arch/arc/plat-eznps/mtm.c 	iowrite32be(mtm_cfg.value, MTM_CFG(cpu));
cpu                99 arch/arc/plat-eznps/mtm.c void mtm_enable_core(unsigned int cpu)
cpu               117 arch/arc/plat-eznps/mtm.c 	if (NPS_CPU_TO_THREAD_NUM(cpu) != 0)
cpu               121 arch/arc/plat-eznps/mtm.c 	mtm_init_nat(cpu);
cpu               124 arch/arc/plat-eznps/mtm.c 	mtm_cfg.value = ioread32be(MTM_CFG(cpu));
cpu               126 arch/arc/plat-eznps/mtm.c 	iowrite32be(mtm_cfg.value, MTM_CFG(cpu));
cpu               130 arch/arc/plat-eznps/mtm.c 		mtm_init_thread(cpu + i);
cpu                13 arch/arc/plat-eznps/platform.c 	int cpu;
cpu                22 arch/arc/plat-eznps/platform.c 	for (cpu = 0 ; cpu < eznps_max_cpus; cpu += eznps_cpus_per_cluster)
cpu                24 arch/arc/plat-eznps/platform.c 			    nps_host_reg(cpu, NPS_MSU_BLKID, NPS_MSU_EN_CFG));
cpu                53 arch/arc/plat-eznps/smp.c static void eznps_init_core(unsigned int cpu)
cpu                59 arch/arc/plat-eznps/smp.c 	if (NPS_CPU_TO_THREAD_NUM(cpu) != 0)
cpu                73 arch/arc/plat-eznps/smp.c 	if (!cpu) {
cpu                84 arch/arc/plat-eznps/smp.c static void __init eznps_smp_wakeup_cpu(int cpu, unsigned long pc)
cpu                88 arch/arc/plat-eznps/smp.c 	if (mtm_enable_thread(cpu) == 0)
cpu                95 arch/arc/plat-eznps/smp.c 	iowrite32be(cpu_cfg.value, nps_mtm_reg_addr(cpu, NPS_MTM_CPU_CFG));
cpu                98 arch/arc/plat-eznps/smp.c static void eznps_ipi_send(int cpu)
cpu               110 arch/arc/plat-eznps/smp.c 	gid.value = cpu;
cpu               124 arch/arc/plat-eznps/smp.c static void eznps_init_per_cpu(int cpu)
cpu               126 arch/arc/plat-eznps/smp.c 	smp_ipi_irq_setup(cpu, NPS_IPI_IRQ);
cpu               128 arch/arc/plat-eznps/smp.c 	eznps_init_core(cpu);
cpu               129 arch/arc/plat-eznps/smp.c 	mtm_enable_core(cpu);
cpu                20 arch/arc/plat-hsdk/platform.c static void __init hsdk_init_per_cpu(unsigned int cpu)
cpu                26 arch/arc/plat-hsdk/platform.c 	if (cpuinfo_arc700[cpu].iccm.sz)
cpu                33 arch/arc/plat-hsdk/platform.c 	if (cpuinfo_arc700[cpu].dccm.sz)
cpu               307 arch/arm/common/bL_switcher.c static struct task_struct *bL_switcher_thread_create(int cpu, void *arg)
cpu               312 arch/arm/common/bL_switcher.c 				      cpu_to_node(cpu), "kswitcher_%d", cpu);
cpu               314 arch/arm/common/bL_switcher.c 		kthread_bind(task, cpu);
cpu               317 arch/arm/common/bL_switcher.c 		pr_err("%s failed for CPU %d\n", __func__, cpu);
cpu               343 arch/arm/common/bL_switcher.c int bL_switch_request_cb(unsigned int cpu, unsigned int new_cluster_id,
cpu               349 arch/arm/common/bL_switcher.c 	if (cpu >= ARRAY_SIZE(bL_threads)) {
cpu               350 arch/arm/common/bL_switcher.c 		pr_err("%s: cpu %d out of bounds\n", __func__, cpu);
cpu               354 arch/arm/common/bL_switcher.c 	t = &bL_threads[cpu];
cpu               423 arch/arm/common/bL_switcher.c 	unsigned int cpu, cluster, mask;
cpu               429 arch/arm/common/bL_switcher.c 		cpu = MPIDR_AFFINITY_LEVEL(cpu_logical_map(i), 0);
cpu               435 arch/arm/common/bL_switcher.c 		if (WARN_ON(cpu >= MAX_CPUS_PER_CLUSTER))
cpu               485 arch/arm/common/bL_switcher.c 		cpu = MPIDR_AFFINITY_LEVEL(cpu_logical_map(i), 0);
cpu               495 arch/arm/common/bL_switcher.c 		bL_gic_id[cpu][cluster] = gic_id;
cpu               497 arch/arm/common/bL_switcher.c 			cpu, cluster, gic_id);
cpu               518 arch/arm/common/bL_switcher.c 	int cpu;
cpu               524 arch/arm/common/bL_switcher.c 	for_each_online_cpu(cpu) {
cpu               525 arch/arm/common/bL_switcher.c 		int pairing = bL_switcher_cpu_pairing[cpu];
cpu               528 arch/arm/common/bL_switcher.c 		if ((mpidr == cpu_logical_map(cpu)) ||
cpu               530 arch/arm/common/bL_switcher.c 			return cpu;
cpu               555 arch/arm/common/bL_switcher.c 	int cpu, ret;
cpu               577 arch/arm/common/bL_switcher.c 	for_each_online_cpu(cpu) {
cpu               578 arch/arm/common/bL_switcher.c 		struct bL_thread *t = &bL_threads[cpu];
cpu               583 arch/arm/common/bL_switcher.c 		t->task = bL_switcher_thread_create(cpu, t);
cpu               605 arch/arm/common/bL_switcher.c 	unsigned int cpu, cluster;
cpu               630 arch/arm/common/bL_switcher.c 	for_each_online_cpu(cpu) {
cpu               631 arch/arm/common/bL_switcher.c 		t = &bL_threads[cpu];
cpu               638 arch/arm/common/bL_switcher.c 		cluster = MPIDR_AFFINITY_LEVEL(cpu_logical_map(cpu), 1);
cpu               639 arch/arm/common/bL_switcher.c 		if (cluster == bL_switcher_cpu_original_cluster[cpu])
cpu               642 arch/arm/common/bL_switcher.c 		t->wanted_cluster = bL_switcher_cpu_original_cluster[cpu];
cpu               643 arch/arm/common/bL_switcher.c 		task = bL_switcher_thread_create(cpu, t);
cpu               647 arch/arm/common/bL_switcher.c 			cluster = MPIDR_AFFINITY_LEVEL(cpu_logical_map(cpu), 1);
cpu               648 arch/arm/common/bL_switcher.c 			if (cluster == bL_switcher_cpu_original_cluster[cpu])
cpu               653 arch/arm/common/bL_switcher.c 			__func__, cpu);
cpu               655 arch/arm/common/bL_switcher.c 			__func__, bL_switcher_cpu_pairing[cpu]);
cpu               656 arch/arm/common/bL_switcher.c 		cpumask_clear_cpu(bL_switcher_cpu_pairing[cpu],
cpu               756 arch/arm/common/bL_switcher.c static int bL_switcher_cpu_pre(unsigned int cpu)
cpu               763 arch/arm/common/bL_switcher.c 	pairing = bL_switcher_cpu_pairing[cpu];
cpu                22 arch/arm/common/bL_switcher_dummy_if.c 	unsigned int cpu, cluster;
cpu                39 arch/arm/common/bL_switcher_dummy_if.c 	cpu = val[0] - '0';
cpu                41 arch/arm/common/bL_switcher_dummy_if.c 	ret = bL_switch_request(cpu, cluster);
cpu                34 arch/arm/common/mcpm_entry.c static void __mcpm_cpu_going_down(unsigned int cpu, unsigned int cluster)
cpu                36 arch/arm/common/mcpm_entry.c 	mcpm_sync.clusters[cluster].cpus[cpu].cpu = CPU_GOING_DOWN;
cpu                37 arch/arm/common/mcpm_entry.c 	sync_cache_w(&mcpm_sync.clusters[cluster].cpus[cpu].cpu);
cpu                47 arch/arm/common/mcpm_entry.c static void __mcpm_cpu_down(unsigned int cpu, unsigned int cluster)
cpu                50 arch/arm/common/mcpm_entry.c 	mcpm_sync.clusters[cluster].cpus[cpu].cpu = CPU_DOWN;
cpu                51 arch/arm/common/mcpm_entry.c 	sync_cache_w(&mcpm_sync.clusters[cluster].cpus[cpu].cpu);
cpu                82 arch/arm/common/mcpm_entry.c static bool __mcpm_outbound_enter_critical(unsigned int cpu, unsigned int cluster)
cpu               107 arch/arm/common/mcpm_entry.c 		if (i == cpu)
cpu               111 arch/arm/common/mcpm_entry.c 			cpustate = c->cpus[i].cpu;
cpu               116 arch/arm/common/mcpm_entry.c 			sync_cache_r(&c->cpus[i].cpu);
cpu               143 arch/arm/common/mcpm_entry.c void mcpm_set_entry_vector(unsigned cpu, unsigned cluster, void *ptr)
cpu               146 arch/arm/common/mcpm_entry.c 	mcpm_entry_vectors[cluster][cpu] = val;
cpu               147 arch/arm/common/mcpm_entry.c 	sync_cache_w(&mcpm_entry_vectors[cluster][cpu]);
cpu               152 arch/arm/common/mcpm_entry.c void mcpm_set_early_poke(unsigned cpu, unsigned cluster,
cpu               155 arch/arm/common/mcpm_entry.c 	unsigned long *poke = &mcpm_entry_early_pokes[cluster][cpu][0];
cpu               195 arch/arm/common/mcpm_entry.c int mcpm_cpu_power_up(unsigned int cpu, unsigned int cluster)
cpu               200 arch/arm/common/mcpm_entry.c 	pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
cpu               212 arch/arm/common/mcpm_entry.c 	cpu_is_down = !mcpm_cpu_use_count[cluster][cpu];
cpu               215 arch/arm/common/mcpm_entry.c 	mcpm_cpu_use_count[cluster][cpu]++;
cpu               224 arch/arm/common/mcpm_entry.c 	BUG_ON(mcpm_cpu_use_count[cluster][cpu] != 1 &&
cpu               225 arch/arm/common/mcpm_entry.c 	       mcpm_cpu_use_count[cluster][cpu] != 2);
cpu               230 arch/arm/common/mcpm_entry.c 		ret = platform_ops->cpu_powerup(cpu, cluster);
cpu               241 arch/arm/common/mcpm_entry.c 	unsigned int mpidr, cpu, cluster;
cpu               246 arch/arm/common/mcpm_entry.c 	cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
cpu               248 arch/arm/common/mcpm_entry.c 	pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
cpu               255 arch/arm/common/mcpm_entry.c 	__mcpm_cpu_going_down(cpu, cluster);
cpu               259 arch/arm/common/mcpm_entry.c 	mcpm_cpu_use_count[cluster][cpu]--;
cpu               260 arch/arm/common/mcpm_entry.c 	BUG_ON(mcpm_cpu_use_count[cluster][cpu] != 0 &&
cpu               261 arch/arm/common/mcpm_entry.c 	       mcpm_cpu_use_count[cluster][cpu] != 1);
cpu               262 arch/arm/common/mcpm_entry.c 	cpu_going_down = !mcpm_cpu_use_count[cluster][cpu];
cpu               265 arch/arm/common/mcpm_entry.c 	if (last_man && __mcpm_outbound_enter_critical(cpu, cluster)) {
cpu               266 arch/arm/common/mcpm_entry.c 		platform_ops->cpu_powerdown_prepare(cpu, cluster);
cpu               273 arch/arm/common/mcpm_entry.c 			platform_ops->cpu_powerdown_prepare(cpu, cluster);
cpu               286 arch/arm/common/mcpm_entry.c 	__mcpm_cpu_down(cpu, cluster);
cpu               308 arch/arm/common/mcpm_entry.c int mcpm_wait_for_cpu_powerdown(unsigned int cpu, unsigned int cluster)
cpu               315 arch/arm/common/mcpm_entry.c 	ret = platform_ops->wait_for_powerdown(cpu, cluster);
cpu               318 arch/arm/common/mcpm_entry.c 			__func__, cpu, cluster, ret);
cpu               331 arch/arm/common/mcpm_entry.c 		unsigned int cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
cpu               334 arch/arm/common/mcpm_entry.c 		platform_ops->cpu_suspend_prepare(cpu, cluster);
cpu               342 arch/arm/common/mcpm_entry.c 	unsigned int mpidr, cpu, cluster;
cpu               350 arch/arm/common/mcpm_entry.c 	cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
cpu               355 arch/arm/common/mcpm_entry.c 	cpu_was_down = !mcpm_cpu_use_count[cluster][cpu];
cpu               361 arch/arm/common/mcpm_entry.c 		mcpm_cpu_use_count[cluster][cpu] = 1;
cpu               363 arch/arm/common/mcpm_entry.c 		platform_ops->cpu_is_up(cpu, cluster);
cpu               377 arch/arm/common/mcpm_entry.c 	unsigned int cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
cpu               381 arch/arm/common/mcpm_entry.c 	mcpm_set_entry_vector(cpu, cluster, cpu_resume_no_hyp);
cpu               384 arch/arm/common/mcpm_entry.c 	__mcpm_cpu_going_down(cpu, cluster);
cpu               385 arch/arm/common/mcpm_entry.c 	BUG_ON(!__mcpm_outbound_enter_critical(cpu, cluster));
cpu               388 arch/arm/common/mcpm_entry.c 	__mcpm_cpu_down(cpu, cluster);
cpu               439 arch/arm/common/mcpm_entry.c 			mcpm_sync.clusters[i].cpus[j].cpu = CPU_DOWN;
cpu               445 arch/arm/common/mcpm_entry.c 		mcpm_sync.clusters[this_cluster].cpus[i].cpu = CPU_UP;
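The mcpm_entry.c lines above keep two pieces of bookkeeping in step: a per-cluster, per-CPU use count (mcpm_cpu_use_count) that decides whether a power_up/power_down call really has to touch hardware, and a cache-synchronised per-CPU state (CPU_UP / CPU_GOING_DOWN / CPU_DOWN) that lets the last CPU leaving a cluster verify, without locks, that every other CPU is genuinely down before shutting the cluster off. Below is a minimal single-threaded model of that last-man check; the real __mcpm_outbound_enter_critical() additionally performs cache maintenance, handles an inbound CPU coming up, and spins while another CPU is still marked as going down rather than giving up.

#include <stdio.h>
#include <stdbool.h>

enum cpu_state { CPU_DOWN, CPU_COMING_UP, CPU_UP, CPU_GOING_DOWN };

#define CPUS_PER_CLUSTER 4

static enum cpu_state cluster_cpu[CPUS_PER_CLUSTER] = {
	CPU_UP, CPU_DOWN, CPU_DOWN, CPU_GOING_DOWN,
};

/* Rough analogue of __mcpm_outbound_enter_critical(): the CPU that
 * believes it is the last one up may only proceed with cluster
 * shutdown if every *other* CPU in the cluster is CPU_DOWN. */
static bool outbound_enter_critical(int self)
{
	for (int i = 0; i < CPUS_PER_CLUSTER; i++) {
		if (i == self)
			continue;
		if (cluster_cpu[i] != CPU_DOWN)
			return false;	/* someone else still live or in flux */
	}
	return true;
}

int main(void)
{
	int self = 0;

	cluster_cpu[self] = CPU_GOING_DOWN;	/* __mcpm_cpu_going_down() */
	printf("last man may power off cluster: %s\n",
	       outbound_enter_critical(self) ? "yes" : "no");

	cluster_cpu[3] = CPU_DOWN;		/* the straggler finishes  */
	printf("last man may power off cluster: %s\n",
	       outbound_enter_critical(self) ? "yes" : "no");
	return 0;
}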
cpu                19 arch/arm/common/mcpm_platsmp.c static void cpu_to_pcpu(unsigned int cpu,
cpu                24 arch/arm/common/mcpm_platsmp.c 	mpidr = cpu_logical_map(cpu);
cpu                29 arch/arm/common/mcpm_platsmp.c static int mcpm_boot_secondary(unsigned int cpu, struct task_struct *idle)
cpu                34 arch/arm/common/mcpm_platsmp.c 	cpu_to_pcpu(cpu, &pcpu, &pcluster);
cpu                37 arch/arm/common/mcpm_platsmp.c 		 __func__, cpu, pcpu, pcluster);
cpu                44 arch/arm/common/mcpm_platsmp.c 	arch_send_wakeup_ipi_mask(cpumask_of(cpu));
cpu                49 arch/arm/common/mcpm_platsmp.c static void mcpm_secondary_init(unsigned int cpu)
cpu                56 arch/arm/common/mcpm_platsmp.c static int mcpm_cpu_kill(unsigned int cpu)
cpu                60 arch/arm/common/mcpm_platsmp.c 	cpu_to_pcpu(cpu, &pcpu, &pcluster);
cpu                65 arch/arm/common/mcpm_platsmp.c static bool mcpm_cpu_can_disable(unsigned int cpu)
cpu                71 arch/arm/common/mcpm_platsmp.c static void mcpm_cpu_die(unsigned int cpu)
cpu               240 arch/arm/crypto/crc32-ce-glue.c MODULE_DEVICE_TABLE(cpu, crc32_cpu_feature);
cpu                17 arch/arm/include/asm/bL_switcher.h int bL_switch_request_cb(unsigned int cpu, unsigned int new_cluster_id,
cpu                20 arch/arm/include/asm/bL_switcher.h static inline int bL_switch_request(unsigned int cpu, unsigned int new_cluster_id)
cpu                22 arch/arm/include/asm/bL_switcher.h 	return bL_switch_request_cb(cpu, new_cluster_id, NULL, NULL);
cpu                14 arch/arm/include/asm/cpu.h 	struct cpu	cpu;
cpu                35 arch/arm/include/asm/cpuidle.h 	int (*init)(struct device_node *, int cpu);
cpu                50 arch/arm/include/asm/cpuidle.h extern int arm_cpuidle_init(int cpu);
cpu                32 arch/arm/include/asm/firmware.h 	int (*set_cpu_boot_addr)(int cpu, unsigned long boot_addr);
cpu                36 arch/arm/include/asm/firmware.h 	int (*get_cpu_boot_addr)(int cpu, unsigned long *boot_addr);
cpu                40 arch/arm/include/asm/firmware.h 	int (*cpu_boot)(int cpu);
cpu                41 arch/arm/include/asm/fpstate.h 	__u32 cpu;
cpu                21 arch/arm/include/asm/hardirq.h #define __inc_irq_stat(cpu, member)	__IRQ_STAT(cpu, member)++
cpu                22 arch/arm/include/asm/hardirq.h #define __get_irq_stat(cpu, member)	__IRQ_STAT(cpu, member)
cpu                25 arch/arm/include/asm/hardirq.h u64 smp_irq_stat_cpu(unsigned int cpu);
cpu                27 arch/arm/include/asm/hardirq.h #define smp_irq_stat_cpu(cpu)	0
cpu               334 arch/arm/include/asm/kvm_host.h static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
cpu                44 arch/arm/include/asm/mcpm.h void mcpm_set_entry_vector(unsigned cpu, unsigned cluster, void *ptr);
cpu                51 arch/arm/include/asm/mcpm.h void mcpm_set_early_poke(unsigned cpu, unsigned cluster,
cpu                84 arch/arm/include/asm/mcpm.h int mcpm_cpu_power_up(unsigned int cpu, unsigned int cluster);
cpu               132 arch/arm/include/asm/mcpm.h int mcpm_wait_for_cpu_powerdown(unsigned int cpu, unsigned int cluster);
cpu               219 arch/arm/include/asm/mcpm.h 	int (*cpu_powerup)(unsigned int cpu, unsigned int cluster);
cpu               221 arch/arm/include/asm/mcpm.h 	void (*cpu_suspend_prepare)(unsigned int cpu, unsigned int cluster);
cpu               222 arch/arm/include/asm/mcpm.h 	void (*cpu_powerdown_prepare)(unsigned int cpu, unsigned int cluster);
cpu               226 arch/arm/include/asm/mcpm.h 	void (*cpu_is_up)(unsigned int cpu, unsigned int cluster);
cpu               228 arch/arm/include/asm/mcpm.h 	int (*wait_for_powerdown)(unsigned int cpu, unsigned int cluster);
cpu               287 arch/arm/include/asm/mcpm.h 		s8 cpu __aligned(__CACHE_WRITEBACK_GRANULE);
cpu               132 arch/arm/include/asm/mmu_context.h 	unsigned int cpu = smp_processor_id();
cpu               141 arch/arm/include/asm/mmu_context.h 	    !cpumask_test_cpu(cpu, mm_cpumask(next)))
cpu               144 arch/arm/include/asm/mmu_context.h 	if (!cpumask_test_and_set_cpu(cpu, mm_cpumask(next)) || prev != next) {
cpu               147 arch/arm/include/asm/mmu_context.h 			cpumask_clear_cpu(cpu, mm_cpumask(prev));
cpu                11 arch/arm/include/asm/paravirt.h 	unsigned long long (*steal_clock)(int cpu);
cpu                20 arch/arm/include/asm/paravirt.h static inline u64 paravirt_steal_clock(int cpu)
cpu                22 arch/arm/include/asm/paravirt.h 	return pv_ops.time.steal_clock(cpu);
cpu               118 arch/arm/include/asm/proc-fns.h 	unsigned int cpu = smp_processor_id();
cpu               119 arch/arm/include/asm/proc-fns.h 	*cpu_vtable[cpu] = *p;
cpu               120 arch/arm/include/asm/proc-fns.h 	WARN_ON_ONCE(cpu_vtable[cpu]->dcache_clean_area !=
cpu               122 arch/arm/include/asm/proc-fns.h 	WARN_ON_ONCE(cpu_vtable[cpu]->set_pte_ext !=
cpu                18 arch/arm/include/asm/smp.h #define raw_smp_processor_id() (current_thread_info()->cpu)
cpu                72 arch/arm/include/asm/smp.h extern void __cpu_die(unsigned int cpu);
cpu                74 arch/arm/include/asm/smp.h extern void arch_send_call_function_single_ipi(int cpu);
cpu                78 arch/arm/include/asm/smp.h extern int register_ipi_completion(struct completion *completion, int cpu);
cpu                94 arch/arm/include/asm/smp.h 	void (*smp_secondary_init)(unsigned int cpu);
cpu                99 arch/arm/include/asm/smp.h 	int  (*smp_boot_secondary)(unsigned int cpu, struct task_struct *idle);
cpu               101 arch/arm/include/asm/smp.h 	int  (*cpu_kill)(unsigned int cpu);
cpu               102 arch/arm/include/asm/smp.h 	void (*cpu_die)(unsigned int cpu);
cpu               103 arch/arm/include/asm/smp.h 	bool  (*cpu_can_disable)(unsigned int cpu);
cpu               104 arch/arm/include/asm/smp.h 	int  (*cpu_disable)(unsigned int cpu);
cpu                36 arch/arm/include/asm/smp_plat.h static inline unsigned int smp_cpuid_part(int cpu)
cpu                38 arch/arm/include/asm/smp_plat.h 	struct cpuinfo_arm *cpu_info = &per_cpu(cpu_data, cpu);
cpu                73 arch/arm/include/asm/smp_plat.h #define cpu_logical_map(cpu)	__cpu_logical_map[cpu]
cpu                82 arch/arm/include/asm/smp_plat.h 	int cpu;
cpu                83 arch/arm/include/asm/smp_plat.h 	for (cpu = 0; cpu < nr_cpu_ids; cpu++)
cpu                84 arch/arm/include/asm/smp_plat.h 		if (cpu_logical_map(cpu) == mpidr)
cpu                85 arch/arm/include/asm/smp_plat.h 			return cpu;
cpu               112 arch/arm/include/asm/smp_plat.h extern int platform_can_hotplug_cpu(unsigned int cpu);
cpu               114 arch/arm/include/asm/smp_plat.h static inline int platform_can_hotplug_cpu(unsigned int cpu)
cpu                51 arch/arm/include/asm/thread_info.h 	__u32			cpu;		/* cpu */
cpu                58 arch/arm/kernel/asm-offsets.c   DEFINE(TI_CPU,		offsetof(struct thread_info, cpu));
cpu                67 arch/arm/kernel/asm-offsets.c   DEFINE(VFP_CPU,		offsetof(union vfp_state, hard.cpu));
cpu                48 arch/arm/kernel/cpuidle.c 	int cpu = smp_processor_id();
cpu                50 arch/arm/kernel/cpuidle.c 	return cpuidle_ops[cpu].suspend(index);
cpu                87 arch/arm/kernel/cpuidle.c static int __init arm_cpuidle_read_ops(struct device_node *dn, int cpu)
cpu               109 arch/arm/kernel/cpuidle.c 	cpuidle_ops[cpu] = *ops; /* structure copy */
cpu               134 arch/arm/kernel/cpuidle.c int __init arm_cpuidle_init(int cpu)
cpu               136 arch/arm/kernel/cpuidle.c 	struct device_node *cpu_node = of_cpu_device_node_get(cpu);
cpu               142 arch/arm/kernel/cpuidle.c 	ret = arm_cpuidle_read_ops(cpu_node, cpu);
cpu               144 arch/arm/kernel/cpuidle.c 		ret = cpuidle_ops[cpu].init(cpu_node, cpu);
cpu                74 arch/arm/kernel/devtree.c 	struct device_node *cpu, *cpus;
cpu                86 arch/arm/kernel/devtree.c 	for_each_of_cpu_node(cpu) {
cpu                91 arch/arm/kernel/devtree.c 		pr_debug(" * %pOF...\n", cpu);
cpu                97 arch/arm/kernel/devtree.c 		cell = of_get_property(cpu, "reg", &prop_bytes);
cpu                99 arch/arm/kernel/devtree.c 			pr_debug(" * %pOF missing reg property\n", cpu);
cpu               100 arch/arm/kernel/devtree.c 			of_node_put(cpu);
cpu               114 arch/arm/kernel/devtree.c 			of_node_put(cpu);
cpu               128 arch/arm/kernel/devtree.c 				of_node_put(cpu);
cpu               152 arch/arm/kernel/devtree.c 			of_node_put(cpu);
cpu               159 arch/arm/kernel/devtree.c 			found_method = set_smp_ops_by_method(cpu);
cpu               186 arch/arm/kernel/devtree.c bool arch_match_cpu_phys_id(int cpu, u64 phys_id)
cpu               188 arch/arm/kernel/devtree.c 	return phys_id == cpu_logical_map(cpu);
cpu               887 arch/arm/kernel/hw_breakpoint.c 	int cpu = smp_processor_id();
cpu               890 arch/arm/kernel/hw_breakpoint.c 		instr, cpu);
cpu               893 arch/arm/kernel/hw_breakpoint.c 	cpumask_set_cpu(cpu, &debug_err_mask);
cpu               922 arch/arm/kernel/hw_breakpoint.c static void reset_ctrl_regs(unsigned int cpu)
cpu               963 arch/arm/kernel/hw_breakpoint.c 		pr_warn_once("CPU %d debug is powered down!\n", cpu);
cpu               964 arch/arm/kernel/hw_breakpoint.c 		cpumask_or(&debug_err_mask, &debug_err_mask, cpumask_of(cpu));
cpu               983 arch/arm/kernel/hw_breakpoint.c 	if (cpumask_intersects(&debug_err_mask, cpumask_of(cpu))) {
cpu               984 arch/arm/kernel/hw_breakpoint.c 		pr_warn_once("CPU %d failed to disable vector catch\n", cpu);
cpu              1003 arch/arm/kernel/hw_breakpoint.c 	if (cpumask_intersects(&debug_err_mask, cpumask_of(cpu))) {
cpu              1004 arch/arm/kernel/hw_breakpoint.c 		pr_warn_once("CPU %d failed to clear debug register pairs\n", cpu);
cpu              1014 arch/arm/kernel/hw_breakpoint.c 		cpumask_or(&debug_err_mask, &debug_err_mask, cpumask_of(cpu));
cpu              1017 arch/arm/kernel/hw_breakpoint.c static int dbg_reset_online(unsigned int cpu)
cpu              1020 arch/arm/kernel/hw_breakpoint.c 	reset_ctrl_regs(cpu);
cpu                45 arch/arm/kernel/psci_smp.c static int psci_boot_secondary(unsigned int cpu, struct task_struct *idle)
cpu                48 arch/arm/kernel/psci_smp.c 		return psci_ops.cpu_on(cpu_logical_map(cpu),
cpu                54 arch/arm/kernel/psci_smp.c int psci_cpu_disable(unsigned int cpu)
cpu                61 arch/arm/kernel/psci_smp.c 	if (psci_tos_resident_on(cpu))
cpu                67 arch/arm/kernel/psci_smp.c void psci_cpu_die(unsigned int cpu)
cpu                76 arch/arm/kernel/psci_smp.c 	panic("psci: cpu %d failed to shutdown\n", cpu);
cpu                79 arch/arm/kernel/psci_smp.c int psci_cpu_kill(unsigned int cpu)
cpu                92 arch/arm/kernel/psci_smp.c 		err = psci_ops.affinity_info(cpu_logical_map(cpu), 0);
cpu                94 arch/arm/kernel/psci_smp.c 			pr_info("CPU%d killed.\n", cpu);
cpu               103 arch/arm/kernel/psci_smp.c 			cpu, err);
cpu               525 arch/arm/kernel/setup.c 	unsigned int cpu = smp_processor_id();
cpu               526 arch/arm/kernel/setup.c 	struct stack *stk = &stacks[cpu];
cpu               528 arch/arm/kernel/setup.c 	if (cpu >= NR_CPUS) {
cpu               529 arch/arm/kernel/setup.c 		pr_crit("CPU%u: bad primary CPU number\n", cpu);
cpu               537 arch/arm/kernel/setup.c 	set_my_cpu_offset(per_cpu_offset(cpu));
cpu               589 arch/arm/kernel/setup.c 	u32 cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
cpu               591 arch/arm/kernel/setup.c 	cpu_logical_map(0) = cpu;
cpu               593 arch/arm/kernel/setup.c 		cpu_logical_map(i) = i == cpu ? 0 : i;
cpu              1179 arch/arm/kernel/setup.c 	int cpu;
cpu              1181 arch/arm/kernel/setup.c 	for_each_possible_cpu(cpu) {
cpu              1182 arch/arm/kernel/setup.c 		struct cpuinfo_arm *cpuinfo = &per_cpu(cpu_data, cpu);
cpu              1183 arch/arm/kernel/setup.c 		cpuinfo->cpu.hotpluggable = platform_can_hotplug_cpu(cpu);
cpu              1184 arch/arm/kernel/setup.c 		register_cpu(&cpuinfo->cpu, cpu);
cpu               102 arch/arm/kernel/smp.c static int secondary_biglittle_prepare(unsigned int cpu)
cpu               104 arch/arm/kernel/smp.c 	if (!cpu_vtable[cpu])
cpu               105 arch/arm/kernel/smp.c 		cpu_vtable[cpu] = kzalloc(sizeof(*cpu_vtable[cpu]), GFP_KERNEL);
cpu               107 arch/arm/kernel/smp.c 	return cpu_vtable[cpu] ? 0 : -ENOMEM;
cpu               115 arch/arm/kernel/smp.c static int secondary_biglittle_prepare(unsigned int cpu)
cpu               125 arch/arm/kernel/smp.c int __cpu_up(unsigned int cpu, struct task_struct *idle)
cpu               132 arch/arm/kernel/smp.c 	ret = secondary_biglittle_prepare(cpu);
cpu               154 arch/arm/kernel/smp.c 	ret = smp_ops.smp_boot_secondary(cpu, idle);
cpu               163 arch/arm/kernel/smp.c 		if (!cpu_online(cpu)) {
cpu               164 arch/arm/kernel/smp.c 			pr_crit("CPU%u: failed to come online\n", cpu);
cpu               168 arch/arm/kernel/smp.c 		pr_err("CPU%u: failed to boot: %d\n", cpu, ret);
cpu               199 arch/arm/kernel/smp.c static int platform_cpu_kill(unsigned int cpu)
cpu               202 arch/arm/kernel/smp.c 		return smp_ops.cpu_kill(cpu);
cpu               206 arch/arm/kernel/smp.c static int platform_cpu_disable(unsigned int cpu)
cpu               209 arch/arm/kernel/smp.c 		return smp_ops.cpu_disable(cpu);
cpu               214 arch/arm/kernel/smp.c int platform_can_hotplug_cpu(unsigned int cpu)
cpu               221 arch/arm/kernel/smp.c 		return smp_ops.cpu_can_disable(cpu);
cpu               228 arch/arm/kernel/smp.c 	return cpu != 0;
cpu               236 arch/arm/kernel/smp.c 	unsigned int cpu = smp_processor_id();
cpu               239 arch/arm/kernel/smp.c 	ret = platform_cpu_disable(cpu);
cpu               244 arch/arm/kernel/smp.c 	remove_cpu_topology(cpu);
cpu               251 arch/arm/kernel/smp.c 	set_cpu_online(cpu, false);
cpu               275 arch/arm/kernel/smp.c void __cpu_die(unsigned int cpu)
cpu               277 arch/arm/kernel/smp.c 	if (!cpu_wait_death(cpu, 5)) {
cpu               278 arch/arm/kernel/smp.c 		pr_err("CPU%u: cpu didn't die\n", cpu);
cpu               281 arch/arm/kernel/smp.c 	pr_debug("CPU%u: shutdown\n", cpu);
cpu               283 arch/arm/kernel/smp.c 	clear_tasks_mm_cpumask(cpu);
cpu               291 arch/arm/kernel/smp.c 	if (!platform_cpu_kill(cpu))
cpu               292 arch/arm/kernel/smp.c 		pr_err("CPU%u: unable to kill\n", cpu);
cpu               305 arch/arm/kernel/smp.c 	unsigned int cpu = smp_processor_id();
cpu               347 arch/arm/kernel/smp.c 		smp_ops.cpu_die(cpu);
cpu               350 arch/arm/kernel/smp.c 		cpu);
cpu               387 arch/arm/kernel/smp.c 	unsigned int cpu;
cpu               404 arch/arm/kernel/smp.c 	cpu = smp_processor_id();
cpu               407 arch/arm/kernel/smp.c 	cpumask_set_cpu(cpu, mm_cpumask(mm));
cpu               414 arch/arm/kernel/smp.c 	pr_debug("CPU%u: Booted secondary processor\n", cpu);
cpu               423 arch/arm/kernel/smp.c 		smp_ops.smp_secondary_init(cpu);
cpu               425 arch/arm/kernel/smp.c 	notify_cpu_starting(cpu);
cpu               429 arch/arm/kernel/smp.c 	smp_store_cpu_info(cpu);
cpu               436 arch/arm/kernel/smp.c 	set_cpu_online(cpu, true);
cpu               454 arch/arm/kernel/smp.c 	int cpu;
cpu               457 arch/arm/kernel/smp.c 	for_each_online_cpu(cpu)
cpu               458 arch/arm/kernel/smp.c 		bogosum += per_cpu(cpu_data, cpu).loops_per_jiffy;
cpu               532 arch/arm/kernel/smp.c 	unsigned int cpu, i;
cpu               537 arch/arm/kernel/smp.c 		for_each_online_cpu(cpu)
cpu               539 arch/arm/kernel/smp.c 				   __get_irq_stat(cpu, ipi_irqs[i]));
cpu               545 arch/arm/kernel/smp.c u64 smp_irq_stat_cpu(unsigned int cpu)
cpu               551 arch/arm/kernel/smp.c 		sum += __get_irq_stat(cpu, ipi_irqs[i]);
cpu               566 arch/arm/kernel/smp.c void arch_send_call_function_single_ipi(int cpu)
cpu               568 arch/arm/kernel/smp.c 	smp_cross_call(cpumask_of(cpu), IPI_CALL_FUNC);
cpu               591 arch/arm/kernel/smp.c static void ipi_cpu_stop(unsigned int cpu)
cpu               595 arch/arm/kernel/smp.c 		pr_crit("CPU%u: stopping\n", cpu);
cpu               600 arch/arm/kernel/smp.c 	set_cpu_online(cpu, false);
cpu               613 arch/arm/kernel/smp.c int register_ipi_completion(struct completion *completion, int cpu)
cpu               615 arch/arm/kernel/smp.c 	per_cpu(cpu_completion, cpu) = completion;
cpu               619 arch/arm/kernel/smp.c static void ipi_complete(unsigned int cpu)
cpu               621 arch/arm/kernel/smp.c 	complete(per_cpu(cpu_completion, cpu));
cpu               634 arch/arm/kernel/smp.c 	unsigned int cpu = smp_processor_id();
cpu               639 arch/arm/kernel/smp.c 		__inc_irq_stat(cpu, ipi_irqs[ipinr]);
cpu               666 arch/arm/kernel/smp.c 		ipi_cpu_stop(cpu);
cpu               680 arch/arm/kernel/smp.c 		ipi_complete(cpu);
cpu               694 arch/arm/kernel/smp.c 		        cpu, ipinr);
cpu               703 arch/arm/kernel/smp.c void smp_send_reschedule(int cpu)
cpu               705 arch/arm/kernel/smp.c 	smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE);
cpu               762 arch/arm/kernel/smp.c 	int cpu, first = cpumask_first(cpus);
cpu               769 arch/arm/kernel/smp.c 		for_each_cpu(cpu, cpus) {
cpu               770 arch/arm/kernel/smp.c 			per_cpu(l_p_j_ref, cpu) =
cpu               771 arch/arm/kernel/smp.c 				per_cpu(cpu_data, cpu).loops_per_jiffy;
cpu               772 arch/arm/kernel/smp.c 			per_cpu(l_p_j_ref_freq, cpu) = freq->old;
cpu               789 arch/arm/kernel/smp.c 		for_each_cpu(cpu, cpus)
cpu               790 arch/arm/kernel/smp.c 			per_cpu(cpu_data, cpu).loops_per_jiffy = lpj;
cpu                78 arch/arm/kernel/smp_scu.c 	int cpu = MPIDR_AFFINITY_LEVEL(cpu_logical_map(logical_cpu), 0);
cpu                80 arch/arm/kernel/smp_scu.c 	if (mode > 3 || mode == 1 || cpu > 3)
cpu                83 arch/arm/kernel/smp_scu.c 	val = readb_relaxed(scu_base + SCU_CPU_STATUS + cpu);
cpu                86 arch/arm/kernel/smp_scu.c 	writeb_relaxed(val, scu_base + SCU_CPU_STATUS + cpu);
cpu               107 arch/arm/kernel/smp_scu.c int scu_cpu_power_enable(void __iomem *scu_base, unsigned int cpu)
cpu               109 arch/arm/kernel/smp_scu.c 	return scu_set_power_mode_internal(scu_base, cpu, SCU_PM_NORMAL);
cpu               115 arch/arm/kernel/smp_scu.c 	int cpu = MPIDR_AFFINITY_LEVEL(cpu_logical_map(logical_cpu), 0);
cpu               117 arch/arm/kernel/smp_scu.c 	if (cpu > 3)
cpu               120 arch/arm/kernel/smp_scu.c 	val = readb_relaxed(scu_base + SCU_CPU_STATUS + cpu);
cpu               222 arch/arm/kernel/smp_twd.c 	int cpu = smp_processor_id();
cpu               228 arch/arm/kernel/smp_twd.c 	if (per_cpu(percpu_setup_called, cpu)) {
cpu               234 arch/arm/kernel/smp_twd.c 	per_cpu(percpu_setup_called, cpu) = true;
cpu               253 arch/arm/kernel/smp_twd.c 	clk->cpumask = cpumask_of(cpu);
cpu               260 arch/arm/kernel/smp_twd.c static int twd_timer_starting_cpu(unsigned int cpu)
cpu               266 arch/arm/kernel/smp_twd.c static int twd_timer_dying_cpu(unsigned int cpu)
cpu                72 arch/arm/kernel/topology.c #define cpu_capacity(cpu)	__cpu_capacity[cpu]
cpu                92 arch/arm/kernel/topology.c 	int cpu = 0;
cpu                97 arch/arm/kernel/topology.c 	for_each_possible_cpu(cpu) {
cpu               102 arch/arm/kernel/topology.c 		cn = of_get_cpu_node(cpu, NULL);
cpu               104 arch/arm/kernel/topology.c 			pr_err("missing device node for CPU %d\n", cpu);
cpu               108 arch/arm/kernel/topology.c 		if (topology_parse_cpu_capacity(cn, cpu)) {
cpu               138 arch/arm/kernel/topology.c 		cpu_capacity(cpu) = capacity;
cpu               164 arch/arm/kernel/topology.c static void update_cpu_capacity(unsigned int cpu)
cpu               166 arch/arm/kernel/topology.c 	if (!cpu_capacity(cpu) || cap_from_dt)
cpu               169 arch/arm/kernel/topology.c 	topology_set_cpu_scale(cpu, cpu_capacity(cpu) / middle_capacity);
cpu               172 arch/arm/kernel/topology.c 		cpu, topology_get_cpu_scale(cpu));
cpu               184 arch/arm/kernel/topology.c const struct cpumask *cpu_corepower_mask(int cpu)
cpu               186 arch/arm/kernel/topology.c 	return &cpu_topology[cpu].thread_sibling;
cpu               300 arch/arm/kernel/traps.c 	int cpu;
cpu               307 arch/arm/kernel/traps.c 	cpu = smp_processor_id();
cpu               309 arch/arm/kernel/traps.c 		if (cpu == die_owner)
cpu               315 arch/arm/kernel/traps.c 	die_owner = cpu;
cpu                38 arch/arm/mach-actions/platsmp.c static int s500_wakeup_secondary(unsigned int cpu)
cpu                42 arch/arm/mach-actions/platsmp.c 	if (cpu > 3)
cpu                46 arch/arm/mach-actions/platsmp.c 	switch (cpu) {
cpu                67 arch/arm/mach-actions/platsmp.c 	       timer_base_addr + OWL_CPU1_ADDR + (cpu - 1) * 4);
cpu                69 arch/arm/mach-actions/platsmp.c 	       timer_base_addr + OWL_CPU1_FLAG + (cpu - 1) * 4);
cpu                77 arch/arm/mach-actions/platsmp.c static int s500_smp_boot_secondary(unsigned int cpu, struct task_struct *idle)
cpu                81 arch/arm/mach-actions/platsmp.c 	ret = s500_wakeup_secondary(cpu);
cpu                87 arch/arm/mach-actions/platsmp.c 	smp_send_reschedule(cpu);
cpu                89 arch/arm/mach-actions/platsmp.c 	writel(0, timer_base_addr + OWL_CPU1_ADDR + (cpu - 1) * 4);
cpu                90 arch/arm/mach-actions/platsmp.c 	writel(0, timer_base_addr + OWL_CPU1_FLAG + (cpu - 1) * 4);
cpu                18 arch/arm/mach-alpine/alpine_cpu_pm.c #define AL_SYSFAB_POWER_CONTROL(cpu)	(0x2000 + (cpu)*0x100 + 0x20)
cpu                17 arch/arm/mach-alpine/platsmp.c static int alpine_boot_secondary(unsigned int cpu, struct task_struct *idle)
cpu                28 arch/arm/mach-alpine/platsmp.c 	return alpine_cpu_wakeup(cpu_logical_map(cpu), (uint32_t)addr);
cpu                15 arch/arm/mach-aspeed/platsmp.c static int aspeed_g6_boot_secondary(unsigned int cpu, struct task_struct *idle)
cpu                27 arch/arm/mach-aspeed/platsmp.c 	writel_relaxed((0xABBAAB00 | (cpu & 0xff)), base + BOOT_SIG);
cpu                31 arch/arm/mach-axxia/platsmp.c static int axxia_boot_secondary(unsigned int cpu, struct task_struct *idle)
cpu                47 arch/arm/mach-axxia/platsmp.c 	tmp &= ~(1 << cpu);
cpu                56 arch/arm/mach-axxia/platsmp.c 	int cpu;
cpu                62 arch/arm/mach-axxia/platsmp.c 	for_each_possible_cpu(cpu) {
cpu                66 arch/arm/mach-axxia/platsmp.c 		np = of_get_cpu_node(cpu, NULL);
cpu                73 arch/arm/mach-axxia/platsmp.c 			set_cpu_present(cpu, true);
cpu                88 arch/arm/mach-bcm/bcm63xx_pmb.c 				     unsigned int *cpu,
cpu                94 arch/arm/mach-bcm/bcm63xx_pmb.c 	ret = of_property_read_u32(dn, "reg", cpu);
cpu               127 arch/arm/mach-bcm/bcm63xx_pmb.c 	unsigned int cpu, addr;
cpu               132 arch/arm/mach-bcm/bcm63xx_pmb.c 	ret = bcm63xx_pmb_get_resources(dn, &base, &cpu, &addr);
cpu               137 arch/arm/mach-bcm/bcm63xx_pmb.c 	WARN_ON(cpu > 1);
cpu               149 arch/arm/mach-bcm/bcm63xx_pmb.c 	if (ctrl & CPU_RESET_N(cpu)) {
cpu               150 arch/arm/mach-bcm/bcm63xx_pmb.c 		pr_info("PMB: CPU%d is already powered on\n", cpu);
cpu               156 arch/arm/mach-bcm/bcm63xx_pmb.c 	ret = bpcm_rd(base, addr, ARM_PWR_CONTROL(cpu), &val);
cpu               162 arch/arm/mach-bcm/bcm63xx_pmb.c 	ret = bpcm_wr_rd_mask(base, addr, ARM_PWR_CONTROL(cpu), &val,
cpu               169 arch/arm/mach-bcm/bcm63xx_pmb.c 	ret = bpcm_wr_rd_mask(base, addr, ARM_PWR_CONTROL(cpu), &val,
cpu               176 arch/arm/mach-bcm/bcm63xx_pmb.c 	ret = bpcm_wr(base, addr, ARM_PWR_CONTROL(cpu), val);
cpu               183 arch/arm/mach-bcm/bcm63xx_pmb.c 	ret = bpcm_wr(base, addr, ARM_PWR_CONTROL(cpu), val);
cpu               189 arch/arm/mach-bcm/bcm63xx_pmb.c 	ret = bpcm_wr_rd_mask(base, addr, ARM_PWR_CONTROL(cpu), &val,
cpu               196 arch/arm/mach-bcm/bcm63xx_pmb.c 	ret = bpcm_wr_rd_mask(base, addr, ARM_PWR_CONTROL(cpu), &val,
cpu               203 arch/arm/mach-bcm/bcm63xx_pmb.c 	ret = bpcm_wr(base, addr, ARM_PWR_CONTROL(cpu), val);
cpu               208 arch/arm/mach-bcm/bcm63xx_pmb.c 	ctrl |= CPU_RESET_N(cpu);
cpu               106 arch/arm/mach-bcm/bcm63xx_smp.c static int bcm63138_smp_boot_secondary(unsigned int cpu,
cpu               129 arch/arm/mach-bcm/bcm63xx_smp.c 	dn = of_get_cpu_node(cpu, NULL);
cpu               131 arch/arm/mach-bcm/bcm63xx_smp.c 		pr_err("SMP: failed to locate secondary CPU%d node\n", cpu);
cpu                67 arch/arm/mach-bcm/platsmp-brcmstb.c static int per_cpu_sw_state_rd(u32 cpu)
cpu                69 arch/arm/mach-bcm/platsmp-brcmstb.c 	sync_cache_r(SHIFT_PERCPU_PTR(&per_cpu_sw_state, per_cpu_offset(cpu)));
cpu                70 arch/arm/mach-bcm/platsmp-brcmstb.c 	return per_cpu(per_cpu_sw_state, cpu);
cpu                73 arch/arm/mach-bcm/platsmp-brcmstb.c static void per_cpu_sw_state_wr(u32 cpu, int val)
cpu                76 arch/arm/mach-bcm/platsmp-brcmstb.c 	per_cpu(per_cpu_sw_state, cpu) = val;
cpu                77 arch/arm/mach-bcm/platsmp-brcmstb.c 	sync_cache_w(SHIFT_PERCPU_PTR(&per_cpu_sw_state, per_cpu_offset(cpu)));
cpu                80 arch/arm/mach-bcm/platsmp-brcmstb.c static inline void per_cpu_sw_state_wr(u32 cpu, int val) { }
cpu                83 arch/arm/mach-bcm/platsmp-brcmstb.c static void __iomem *pwr_ctrl_get_base(u32 cpu)
cpu                86 arch/arm/mach-bcm/platsmp-brcmstb.c 	base += (cpu_logical_map(cpu) * 4);
cpu                90 arch/arm/mach-bcm/platsmp-brcmstb.c static u32 pwr_ctrl_rd(u32 cpu)
cpu                92 arch/arm/mach-bcm/platsmp-brcmstb.c 	void __iomem *base = pwr_ctrl_get_base(cpu);
cpu                96 arch/arm/mach-bcm/platsmp-brcmstb.c static void pwr_ctrl_set(unsigned int cpu, u32 val, u32 mask)
cpu                98 arch/arm/mach-bcm/platsmp-brcmstb.c 	void __iomem *base = pwr_ctrl_get_base(cpu);
cpu               102 arch/arm/mach-bcm/platsmp-brcmstb.c static void pwr_ctrl_clr(unsigned int cpu, u32 val, u32 mask)
cpu               104 arch/arm/mach-bcm/platsmp-brcmstb.c 	void __iomem *base = pwr_ctrl_get_base(cpu);
cpu               109 arch/arm/mach-bcm/platsmp-brcmstb.c static int pwr_ctrl_wait_tmout(unsigned int cpu, u32 set, u32 mask)
cpu               115 arch/arm/mach-bcm/platsmp-brcmstb.c 		tmp = pwr_ctrl_rd(cpu) & mask;
cpu               120 arch/arm/mach-bcm/platsmp-brcmstb.c 	tmp = pwr_ctrl_rd(cpu) & mask;
cpu               127 arch/arm/mach-bcm/platsmp-brcmstb.c static void cpu_rst_cfg_set(u32 cpu, int set)
cpu               132 arch/arm/mach-bcm/platsmp-brcmstb.c 		val |= BIT(cpu_logical_map(cpu));
cpu               134 arch/arm/mach-bcm/platsmp-brcmstb.c 		val &= ~BIT(cpu_logical_map(cpu));
cpu               138 arch/arm/mach-bcm/platsmp-brcmstb.c static void cpu_set_boot_addr(u32 cpu, unsigned long boot_addr)
cpu               140 arch/arm/mach-bcm/platsmp-brcmstb.c 	const int reg_ofs = cpu_logical_map(cpu) * 8;
cpu               145 arch/arm/mach-bcm/platsmp-brcmstb.c static void brcmstb_cpu_boot(u32 cpu)
cpu               148 arch/arm/mach-bcm/platsmp-brcmstb.c 	per_cpu_sw_state_wr(cpu, 1);
cpu               154 arch/arm/mach-bcm/platsmp-brcmstb.c 	cpu_set_boot_addr(cpu, __pa_symbol(secondary_startup));
cpu               157 arch/arm/mach-bcm/platsmp-brcmstb.c 	cpu_rst_cfg_set(cpu, 0);
cpu               160 arch/arm/mach-bcm/platsmp-brcmstb.c static void brcmstb_cpu_power_on(u32 cpu)
cpu               166 arch/arm/mach-bcm/platsmp-brcmstb.c 	pwr_ctrl_set(cpu, ZONE_MAN_ISO_CNTL_MASK, 0xffffff00);
cpu               167 arch/arm/mach-bcm/platsmp-brcmstb.c 	pwr_ctrl_set(cpu, ZONE_MANUAL_CONTROL_MASK, -1);
cpu               168 arch/arm/mach-bcm/platsmp-brcmstb.c 	pwr_ctrl_set(cpu, ZONE_RESERVED_1_MASK, -1);
cpu               170 arch/arm/mach-bcm/platsmp-brcmstb.c 	pwr_ctrl_set(cpu, ZONE_MAN_MEM_PWR_MASK, -1);
cpu               172 arch/arm/mach-bcm/platsmp-brcmstb.c 	if (pwr_ctrl_wait_tmout(cpu, 1, ZONE_MEM_PWR_STATE_MASK))
cpu               175 arch/arm/mach-bcm/platsmp-brcmstb.c 	pwr_ctrl_set(cpu, ZONE_MAN_CLKEN_MASK, -1);
cpu               177 arch/arm/mach-bcm/platsmp-brcmstb.c 	if (pwr_ctrl_wait_tmout(cpu, 1, ZONE_DPG_PWR_STATE_MASK))
cpu               180 arch/arm/mach-bcm/platsmp-brcmstb.c 	pwr_ctrl_clr(cpu, ZONE_MAN_ISO_CNTL_MASK, -1);
cpu               181 arch/arm/mach-bcm/platsmp-brcmstb.c 	pwr_ctrl_set(cpu, ZONE_MAN_RESET_CNTL_MASK, -1);
cpu               184 arch/arm/mach-bcm/platsmp-brcmstb.c static int brcmstb_cpu_get_power_state(u32 cpu)
cpu               186 arch/arm/mach-bcm/platsmp-brcmstb.c 	int tmp = pwr_ctrl_rd(cpu);
cpu               192 arch/arm/mach-bcm/platsmp-brcmstb.c static void brcmstb_cpu_die(u32 cpu)
cpu               196 arch/arm/mach-bcm/platsmp-brcmstb.c 	per_cpu_sw_state_wr(cpu, 0);
cpu               206 arch/arm/mach-bcm/platsmp-brcmstb.c static int brcmstb_cpu_kill(u32 cpu)
cpu               214 arch/arm/mach-bcm/platsmp-brcmstb.c 	if (cpu == 0) {
cpu               219 arch/arm/mach-bcm/platsmp-brcmstb.c 	while (per_cpu_sw_state_rd(cpu))
cpu               222 arch/arm/mach-bcm/platsmp-brcmstb.c 	pwr_ctrl_set(cpu, ZONE_MANUAL_CONTROL_MASK, -1);
cpu               223 arch/arm/mach-bcm/platsmp-brcmstb.c 	pwr_ctrl_clr(cpu, ZONE_MAN_RESET_CNTL_MASK, -1);
cpu               224 arch/arm/mach-bcm/platsmp-brcmstb.c 	pwr_ctrl_clr(cpu, ZONE_MAN_CLKEN_MASK, -1);
cpu               225 arch/arm/mach-bcm/platsmp-brcmstb.c 	pwr_ctrl_set(cpu, ZONE_MAN_ISO_CNTL_MASK, -1);
cpu               226 arch/arm/mach-bcm/platsmp-brcmstb.c 	pwr_ctrl_clr(cpu, ZONE_MAN_MEM_PWR_MASK, -1);
cpu               228 arch/arm/mach-bcm/platsmp-brcmstb.c 	if (pwr_ctrl_wait_tmout(cpu, 0, ZONE_MEM_PWR_STATE_MASK))
cpu               231 arch/arm/mach-bcm/platsmp-brcmstb.c 	pwr_ctrl_clr(cpu, ZONE_RESERVED_1_MASK, -1);
cpu               233 arch/arm/mach-bcm/platsmp-brcmstb.c 	if (pwr_ctrl_wait_tmout(cpu, 0, ZONE_DPG_PWR_STATE_MASK))
cpu               240 arch/arm/mach-bcm/platsmp-brcmstb.c 	cpu_rst_cfg_set(cpu, 1);
cpu               347 arch/arm/mach-bcm/platsmp-brcmstb.c static int brcmstb_boot_secondary(unsigned int cpu, struct task_struct *idle)
cpu               354 arch/arm/mach-bcm/platsmp-brcmstb.c 	if (brcmstb_cpu_get_power_state(cpu) == 0)
cpu               355 arch/arm/mach-bcm/platsmp-brcmstb.c 		brcmstb_cpu_power_on(cpu);
cpu               357 arch/arm/mach-bcm/platsmp-brcmstb.c 	brcmstb_cpu_boot(cpu);
cpu                76 arch/arm/mach-bcm/platsmp.c static u32 secondary_boot_addr_for(unsigned int cpu)
cpu                79 arch/arm/mach-bcm/platsmp.c 	struct device_node *cpu_node = of_get_cpu_node(cpu, NULL);
cpu                82 arch/arm/mach-bcm/platsmp.c 		pr_err("Failed to find device tree node for CPU%u\n", cpu);
cpu                90 arch/arm/mach-bcm/platsmp.c 			cpu);
cpu                97 arch/arm/mach-bcm/platsmp.c static int nsp_write_lut(unsigned int cpu)
cpu               101 arch/arm/mach-bcm/platsmp.c 	const u32 secondary_boot_addr = secondary_boot_addr_for(cpu);
cpu               109 arch/arm/mach-bcm/platsmp.c 		pr_warn("unable to ioremap SKU-ROM LUT register for cpu %u\n", cpu);
cpu               156 arch/arm/mach-bcm/platsmp.c static int kona_boot_secondary(unsigned int cpu, struct task_struct *idle)
cpu               164 arch/arm/mach-bcm/platsmp.c 	const u32 secondary_boot_addr = secondary_boot_addr_for(cpu);
cpu               166 arch/arm/mach-bcm/platsmp.c 	cpu_id = cpu_logical_map(cpu);
cpu               214 arch/arm/mach-bcm/platsmp.c #define CDC_CMD_REG(cpu)	(CDC_CMD_OFFSET + 4*(cpu))
cpu               221 arch/arm/mach-bcm/platsmp.c static int bcm23550_boot_secondary(unsigned int cpu, struct task_struct *idle)
cpu               247 arch/arm/mach-bcm/platsmp.c 	ret = kona_boot_secondary(cpu, idle);
cpu               254 arch/arm/mach-bcm/platsmp.c 	writel_relaxed(CDC_CMD, cdc_base + CDC_CMD_REG(cpu));
cpu               262 arch/arm/mach-bcm/platsmp.c static int nsp_boot_secondary(unsigned int cpu, struct task_struct *idle)
cpu               270 arch/arm/mach-bcm/platsmp.c 	ret = nsp_write_lut(cpu);
cpu               277 arch/arm/mach-bcm/platsmp.c 	arch_send_wakeup_ipi_mask(cpumask_of(cpu));
cpu               283 arch/arm/mach-bcm/platsmp.c static int bcm2836_boot_secondary(unsigned int cpu, struct task_struct *idle)
cpu               305 arch/arm/mach-bcm/platsmp.c 	       intc_base + LOCAL_MAILBOX3_SET0 + 16 * cpu);
cpu                33 arch/arm/mach-berlin/platsmp.c static inline void berlin_perform_reset_cpu(unsigned int cpu)
cpu                38 arch/arm/mach-berlin/platsmp.c 	val &= ~BIT(cpu_logical_map(cpu));
cpu                40 arch/arm/mach-berlin/platsmp.c 	val |= BIT(cpu_logical_map(cpu));
cpu                44 arch/arm/mach-berlin/platsmp.c static int berlin_boot_secondary(unsigned int cpu, struct task_struct *idle)
cpu                53 arch/arm/mach-berlin/platsmp.c 	berlin_perform_reset_cpu(cpu);
cpu               100 arch/arm/mach-berlin/platsmp.c static void berlin_cpu_die(unsigned int cpu)
cpu               107 arch/arm/mach-berlin/platsmp.c static int berlin_cpu_kill(unsigned int cpu)
cpu               112 arch/arm/mach-berlin/platsmp.c 	val &= ~BIT(cpu_logical_map(cpu));
cpu               106 arch/arm/mach-cns3xxx/pm.c 	int cpu;
cpu               113 arch/arm/mach-cns3xxx/pm.c 	cpu = (300 + ((cpu_sel / 3) * 100) + ((cpu_sel % 3) * 33)) >> div_sel;
cpu               115 arch/arm/mach-cns3xxx/pm.c 	return cpu;
cpu               115 arch/arm/mach-exynos/common.h void exynos_set_boot_flag(unsigned int cpu, unsigned int mode);
cpu               116 arch/arm/mach-exynos/common.h void exynos_clear_boot_flag(unsigned int cpu, unsigned int mode);
cpu               129 arch/arm/mach-exynos/common.h extern void exynos_cpu_power_down(int cpu);
cpu               130 arch/arm/mach-exynos/common.h extern void exynos_cpu_power_up(int cpu);
cpu               131 arch/arm/mach-exynos/common.h extern int  exynos_cpu_power_state(int cpu);
cpu                58 arch/arm/mach-exynos/firmware.c static int exynos_cpu_boot(int cpu)
cpu                70 arch/arm/mach-exynos/firmware.c 	exynos_smc(SMC_CMD_CPU1BOOT, cpu, 0, 0);
cpu                74 arch/arm/mach-exynos/firmware.c static int exynos_set_cpu_boot_addr(int cpu, unsigned long boot_addr)
cpu                89 arch/arm/mach-exynos/firmware.c 		boot_reg += 4 * cpu;
cpu                95 arch/arm/mach-exynos/firmware.c static int exynos_get_cpu_boot_addr(int cpu, unsigned long *boot_addr)
cpu               105 arch/arm/mach-exynos/firmware.c 		boot_reg += 4 * cpu;
cpu               233 arch/arm/mach-exynos/firmware.c void exynos_set_boot_flag(unsigned int cpu, unsigned int mode)
cpu               237 arch/arm/mach-exynos/firmware.c 	tmp = readl_relaxed(REG_CPU_STATE_ADDR + cpu * 4);
cpu               243 arch/arm/mach-exynos/firmware.c 	writel_relaxed(tmp, REG_CPU_STATE_ADDR + cpu * 4);
cpu               246 arch/arm/mach-exynos/firmware.c void exynos_clear_boot_flag(unsigned int cpu, unsigned int mode)
cpu               250 arch/arm/mach-exynos/firmware.c 	tmp = readl_relaxed(REG_CPU_STATE_ADDR + cpu * 4);
cpu               252 arch/arm/mach-exynos/firmware.c 	writel_relaxed(tmp, REG_CPU_STATE_ADDR + cpu * 4);
cpu                58 arch/arm/mach-exynos/mcpm-exynos.c static int exynos_cpu_powerup(unsigned int cpu, unsigned int cluster)
cpu                60 arch/arm/mach-exynos/mcpm-exynos.c 	unsigned int cpunr = cpu + (cluster * EXYNOS5420_CPUS_PER_CLUSTER);
cpu                62 arch/arm/mach-exynos/mcpm-exynos.c 	pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
cpu                63 arch/arm/mach-exynos/mcpm-exynos.c 	if (cpu >= EXYNOS5420_CPUS_PER_CLUSTER ||
cpu                93 arch/arm/mach-exynos/mcpm-exynos.c 				       cpu, cluster);
cpu                98 arch/arm/mach-exynos/mcpm-exynos.c 			pmu_raw_writel(EXYNOS5420_KFC_CORE_RESET(cpu),
cpu               116 arch/arm/mach-exynos/mcpm-exynos.c static void exynos_cpu_powerdown_prepare(unsigned int cpu, unsigned int cluster)
cpu               118 arch/arm/mach-exynos/mcpm-exynos.c 	unsigned int cpunr = cpu + (cluster * EXYNOS5420_CPUS_PER_CLUSTER);
cpu               120 arch/arm/mach-exynos/mcpm-exynos.c 	pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
cpu               121 arch/arm/mach-exynos/mcpm-exynos.c 	BUG_ON(cpu >= EXYNOS5420_CPUS_PER_CLUSTER ||
cpu               163 arch/arm/mach-exynos/mcpm-exynos.c static int exynos_wait_for_powerdown(unsigned int cpu, unsigned int cluster)
cpu               166 arch/arm/mach-exynos/mcpm-exynos.c 	unsigned int cpunr = cpu + (cluster * EXYNOS5420_CPUS_PER_CLUSTER);
cpu               168 arch/arm/mach-exynos/mcpm-exynos.c 	pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
cpu               169 arch/arm/mach-exynos/mcpm-exynos.c 	BUG_ON(cpu >= EXYNOS5420_CPUS_PER_CLUSTER ||
cpu               184 arch/arm/mach-exynos/mcpm-exynos.c static void exynos_cpu_is_up(unsigned int cpu, unsigned int cluster)
cpu               187 arch/arm/mach-exynos/mcpm-exynos.c 	exynos_cpu_powerup(cpu, cluster);
cpu                51 arch/arm/mach-exynos/platsmp.c static inline void platform_do_lowpower(unsigned int cpu, int *spurious)
cpu                53 arch/arm/mach-exynos/platsmp.c 	u32 mpidr = cpu_logical_map(cpu);
cpu                90 arch/arm/mach-exynos/platsmp.c void exynos_cpu_power_down(int cpu)
cpu                94 arch/arm/mach-exynos/platsmp.c 	if (cpu == 0 && (soc_is_exynos5420() || soc_is_exynos5800())) {
cpu               106 arch/arm/mach-exynos/platsmp.c 	core_conf = pmu_raw_readl(EXYNOS_ARM_CORE_CONFIGURATION(cpu));
cpu               108 arch/arm/mach-exynos/platsmp.c 	pmu_raw_writel(core_conf, EXYNOS_ARM_CORE_CONFIGURATION(cpu));
cpu               117 arch/arm/mach-exynos/platsmp.c void exynos_cpu_power_up(int cpu)
cpu               125 arch/arm/mach-exynos/platsmp.c 			EXYNOS_ARM_CORE_CONFIGURATION(cpu));
cpu               133 arch/arm/mach-exynos/platsmp.c int exynos_cpu_power_state(int cpu)
cpu               135 arch/arm/mach-exynos/platsmp.c 	return (pmu_raw_readl(EXYNOS_ARM_CORE_STATUS(cpu)) &
cpu               196 arch/arm/mach-exynos/platsmp.c static inline void __iomem *cpu_boot_reg(int cpu)
cpu               204 arch/arm/mach-exynos/platsmp.c 		boot_reg += 4*cpu;
cpu               256 arch/arm/mach-exynos/platsmp.c static void exynos_secondary_init(unsigned int cpu)
cpu               321 arch/arm/mach-exynos/platsmp.c static int exynos_boot_secondary(unsigned int cpu, struct task_struct *idle)
cpu               324 arch/arm/mach-exynos/platsmp.c 	u32 mpidr = cpu_logical_map(cpu);
cpu               389 arch/arm/mach-exynos/platsmp.c 			arch_send_wakeup_ipi_mask(cpumask_of(cpu));
cpu               426 arch/arm/mach-exynos/platsmp.c static void exynos_cpu_die(unsigned int cpu)
cpu               429 arch/arm/mach-exynos/platsmp.c 	u32 mpidr = cpu_logical_map(cpu);
cpu               434 arch/arm/mach-exynos/platsmp.c 	platform_do_lowpower(cpu, &spurious);
cpu               443 arch/arm/mach-exynos/platsmp.c 		pr_warn("CPU%u: %u spurious wakeup calls\n", cpu, spurious);
cpu               269 arch/arm/mach-exynos/suspend.c 	unsigned int cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
cpu               272 arch/arm/mach-exynos/suspend.c 		mcpm_set_entry_vector(cpu, cluster, exynos_cpu_resume);
cpu                29 arch/arm/mach-highbank/sysregs.h 	int cpu = MPIDR_AFFINITY_LEVEL(cpu_logical_map(smp_processor_id()), 0);
cpu                33 arch/arm/mach-highbank/sysregs.h 		writel_relaxed(1, sregs_base + SREG_CPU_PWR_CTRL(cpu));
cpu                38 arch/arm/mach-highbank/sysregs.h 	int cpu = MPIDR_AFFINITY_LEVEL(cpu_logical_map(smp_processor_id()), 0);
cpu                42 arch/arm/mach-highbank/sysregs.h 		writel_relaxed(0, sregs_base + SREG_CPU_PWR_CTRL(cpu));
cpu                 7 arch/arm/mach-hisi/core.h extern void hi3xxx_set_cpu_jump(int cpu, void *jump_addr);
cpu                 8 arch/arm/mach-hisi/core.h extern int hi3xxx_get_cpu_jump(int cpu);
cpu                11 arch/arm/mach-hisi/core.h extern void hi3xxx_cpu_die(unsigned int cpu);
cpu                12 arch/arm/mach-hisi/core.h extern int hi3xxx_cpu_kill(unsigned int cpu);
cpu                13 arch/arm/mach-hisi/core.h extern void hi3xxx_set_cpu(int cpu, bool enable);
cpu                15 arch/arm/mach-hisi/core.h extern void hix5hd2_set_cpu(int cpu, bool enable);
cpu                16 arch/arm/mach-hisi/core.h extern void hix5hd2_cpu_die(unsigned int cpu);
cpu                18 arch/arm/mach-hisi/core.h extern void hip01_set_cpu(int cpu, bool enable);
cpu                76 arch/arm/mach-hisi/hotplug.c static void set_cpu_hi3620(int cpu, bool enable)
cpu                82 arch/arm/mach-hisi/hotplug.c 		if ((cpu == 2) || (cpu == 3))
cpu                83 arch/arm/mach-hisi/hotplug.c 			writel_relaxed(CPU2_ISO_CTRL << (cpu - 2),
cpu                88 arch/arm/mach-hisi/hotplug.c 		writel_relaxed(0x01 << cpu, ctrl_base + SCCPUCOREEN);
cpu                93 arch/arm/mach-hisi/hotplug.c 		writel_relaxed(val << cpu, ctrl_base + SCCPURSTDIS);
cpu                96 arch/arm/mach-hisi/hotplug.c 		writel_relaxed(val << cpu, ctrl_base + SCCPURSTEN);
cpu                99 arch/arm/mach-hisi/hotplug.c 		if ((cpu == 2) || (cpu == 3))
cpu               100 arch/arm/mach-hisi/hotplug.c 			writel_relaxed(CPU2_ISO_CTRL << (cpu - 2),
cpu               106 arch/arm/mach-hisi/hotplug.c 		val &= ~(CPU0_WFI_MASK_CFG << cpu);
cpu               112 arch/arm/mach-hisi/hotplug.c 		writel_relaxed(val << cpu, ctrl_base + SCCPURSTDIS);
cpu               116 arch/arm/mach-hisi/hotplug.c 		val |= (CPU0_WFI_MASK_CFG << cpu);
cpu               120 arch/arm/mach-hisi/hotplug.c 		writel_relaxed(0x01 << cpu, ctrl_base + SCCPUCOREDIS);
cpu               122 arch/arm/mach-hisi/hotplug.c 		if ((cpu == 2) || (cpu == 3)) {
cpu               124 arch/arm/mach-hisi/hotplug.c 			writel_relaxed(CPU2_ISO_CTRL << (cpu - 2),
cpu               132 arch/arm/mach-hisi/hotplug.c 		writel_relaxed(val << cpu, ctrl_base + SCCPURSTEN);
cpu               134 arch/arm/mach-hisi/hotplug.c 		if ((cpu == 2) || (cpu == 3)) {
cpu               136 arch/arm/mach-hisi/hotplug.c 			writel_relaxed(CPU2_ISO_CTRL << (cpu - 2),
cpu               164 arch/arm/mach-hisi/hotplug.c void hi3xxx_set_cpu(int cpu, bool enable)
cpu               172 arch/arm/mach-hisi/hotplug.c 		set_cpu_hi3620(cpu, enable);
cpu               191 arch/arm/mach-hisi/hotplug.c void hix5hd2_set_cpu(int cpu, bool enable)
cpu               223 arch/arm/mach-hisi/hotplug.c void hip01_set_cpu(int cpu, bool enable)
cpu               273 arch/arm/mach-hisi/hotplug.c void hi3xxx_cpu_die(unsigned int cpu)
cpu               276 arch/arm/mach-hisi/hotplug.c 	hi3xxx_set_cpu_jump(cpu, phys_to_virt(0));
cpu               280 arch/arm/mach-hisi/hotplug.c 	panic("cpu %d unexpectedly exit from shutdown\n", cpu);
cpu               283 arch/arm/mach-hisi/hotplug.c int hi3xxx_cpu_kill(unsigned int cpu)
cpu               287 arch/arm/mach-hisi/hotplug.c 	while (hi3xxx_get_cpu_jump(cpu))
cpu               290 arch/arm/mach-hisi/hotplug.c 	hi3xxx_set_cpu(cpu, false);
cpu               294 arch/arm/mach-hisi/hotplug.c void hix5hd2_cpu_die(unsigned int cpu)
cpu               297 arch/arm/mach-hisi/hotplug.c 	hix5hd2_set_cpu(cpu, false);
cpu               100 arch/arm/mach-hisi/platmcpm.c 	unsigned int mpidr, cpu, cluster;
cpu               105 arch/arm/mach-hisi/platmcpm.c 	cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
cpu               110 arch/arm/mach-hisi/platmcpm.c 	if (cluster >= HIP04_MAX_CLUSTERS || cpu >= HIP04_MAX_CPUS_PER_CLUSTER)
cpu               115 arch/arm/mach-hisi/platmcpm.c 	if (hip04_cpu_table[cluster][cpu])
cpu               130 arch/arm/mach-hisi/platmcpm.c 	data = CORE_RESET_BIT(cpu) | NEON_RESET_BIT(cpu) | \
cpu               131 arch/arm/mach-hisi/platmcpm.c 	       CORE_DEBUG_RESET_BIT(cpu);
cpu               146 arch/arm/mach-hisi/platmcpm.c 	hip04_cpu_table[cluster][cpu]++;
cpu               155 arch/arm/mach-hisi/platmcpm.c 	unsigned int mpidr, cpu, cluster;
cpu               159 arch/arm/mach-hisi/platmcpm.c 	cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
cpu               163 arch/arm/mach-hisi/platmcpm.c 	hip04_cpu_table[cluster][cpu]--;
cpu               164 arch/arm/mach-hisi/platmcpm.c 	if (hip04_cpu_table[cluster][cpu] == 1) {
cpu               168 arch/arm/mach-hisi/platmcpm.c 	} else if (hip04_cpu_table[cluster][cpu] > 1) {
cpu               169 arch/arm/mach-hisi/platmcpm.c 		pr_err("Cluster %d CPU%d boots multiple times\n", cluster, cpu);
cpu               193 arch/arm/mach-hisi/platmcpm.c 	unsigned int mpidr, cpu, cluster;
cpu               197 arch/arm/mach-hisi/platmcpm.c 	cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
cpu               200 arch/arm/mach-hisi/platmcpm.c 	       cpu >= HIP04_MAX_CPUS_PER_CLUSTER);
cpu               205 arch/arm/mach-hisi/platmcpm.c 		if (hip04_cpu_table[cluster][cpu])
cpu               209 arch/arm/mach-hisi/platmcpm.c 		if (data & CORE_WFI_STATUS(cpu))
cpu               218 arch/arm/mach-hisi/platmcpm.c 	data = CORE_RESET_BIT(cpu) | NEON_RESET_BIT(cpu) | \
cpu               219 arch/arm/mach-hisi/platmcpm.c 	       CORE_DEBUG_RESET_BIT(cpu);
cpu               224 arch/arm/mach-hisi/platmcpm.c 		if (data & CORE_RESET_STATUS(cpu))
cpu               249 arch/arm/mach-hisi/platmcpm.c 	unsigned int mpidr, cpu, cluster;
cpu               252 arch/arm/mach-hisi/platmcpm.c 	cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
cpu               256 arch/arm/mach-hisi/platmcpm.c 	    cpu >= HIP04_MAX_CPUS_PER_CLUSTER) {
cpu               261 arch/arm/mach-hisi/platmcpm.c 	hip04_cpu_table[cluster][cpu] = 1;
cpu                23 arch/arm/mach-hisi/platsmp.c void hi3xxx_set_cpu_jump(int cpu, void *jump_addr)
cpu                25 arch/arm/mach-hisi/platsmp.c 	cpu = cpu_logical_map(cpu);
cpu                26 arch/arm/mach-hisi/platsmp.c 	if (!cpu || !ctrl_base)
cpu                28 arch/arm/mach-hisi/platsmp.c 	writel_relaxed(__pa_symbol(jump_addr), ctrl_base + ((cpu - 1) << 2));
cpu                31 arch/arm/mach-hisi/platsmp.c int hi3xxx_get_cpu_jump(int cpu)
cpu                33 arch/arm/mach-hisi/platsmp.c 	cpu = cpu_logical_map(cpu);
cpu                34 arch/arm/mach-hisi/platsmp.c 	if (!cpu || !ctrl_base)
cpu                36 arch/arm/mach-hisi/platsmp.c 	return readl_relaxed(ctrl_base + ((cpu - 1) << 2));
cpu                81 arch/arm/mach-hisi/platsmp.c static int hi3xxx_boot_secondary(unsigned int cpu, struct task_struct *idle)
cpu                83 arch/arm/mach-hisi/platsmp.c 	hi3xxx_set_cpu(cpu, true);
cpu                84 arch/arm/mach-hisi/platsmp.c 	hi3xxx_set_cpu_jump(cpu, secondary_startup);
cpu                85 arch/arm/mach-hisi/platsmp.c 	arch_send_wakeup_ipi_mask(cpumask_of(cpu));
cpu               114 arch/arm/mach-hisi/platsmp.c static int hix5hd2_boot_secondary(unsigned int cpu, struct task_struct *idle)
cpu               120 arch/arm/mach-hisi/platsmp.c 	hix5hd2_set_cpu(cpu, true);
cpu               121 arch/arm/mach-hisi/platsmp.c 	arch_send_wakeup_ipi_mask(cpumask_of(cpu));
cpu               149 arch/arm/mach-hisi/platsmp.c static int hip01_boot_secondary(unsigned int cpu, struct task_struct *idle)
cpu               171 arch/arm/mach-hisi/platsmp.c 	hip01_set_cpu(cpu, true);
cpu                80 arch/arm/mach-imx/common.h void imx_enable_cpu(int cpu, bool enable);
cpu                81 arch/arm/mach-imx/common.h void imx_set_cpu_jump(int cpu, void *jump_addr);
cpu                82 arch/arm/mach-imx/common.h u32 imx_get_cpu_arg(int cpu);
cpu                83 arch/arm/mach-imx/common.h void imx_set_cpu_arg(int cpu, u32 arg);
cpu               108 arch/arm/mach-imx/common.h void imx_cpu_die(unsigned int cpu);
cpu               109 arch/arm/mach-imx/common.h int imx_cpu_kill(unsigned int cpu);
cpu                31 arch/arm/mach-imx/cpu.c void imx_print_silicon_rev(const char *cpu, int srev)
cpu                34 arch/arm/mach-imx/cpu.c 		pr_info("CPU identified as %s, unknown revision\n", cpu);
cpu                37 arch/arm/mach-imx/cpu.c 				cpu, (srev >> 4) & 0xf, srev & 0xf);
cpu                40 arch/arm/mach-imx/hotplug.c void imx_cpu_die(unsigned int cpu)
cpu                48 arch/arm/mach-imx/hotplug.c 	imx_set_cpu_arg(cpu, ~0);
cpu                54 arch/arm/mach-imx/hotplug.c int imx_cpu_kill(unsigned int cpu)
cpu                58 arch/arm/mach-imx/hotplug.c 	while (imx_get_cpu_arg(cpu) == 0)
cpu                61 arch/arm/mach-imx/hotplug.c 	imx_enable_cpu(cpu, false);
cpu                62 arch/arm/mach-imx/hotplug.c 	imx_set_cpu_arg(cpu, 0);
cpu                99 arch/arm/mach-imx/mmdc.c 	cpumask_t cpu;
cpu               127 arch/arm/mach-imx/mmdc.c 	return cpumap_print_to_pagebuf(true, buf, &pmu_mmdc->cpu);
cpu               214 arch/arm/mach-imx/mmdc.c static int mmdc_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
cpu               219 arch/arm/mach-imx/mmdc.c 	if (!cpumask_test_and_clear_cpu(cpu, &pmu_mmdc->cpu))
cpu               222 arch/arm/mach-imx/mmdc.c 	target = cpumask_any_but(cpu_online_mask, cpu);
cpu               226 arch/arm/mach-imx/mmdc.c 	perf_pmu_migrate_context(&pmu_mmdc->pmu, cpu, target);
cpu               227 arch/arm/mach-imx/mmdc.c 	cpumask_set_cpu(target, &pmu_mmdc->cpu);
cpu               286 arch/arm/mach-imx/mmdc.c 	if (event->cpu < 0) {
cpu               300 arch/arm/mach-imx/mmdc.c 	event->cpu = cpumask_first(&pmu_mmdc->cpu);
cpu               509 arch/arm/mach-imx/mmdc.c 	cpumask_set_cpu(raw_smp_processor_id(), &pmu_mmdc->cpu);
cpu                43 arch/arm/mach-imx/platsmp.c static int imx_boot_secondary(unsigned int cpu, struct task_struct *idle)
cpu                45 arch/arm/mach-imx/platsmp.c 	imx_set_cpu_jump(cpu, v7_secondary_startup);
cpu                46 arch/arm/mach-imx/platsmp.c 	imx_enable_cpu(cpu, true);
cpu                97 arch/arm/mach-imx/platsmp.c static int ls1021a_boot_secondary(unsigned int cpu, struct task_struct *idle)
cpu                99 arch/arm/mach-imx/platsmp.c 	arch_send_wakeup_ipi_mask(cpumask_of(cpu));
cpu                79 arch/arm/mach-imx/src.c void imx_enable_cpu(int cpu, bool enable)
cpu                83 arch/arm/mach-imx/src.c 	cpu = cpu_logical_map(cpu);
cpu                84 arch/arm/mach-imx/src.c 	mask = 1 << (BP_SRC_SCR_CORE1_ENABLE + cpu - 1);
cpu                88 arch/arm/mach-imx/src.c 	val |= 1 << (BP_SRC_SCR_CORE1_RST + cpu - 1);
cpu                93 arch/arm/mach-imx/src.c void imx_set_cpu_jump(int cpu, void *jump_addr)
cpu                95 arch/arm/mach-imx/src.c 	cpu = cpu_logical_map(cpu);
cpu                97 arch/arm/mach-imx/src.c 		       src_base + SRC_GPR1 + cpu * 8);
cpu               100 arch/arm/mach-imx/src.c u32 imx_get_cpu_arg(int cpu)
cpu               102 arch/arm/mach-imx/src.c 	cpu = cpu_logical_map(cpu);
cpu               103 arch/arm/mach-imx/src.c 	return readl_relaxed(src_base + SRC_GPR1 + cpu * 8 + 4);
cpu               106 arch/arm/mach-imx/src.c void imx_set_cpu_arg(int cpu, u32 arg)
cpu               108 arch/arm/mach-imx/src.c 	cpu = cpu_logical_map(cpu);
cpu               109 arch/arm/mach-imx/src.c 	writel_relaxed(arg, src_base + SRC_GPR1 + cpu * 8 + 4);
cpu                17 arch/arm/mach-keystone/keystone.h extern u32 keystone_cpu_smc(u32 command, u32 cpu, u32 addr);
cpu                23 arch/arm/mach-keystone/platsmp.c static int keystone_smp_boot_secondary(unsigned int cpu,
cpu                30 arch/arm/mach-keystone/platsmp.c 		 cpu, start);
cpu                32 arch/arm/mach-keystone/platsmp.c 	error = keystone_cpu_smc(KEYSTONE_MON_CPU_UP_IDX, cpu, start);
cpu                34 arch/arm/mach-keystone/platsmp.c 		pr_err("CPU %d bringup failed with %d\n", cpu, error);
cpu                61 arch/arm/mach-mediatek/platsmp.c static int mtk_boot_secondary(unsigned int cpu, struct task_struct *idle)
cpu                66 arch/arm/mach-mediatek/platsmp.c 	if (!mtk_smp_info->core_keys[cpu-1])
cpu                69 arch/arm/mach-mediatek/platsmp.c 	writel_relaxed(mtk_smp_info->core_keys[cpu-1],
cpu                70 arch/arm/mach-mediatek/platsmp.c 		mtk_smp_base + mtk_smp_info->core_regs[cpu-1]);
cpu                72 arch/arm/mach-mediatek/platsmp.c 	arch_send_wakeup_ipi_mask(cpumask_of(cpu));
cpu                38 arch/arm/mach-meson/platsmp.c static struct reset_control *meson_smp_get_core_reset(int cpu)
cpu                40 arch/arm/mach-meson/platsmp.c 	struct device_node *np = of_get_cpu_node(cpu, 0);
cpu                45 arch/arm/mach-meson/platsmp.c static void meson_smp_set_cpu_ctrl(int cpu, bool on_off)
cpu                50 arch/arm/mach-meson/platsmp.c 		val |= BIT(cpu);
cpu                52 arch/arm/mach-meson/platsmp.c 		val &= ~BIT(cpu);
cpu               114 arch/arm/mach-meson/platsmp.c static void meson_smp_begin_secondary_boot(unsigned int cpu)
cpu               123 arch/arm/mach-meson/platsmp.c 	       sram_base + MESON_SMP_SRAM_CPU_CTRL_ADDR_REG(cpu));
cpu               129 arch/arm/mach-meson/platsmp.c 	scu_cpu_power_enable(scu_base, cpu);
cpu               132 arch/arm/mach-meson/platsmp.c static int meson_smp_finalize_secondary_boot(unsigned int cpu)
cpu               137 arch/arm/mach-meson/platsmp.c 	while (readl(sram_base + MESON_SMP_SRAM_CPU_CTRL_ADDR_REG(cpu))) {
cpu               140 arch/arm/mach-meson/platsmp.c 			       cpu);
cpu               146 arch/arm/mach-meson/platsmp.c 	       sram_base + MESON_SMP_SRAM_CPU_CTRL_ADDR_REG(cpu));
cpu               148 arch/arm/mach-meson/platsmp.c 	meson_smp_set_cpu_ctrl(cpu, true);
cpu               153 arch/arm/mach-meson/platsmp.c static int meson8_smp_boot_secondary(unsigned int cpu,
cpu               159 arch/arm/mach-meson/platsmp.c 	rstc = meson_smp_get_core_reset(cpu);
cpu               161 arch/arm/mach-meson/platsmp.c 		pr_err("Couldn't get the reset controller for CPU%d\n", cpu);
cpu               165 arch/arm/mach-meson/platsmp.c 	meson_smp_begin_secondary_boot(cpu);
cpu               170 arch/arm/mach-meson/platsmp.c 		pr_err("Failed to assert CPU%d reset\n", cpu);
cpu               176 arch/arm/mach-meson/platsmp.c 				 MESON_CPU_PWR_A9_CNTL1_M(cpu), 0);
cpu               178 arch/arm/mach-meson/platsmp.c 		pr_err("Couldn't wake up CPU%d\n", cpu);
cpu               185 arch/arm/mach-meson/platsmp.c 	ret = regmap_update_bits(pmu, MESON_CPU_AO_RTI_PWR_A9_CNTL0, BIT(cpu),
cpu               188 arch/arm/mach-meson/platsmp.c 		pr_err("Error when disabling isolation of CPU%d\n", cpu);
cpu               195 arch/arm/mach-meson/platsmp.c 		pr_err("Failed to de-assert CPU%d reset\n", cpu);
cpu               199 arch/arm/mach-meson/platsmp.c 	ret = meson_smp_finalize_secondary_boot(cpu);
cpu               209 arch/arm/mach-meson/platsmp.c static int meson8b_smp_boot_secondary(unsigned int cpu,
cpu               216 arch/arm/mach-meson/platsmp.c 	rstc = meson_smp_get_core_reset(cpu);
cpu               218 arch/arm/mach-meson/platsmp.c 		pr_err("Couldn't get the reset controller for CPU%d\n", cpu);
cpu               222 arch/arm/mach-meson/platsmp.c 	meson_smp_begin_secondary_boot(cpu);
cpu               226 arch/arm/mach-meson/platsmp.c 				 MESON_CPU_PWR_A9_CNTL0_M(cpu), 0);
cpu               228 arch/arm/mach-meson/platsmp.c 		pr_err("Couldn't power up CPU%d\n", cpu);
cpu               237 arch/arm/mach-meson/platsmp.c 		pr_err("Failed to assert CPU%d reset\n", cpu);
cpu               243 arch/arm/mach-meson/platsmp.c 				 MESON_CPU_PWR_A9_MEM_PD0_M(cpu), 0);
cpu               245 arch/arm/mach-meson/platsmp.c 		pr_err("Couldn't power up the memory for CPU%d\n", cpu);
cpu               251 arch/arm/mach-meson/platsmp.c 				 MESON_CPU_PWR_A9_CNTL1_M(cpu), 0);
cpu               253 arch/arm/mach-meson/platsmp.c 		pr_err("Couldn't wake up CPU%d\n", cpu);
cpu               260 arch/arm/mach-meson/platsmp.c 				       val & MESON_CPU_PWR_A9_CNTL1_ST(cpu),
cpu               263 arch/arm/mach-meson/platsmp.c 		pr_err("Timeout while polling PMU for CPU%d status\n", cpu);
cpu               268 arch/arm/mach-meson/platsmp.c 	ret = regmap_update_bits(pmu, MESON_CPU_AO_RTI_PWR_A9_CNTL0, BIT(cpu),
cpu               271 arch/arm/mach-meson/platsmp.c 		pr_err("Error when disabling isolation of CPU%d\n", cpu);
cpu               278 arch/arm/mach-meson/platsmp.c 		pr_err("Failed to de-assert CPU%d reset\n", cpu);
cpu               282 arch/arm/mach-meson/platsmp.c 	ret = meson_smp_finalize_secondary_boot(cpu);
cpu               293 arch/arm/mach-meson/platsmp.c static void meson8_smp_cpu_die(unsigned int cpu)
cpu               295 arch/arm/mach-meson/platsmp.c 	meson_smp_set_cpu_ctrl(cpu, false);
cpu               308 arch/arm/mach-meson/platsmp.c static int meson8_smp_cpu_kill(unsigned int cpu)
cpu               315 arch/arm/mach-meson/platsmp.c 		power_mode = scu_get_cpu_power_mode(scu_base, cpu);
cpu               325 arch/arm/mach-meson/platsmp.c 		       cpu);
cpu               332 arch/arm/mach-meson/platsmp.c 	ret = regmap_update_bits(pmu, MESON_CPU_AO_RTI_PWR_A9_CNTL0, BIT(cpu),
cpu               335 arch/arm/mach-meson/platsmp.c 		pr_err("Error when enabling isolation for CPU%d\n", cpu);
cpu               343 arch/arm/mach-meson/platsmp.c 				 MESON_CPU_PWR_A9_CNTL1_M(cpu), 0x3);
cpu               345 arch/arm/mach-meson/platsmp.c 		pr_err("Couldn't change sleep status of CPU%d\n", cpu);
cpu               352 arch/arm/mach-meson/platsmp.c static int meson8b_smp_cpu_kill(unsigned int cpu)
cpu               357 arch/arm/mach-meson/platsmp.c 		power_mode = scu_get_cpu_power_mode(scu_base, cpu);
cpu               367 arch/arm/mach-meson/platsmp.c 		       cpu);
cpu               375 arch/arm/mach-meson/platsmp.c 				 MESON_CPU_PWR_A9_CNTL0_M(cpu), 0x3);
cpu               377 arch/arm/mach-meson/platsmp.c 		pr_err("Couldn't power down CPU%d\n", cpu);
cpu               382 arch/arm/mach-meson/platsmp.c 	ret = regmap_update_bits(pmu, MESON_CPU_AO_RTI_PWR_A9_CNTL0, BIT(cpu),
cpu               385 arch/arm/mach-meson/platsmp.c 		pr_err("Error when enabling isolation for CPU%d\n", cpu);
cpu               393 arch/arm/mach-meson/platsmp.c 				 MESON_CPU_PWR_A9_CNTL1_M(cpu), 0x3);
cpu               395 arch/arm/mach-meson/platsmp.c 		pr_err("Couldn't change sleep status of CPU%d\n", cpu);
cpu               401 arch/arm/mach-meson/platsmp.c 				 MESON_CPU_PWR_A9_MEM_PD0_M(cpu), 0xf);
cpu               403 arch/arm/mach-meson/platsmp.c 		pr_err("Couldn't power down the memory of CPU%d\n", cpu);
cpu                25 arch/arm/mach-milbeaut/platsmp.c 	unsigned int mpidr, cpu, cluster;
cpu                31 arch/arm/mach-milbeaut/platsmp.c 	cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
cpu                34 arch/arm/mach-milbeaut/platsmp.c 	if (cpu >= M10V_MAX_CPU)
cpu                38 arch/arm/mach-milbeaut/platsmp.c 			__func__, cpu, l_cpu, cluster);
cpu                40 arch/arm/mach-milbeaut/platsmp.c 	writel(__pa_symbol(secondary_startup), m10v_smp_base + cpu * 4);
cpu                48 arch/arm/mach-milbeaut/platsmp.c 	unsigned int mpidr, cpu, cluster;
cpu                60 arch/arm/mach-milbeaut/platsmp.c 	cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
cpu                62 arch/arm/mach-milbeaut/platsmp.c 	pr_info("MCPM boot on cpu_%u cluster_%u\n", cpu, cluster);
cpu                64 arch/arm/mach-milbeaut/platsmp.c 	for (cpu = 0; cpu < M10V_MAX_CPU; cpu++)
cpu                65 arch/arm/mach-milbeaut/platsmp.c 		writel(KERNEL_UNBOOT_FLAG, m10v_smp_base + cpu * 4);
cpu                78 arch/arm/mach-milbeaut/platsmp.c 	unsigned int mpidr, cpu;
cpu                81 arch/arm/mach-milbeaut/platsmp.c 	cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
cpu                83 arch/arm/mach-milbeaut/platsmp.c 	writel(KERNEL_UNBOOT_FLAG, m10v_smp_base + cpu * 4);
cpu               114 arch/arm/mach-mvebu/coherency.c static int armada_xp_clear_l2_starting(unsigned int cpu)
cpu                21 arch/arm/mach-mvebu/common.h int mvebu_cpu_reset_deassert(int cpu);
cpu                24 arch/arm/mach-mvebu/cpu-reset.c #define CPU_RESET_OFFSET(cpu) (cpu * 0x8)
cpu                27 arch/arm/mach-mvebu/cpu-reset.c int mvebu_cpu_reset_deassert(int cpu)
cpu                34 arch/arm/mach-mvebu/cpu-reset.c 	if (CPU_RESET_OFFSET(cpu) >= cpu_reset_size)
cpu                37 arch/arm/mach-mvebu/cpu-reset.c 	reg = readl(cpu_reset_base + CPU_RESET_OFFSET(cpu));
cpu                39 arch/arm/mach-mvebu/cpu-reset.c 	writel(reg, cpu_reset_base + CPU_RESET_OFFSET(cpu));
cpu                27 arch/arm/mach-mvebu/platsmp-a9.c static int mvebu_cortex_a9_boot_secondary(unsigned int cpu,
cpu                32 arch/arm/mach-mvebu/platsmp-a9.c 	pr_info("Booting CPU %d\n", cpu);
cpu                40 arch/arm/mach-mvebu/platsmp-a9.c 	hw_cpu = cpu_logical_map(cpu);
cpu                51 arch/arm/mach-mvebu/platsmp-a9.c 	arch_send_wakeup_ipi_mask(cpumask_of(cpu));
cpu                69 arch/arm/mach-mvebu/platsmp-a9.c static void armada_38x_secondary_init(unsigned int cpu)
cpu                75 arch/arm/mach-mvebu/platsmp-a9.c static void armada_38x_cpu_die(unsigned int cpu)
cpu                90 arch/arm/mach-mvebu/platsmp-a9.c static int armada_38x_cpu_kill(unsigned int cpu)
cpu                40 arch/arm/mach-mvebu/platsmp.c static struct clk *get_cpu_clk(int cpu)
cpu                43 arch/arm/mach-mvebu/platsmp.c 	struct device_node *np = of_get_cpu_node(cpu, NULL);
cpu                53 arch/arm/mach-mvebu/platsmp.c static int armada_xp_boot_secondary(unsigned int cpu, struct task_struct *idle)
cpu                57 arch/arm/mach-mvebu/platsmp.c 	pr_info("Booting CPU %d\n", cpu);
cpu                59 arch/arm/mach-mvebu/platsmp.c 	hw_cpu = cpu_logical_map(cpu);
cpu                66 arch/arm/mach-mvebu/platsmp.c 	arch_send_wakeup_ipi_mask(cpumask_of(cpu));
cpu                89 arch/arm/mach-mvebu/platsmp.c static void armada_xp_secondary_init(unsigned int cpu)
cpu               102 arch/arm/mach-mvebu/platsmp.c static int armada_xp_sync_secondary_clk(unsigned int cpu)
cpu               104 arch/arm/mach-mvebu/platsmp.c 	struct clk *cpu_clk = get_cpu_clk(cpu);
cpu               151 arch/arm/mach-mvebu/platsmp.c static void armada_xp_cpu_die(unsigned int cpu)
cpu               166 arch/arm/mach-mvebu/platsmp.c static int armada_xp_cpu_kill(unsigned int cpu)
cpu               219 arch/arm/mach-mvebu/platsmp.c static int mv98dx3236_boot_secondary(unsigned int cpu, struct task_struct *idle)
cpu               223 arch/arm/mach-mvebu/platsmp.c 	hw_cpu = cpu_logical_map(cpu);
cpu               231 arch/arm/mach-mvebu/platsmp.c 	arch_send_wakeup_ipi_mask(cpumask_of(cpu));
cpu                48 arch/arm/mach-mvebu/pmsu.c #define PMSU_CONTROL_AND_CONFIG(cpu)	    ((cpu * 0x100) + 0x104)
cpu                53 arch/arm/mach-mvebu/pmsu.c #define PMSU_CPU_POWER_DOWN_CONTROL(cpu)    ((cpu * 0x100) + 0x108)
cpu                57 arch/arm/mach-mvebu/pmsu.c #define PMSU_STATUS_AND_MASK(cpu)	    ((cpu * 0x100) + 0x10c)
cpu                66 arch/arm/mach-mvebu/pmsu.c #define PMSU_EVENT_STATUS_AND_MASK(cpu)     ((cpu * 0x100) + 0x120)
cpu                70 arch/arm/mach-mvebu/pmsu.c #define PMSU_BOOT_ADDR_REDIRECT_OFFSET(cpu) ((cpu * 0x100) + 0x124)
cpu               540 arch/arm/mach-mvebu/pmsu.c 	u32 cpu = smp_processor_id();
cpu               546 arch/arm/mach-mvebu/pmsu.c 	reg = readl(pmsu_mp_base + PMSU_STATUS_AND_MASK(cpu));
cpu               550 arch/arm/mach-mvebu/pmsu.c 	writel(reg, pmsu_mp_base + PMSU_STATUS_AND_MASK(cpu));
cpu               553 arch/arm/mach-mvebu/pmsu.c 	reg = readl(pmsu_mp_base + PMSU_CONTROL_AND_CONFIG(cpu));
cpu               555 arch/arm/mach-mvebu/pmsu.c 	writel(reg, pmsu_mp_base + PMSU_CONTROL_AND_CONFIG(cpu));
cpu               564 arch/arm/mach-mvebu/pmsu.c 	reg = readl(pmsu_mp_base + PMSU_STATUS_AND_MASK(cpu));
cpu               566 arch/arm/mach-mvebu/pmsu.c 	writel(reg, pmsu_mp_base + PMSU_STATUS_AND_MASK(cpu));
cpu               571 arch/arm/mach-mvebu/pmsu.c int mvebu_pmsu_dfs_request(int cpu)
cpu               574 arch/arm/mach-mvebu/pmsu.c 	int hwcpu = cpu_logical_map(cpu);
cpu               588 arch/arm/mach-mvebu/pmsu.c 	smp_call_function_single(cpu, mvebu_pmsu_dfs_request_local,
cpu                24 arch/arm/mach-npcm/platsmp.c static int npcm7xx_smp_boot_secondary(unsigned int cpu,
cpu                22 arch/arm/mach-omap1/clock.h 	u16				cpu;
cpu                28 arch/arm/mach-omap1/clock.h 		 .cpu = cp,		\
cpu               802 arch/arm/mach-omap1/clock_data.c 		if (c->cpu & cpu_mask) {
cpu               271 arch/arm/mach-omap2/common.h extern void omap4_cpu_die(unsigned int cpu);
cpu               272 arch/arm/mach-omap2/common.h extern int omap4_cpu_kill(unsigned int cpu);
cpu               281 arch/arm/mach-omap2/common.h extern int omap4_enter_lowpower(unsigned int cpu, unsigned int power_state);
cpu               282 arch/arm/mach-omap2/common.h extern int omap4_hotplug_cpu(unsigned int cpu, unsigned int power_state);
cpu               284 arch/arm/mach-omap2/common.h static inline int omap4_enter_lowpower(unsigned int cpu,
cpu               291 arch/arm/mach-omap2/common.h static inline int omap4_hotplug_cpu(unsigned int cpu, unsigned int power_state)
cpu               108 arch/arm/mach-omap2/cpuidle44xx.c 	omap4_enter_lowpower(dev->cpu, cx->cpu_state);
cpu               132 arch/arm/mach-omap2/cpuidle44xx.c 	if (dev->cpu == 0 && cpumask_test_cpu(1, cpu_online_mask)) {
cpu               164 arch/arm/mach-omap2/cpuidle44xx.c 	if (dev->cpu == 0) {
cpu               176 arch/arm/mach-omap2/cpuidle44xx.c 	omap4_enter_lowpower(dev->cpu, cx->cpu_state);
cpu               177 arch/arm/mach-omap2/cpuidle44xx.c 	cpu_done[dev->cpu] = true;
cpu               180 arch/arm/mach-omap2/cpuidle44xx.c 	if (dev->cpu == 0 && cpumask_test_cpu(1, cpu_online_mask)) {
cpu               210 arch/arm/mach-omap2/cpuidle44xx.c 	if (dev->cpu == 0 && mpuss_can_lose_context)
cpu               217 arch/arm/mach-omap2/cpuidle44xx.c 	cpu_done[dev->cpu] = false;
cpu                27 arch/arm/mach-omap2/omap-hotplug.c void omap4_cpu_die(unsigned int cpu)
cpu                47 arch/arm/mach-omap2/omap-hotplug.c 		omap4_hotplug_cpu(cpu, PWRDM_POWER_OFF);
cpu                61 arch/arm/mach-omap2/omap-hotplug.c 		pr_debug("CPU%u: spurious wakeup call\n", cpu);
cpu                66 arch/arm/mach-omap2/omap-hotplug.c int omap4_cpu_kill(unsigned int cpu)
cpu               226 arch/arm/mach-omap2/omap-mpuss-lowpower.c int omap4_enter_lowpower(unsigned int cpu, unsigned int power_state)
cpu               228 arch/arm/mach-omap2/omap-mpuss-lowpower.c 	struct omap4_cpu_pm_info *pm_info = &per_cpu(omap4_pm_info, cpu);
cpu               270 arch/arm/mach-omap2/omap-mpuss-lowpower.c 	cpu_clear_prev_logic_pwrst(cpu);
cpu               273 arch/arm/mach-omap2/omap-mpuss-lowpower.c 	set_cpu_wakeup_addr(cpu, __pa_symbol(omap_pm_ops.resume));
cpu               274 arch/arm/mach-omap2/omap-mpuss-lowpower.c 	omap_pm_ops.scu_prepare(cpu, power_state);
cpu               275 arch/arm/mach-omap2/omap-mpuss-lowpower.c 	l2x0_pwrst_prepare(cpu, save_state);
cpu               285 arch/arm/mach-omap2/omap-mpuss-lowpower.c 	if (IS_PM44XX_ERRATUM(PM_OMAP4_ROM_SMP_BOOT_ERRATUM_GICD) && cpu)
cpu               308 arch/arm/mach-omap2/omap-mpuss-lowpower.c int omap4_hotplug_cpu(unsigned int cpu, unsigned int power_state)
cpu               310 arch/arm/mach-omap2/omap-mpuss-lowpower.c 	struct omap4_cpu_pm_info *pm_info = &per_cpu(omap4_pm_info, cpu);
cpu               325 arch/arm/mach-omap2/omap-mpuss-lowpower.c 	set_cpu_wakeup_addr(cpu, __pa_symbol(omap_pm_ops.hotplug_restart));
cpu               326 arch/arm/mach-omap2/omap-mpuss-lowpower.c 	omap_pm_ops.scu_prepare(cpu, power_state);
cpu               146 arch/arm/mach-omap2/omap-smp.c static void omap4_secondary_init(unsigned int cpu)
cpu               173 arch/arm/mach-omap2/omap-smp.c static int omap4_boot_secondary(unsigned int cpu, struct task_struct *idle)
cpu               250 arch/arm/mach-omap2/omap-smp.c 	arch_send_wakeup_ipi_mask(cpumask_of(cpu));
cpu                75 arch/arm/mach-omap2/omap-wakeupgen.c static inline u32 wakeupgen_readl(u8 idx, u32 cpu)
cpu                78 arch/arm/mach-omap2/omap-wakeupgen.c 				(cpu * CPU_ENA_OFFSET) + (idx * 4));
cpu                81 arch/arm/mach-omap2/omap-wakeupgen.c static inline void wakeupgen_writel(u32 val, u8 idx, u32 cpu)
cpu                84 arch/arm/mach-omap2/omap-wakeupgen.c 				(cpu * CPU_ENA_OFFSET) + (idx * 4));
cpu               104 arch/arm/mach-omap2/omap-wakeupgen.c static void _wakeupgen_clear(unsigned int irq, unsigned int cpu)
cpu               112 arch/arm/mach-omap2/omap-wakeupgen.c 	val = wakeupgen_readl(i, cpu);
cpu               114 arch/arm/mach-omap2/omap-wakeupgen.c 	wakeupgen_writel(val, i, cpu);
cpu               117 arch/arm/mach-omap2/omap-wakeupgen.c static void _wakeupgen_set(unsigned int irq, unsigned int cpu)
cpu               125 arch/arm/mach-omap2/omap-wakeupgen.c 	val = wakeupgen_readl(i, cpu);
cpu               127 arch/arm/mach-omap2/omap-wakeupgen.c 	wakeupgen_writel(val, i, cpu);
cpu               190 arch/arm/mach-omap2/omap-wakeupgen.c static void _wakeupgen_save_masks(unsigned int cpu)
cpu               195 arch/arm/mach-omap2/omap-wakeupgen.c 		per_cpu(irqmasks, cpu)[i] = wakeupgen_readl(i, cpu);
cpu               198 arch/arm/mach-omap2/omap-wakeupgen.c static void _wakeupgen_restore_masks(unsigned int cpu)
cpu               203 arch/arm/mach-omap2/omap-wakeupgen.c 		wakeupgen_writel(per_cpu(irqmasks, cpu)[i], i, cpu);
cpu               206 arch/arm/mach-omap2/omap-wakeupgen.c static void _wakeupgen_set_all(unsigned int cpu, unsigned int reg)
cpu               211 arch/arm/mach-omap2/omap-wakeupgen.c 		wakeupgen_writel(reg, i, cpu);
cpu               221 arch/arm/mach-omap2/omap-wakeupgen.c static void wakeupgen_irqmask_all(unsigned int cpu, unsigned int set)
cpu               227 arch/arm/mach-omap2/omap-wakeupgen.c 		_wakeupgen_save_masks(cpu);
cpu               228 arch/arm/mach-omap2/omap-wakeupgen.c 		_wakeupgen_set_all(cpu, WKG_MASK_ALL);
cpu               230 arch/arm/mach-omap2/omap-wakeupgen.c 		_wakeupgen_set_all(cpu, WKG_UNMASK_ALL);
cpu               231 arch/arm/mach-omap2/omap-wakeupgen.c 		_wakeupgen_restore_masks(cpu);
cpu               406 arch/arm/mach-omap2/omap-wakeupgen.c static int omap_wakeupgen_cpu_online(unsigned int cpu)
cpu               408 arch/arm/mach-omap2/omap-wakeupgen.c 	wakeupgen_irqmask_all(cpu, 0);
cpu               412 arch/arm/mach-omap2/omap-wakeupgen.c static int omap_wakeupgen_cpu_dead(unsigned int cpu)
cpu               414 arch/arm/mach-omap2/omap-wakeupgen.c 	wakeupgen_irqmask_all(cpu, 1);
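
The omap-wakeupgen.c entries outline a per-CPU bank of mask registers at base + cpu * CPU_ENA_OFFSET + idx * 4, plus a low-power sequence that either saves the masks and writes a mask-all value, or writes an unmask-all value and restores the saved masks. A small simulation of that save / set-all / restore flow; the bank count and the WKG_* values are placeholders, not the OMAP ones.

/* Simulation of the wakeupgen "mask everything on the way down, restore on
 * the way up" flow from the lines above. Register counts and WKG_* values
 * are placeholders. */
#include <stdint.h>
#include <stdio.h>

#define NR_CPUS        2
#define NR_REG_BANKS   4
#define WKG_MASK_ALL   0x00000000u     /* placeholder */
#define WKG_UNMASK_ALL 0xffffffffu     /* placeholder */

static uint32_t wakeupgen[NR_CPUS][NR_REG_BANKS];   /* "live" registers */
static uint32_t irqmasks[NR_CPUS][NR_REG_BANKS];    /* per-CPU saved copy */

static void wakeupgen_irqmask_all(unsigned int cpu, int set)
{
	int i;

	if (set) {                          /* entering low power */
		for (i = 0; i < NR_REG_BANKS; i++)
			irqmasks[cpu][i] = wakeupgen[cpu][i];   /* save masks */
		for (i = 0; i < NR_REG_BANKS; i++)
			wakeupgen[cpu][i] = WKG_MASK_ALL;       /* mask all */
	} else {                            /* coming back online */
		for (i = 0; i < NR_REG_BANKS; i++)
			wakeupgen[cpu][i] = WKG_UNMASK_ALL;     /* unmask all */
		for (i = 0; i < NR_REG_BANKS; i++)
			wakeupgen[cpu][i] = irqmasks[cpu][i];   /* restore masks */
	}
}

int main(void)
{
	wakeupgen[0][0] = 0x00000104;       /* pretend two wake sources are enabled */
	wakeupgen_irqmask_all(0, 1);        /* CPU0 going down: masks saved, all masked */
	wakeupgen_irqmask_all(0, 0);        /* CPU0 back up: original masks restored */
	printf("bank0 after resume: 0x%08x\n", wakeupgen[0][0]);
	return 0;
}
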
cpu                26 arch/arm/mach-oxnas/platsmp.c #define GIC_NCPU_OFFSET(cpu)		(0x100 + (cpu)*0x100)
cpu                30 arch/arm/mach-oxnas/platsmp.c int __init ox820_boot_secondary(unsigned int cpu, struct task_struct *idle)
cpu                41 arch/arm/mach-oxnas/platsmp.c 	writel(cpu, cpu_ctrl + HOLDINGPEN_CPU_OFFSET);
cpu                47 arch/arm/mach-oxnas/platsmp.c 		gic_cpu_ctrl + GIC_NCPU_OFFSET(cpu) + GIC_CPU_CTRL);
cpu                54 arch/arm/mach-oxnas/platsmp.c 	arch_send_wakeup_ipi_mask(cpumask_of(cpu));
cpu                21 arch/arm/mach-prima2/common.h extern void sirfsoc_cpu_die(unsigned int cpu);
cpu                15 arch/arm/mach-prima2/hotplug.c static inline void platform_do_lowpower(unsigned int cpu)
cpu                21 arch/arm/mach-prima2/hotplug.c 		if (prima2_pen_release == cpu_logical_map(cpu)) {
cpu                35 arch/arm/mach-prima2/hotplug.c void sirfsoc_cpu_die(unsigned int cpu)
cpu                37 arch/arm/mach-prima2/hotplug.c 	platform_do_lowpower(cpu);
cpu                29 arch/arm/mach-prima2/platsmp.c static void sirfsoc_secondary_init(unsigned int cpu)
cpu                50 arch/arm/mach-prima2/platsmp.c static int sirfsoc_boot_secondary(unsigned int cpu, struct task_struct *idle)
cpu                90 arch/arm/mach-prima2/platsmp.c 	prima2_pen_release = cpu_logical_map(cpu);
cpu                47 arch/arm/mach-qcom/platsmp.c static void qcom_cpu_die(unsigned int cpu)
cpu                53 arch/arm/mach-qcom/platsmp.c static int scss_release_secondary(unsigned int cpu)
cpu                78 arch/arm/mach-qcom/platsmp.c static int kpssv1_release_secondary(unsigned int cpu)
cpu                85 arch/arm/mach-qcom/platsmp.c 	cpu_node = of_get_cpu_node(cpu, NULL);
cpu               157 arch/arm/mach-qcom/platsmp.c static int kpssv2_release_secondary(unsigned int cpu)
cpu               165 arch/arm/mach-qcom/platsmp.c 	cpu_node = of_get_cpu_node(cpu, NULL);
cpu               259 arch/arm/mach-qcom/platsmp.c static int qcom_boot_secondary(unsigned int cpu, int (*func)(unsigned int))
cpu               263 arch/arm/mach-qcom/platsmp.c 	if (!per_cpu(cold_boot_done, cpu)) {
cpu               264 arch/arm/mach-qcom/platsmp.c 		ret = func(cpu);
cpu               266 arch/arm/mach-qcom/platsmp.c 			per_cpu(cold_boot_done, cpu) = true;
cpu               274 arch/arm/mach-qcom/platsmp.c 	arch_send_wakeup_ipi_mask(cpumask_of(cpu));
cpu               279 arch/arm/mach-qcom/platsmp.c static int msm8660_boot_secondary(unsigned int cpu, struct task_struct *idle)
cpu               281 arch/arm/mach-qcom/platsmp.c 	return qcom_boot_secondary(cpu, scss_release_secondary);
cpu               284 arch/arm/mach-qcom/platsmp.c static int kpssv1_boot_secondary(unsigned int cpu, struct task_struct *idle)
cpu               286 arch/arm/mach-qcom/platsmp.c 	return qcom_boot_secondary(cpu, kpssv1_release_secondary);
cpu               289 arch/arm/mach-qcom/platsmp.c static int kpssv2_boot_secondary(unsigned int cpu, struct task_struct *idle)
cpu               291 arch/arm/mach-qcom/platsmp.c 	return qcom_boot_secondary(cpu, kpssv2_release_secondary);
cpu               296 arch/arm/mach-qcom/platsmp.c 	int cpu;
cpu               300 arch/arm/mach-qcom/platsmp.c 		for_each_present_cpu(cpu) {
cpu               301 arch/arm/mach-qcom/platsmp.c 			if (cpu == smp_processor_id())
cpu               303 arch/arm/mach-qcom/platsmp.c 			set_cpu_present(cpu, false);
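
The mach-qcom/platsmp.c entries show a "release the core once, then just kick it" pattern: a per-CPU cold_boot_done flag, a SoC-specific release callback run only on the first boot, and a wakeup IPI sent each time. A condensed sketch of that control flow; the flag array and IPI stub stand in for the per-CPU variable and arch_send_wakeup_ipi_mask.

/* Sketch of "run the cold-boot release exactly once per CPU, then send a
 * wakeup IPI", following the lines above. */
#include <stdbool.h>
#include <stdio.h>

#define NR_CPUS 4

static bool cold_boot_done[NR_CPUS];        /* stands in for per_cpu(cold_boot_done, cpu) */

static void send_wakeup_ipi(unsigned int cpu)
{
	printf("wakeup IPI -> cpu%u\n", cpu);   /* arch_send_wakeup_ipi_mask(cpumask_of(cpu)) */
}

static int boot_secondary(unsigned int cpu, int (*release)(unsigned int))
{
	int ret = 0;

	if (!cold_boot_done[cpu]) {
		ret = release(cpu);             /* e.g. a kpssv1-style release routine */
		if (!ret)
			cold_boot_done[cpu] = true;
	}
	send_wakeup_ipi(cpu);
	return ret;
}

static int fake_release(unsigned int cpu)
{
	printf("cold-boot release for cpu%u\n", cpu);
	return 0;
}

int main(void)
{
	boot_secondary(1, fake_release);        /* first boot: release + IPI */
	boot_secondary(1, fake_release);        /* re-plug: IPI only */
	return 0;
}
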
cpu                79 arch/arm/mach-realview/platsmp-dt.c static void realview_cpu_die(unsigned int cpu)
cpu                81 arch/arm/mach-realview/platsmp-dt.c 	return versatile_immitation_cpu_die(cpu, 0x20);
cpu                50 arch/arm/mach-rockchip/platsmp.c static struct reset_control *rockchip_get_core_reset(int cpu)
cpu                52 arch/arm/mach-rockchip/platsmp.c 	struct device *dev = get_cpu_device(cpu);
cpu                59 arch/arm/mach-rockchip/platsmp.c 		np = of_get_cpu_node(cpu, NULL);
cpu               116 arch/arm/mach-rockchip/platsmp.c static int rockchip_boot_secondary(unsigned int cpu, struct task_struct *idle)
cpu               125 arch/arm/mach-rockchip/platsmp.c 	if (cpu >= ncores) {
cpu               127 arch/arm/mach-rockchip/platsmp.c 		       __func__, cpu, ncores);
cpu               132 arch/arm/mach-rockchip/platsmp.c 	ret = pmu_set_power_domain(0 + cpu, true);
cpu               334 arch/arm/mach-rockchip/platsmp.c static int rockchip_cpu_kill(unsigned int cpu)
cpu               343 arch/arm/mach-rockchip/platsmp.c 	pmu_set_power_domain(0 + cpu, false);
cpu               347 arch/arm/mach-rockchip/platsmp.c static void rockchip_cpu_die(unsigned int cpu)
cpu                70 arch/arm/mach-sa1100/generic.c unsigned int sa11x0_getspeed(unsigned int cpu)
cpu                72 arch/arm/mach-sa1100/generic.c 	if (cpu)
cpu                25 arch/arm/mach-sa1100/generic.h extern unsigned int sa11x0_getspeed(unsigned int cpu);
cpu                15 arch/arm/mach-shmobile/common.h extern void shmobile_smp_hook(unsigned int cpu, unsigned long fn,
cpu                17 arch/arm/mach-shmobile/common.h extern bool shmobile_smp_cpu_can_disable(unsigned int cpu);
cpu                22 arch/arm/mach-shmobile/common.h extern void shmobile_smp_scu_cpu_die(unsigned int cpu);
cpu                23 arch/arm/mach-shmobile/common.h extern int shmobile_smp_scu_cpu_kill(unsigned int cpu);
cpu                79 arch/arm/mach-shmobile/platsmp-apmu.c static int __maybe_unused apmu_wrap(int cpu, int (*fn)(void __iomem *p, int cpu))
cpu                81 arch/arm/mach-shmobile/platsmp-apmu.c 	void __iomem *p = apmu_cpus[cpu].iomem;
cpu                83 arch/arm/mach-shmobile/platsmp-apmu.c 	return p ? fn(p, apmu_cpus[cpu].bit) : -EINVAL;
cpu               117 arch/arm/mach-shmobile/platsmp-apmu.c static void shmobile_smp_apmu_cpu_shutdown(unsigned int cpu)
cpu               121 arch/arm/mach-shmobile/platsmp-apmu.c 	apmu_wrap(cpu, apmu_power_off);
cpu               129 arch/arm/mach-shmobile/platsmp-apmu.c static void shmobile_smp_apmu_cpu_die(unsigned int cpu)
cpu               132 arch/arm/mach-shmobile/platsmp-apmu.c 	shmobile_smp_hook(cpu, 0, 0);
cpu               135 arch/arm/mach-shmobile/platsmp-apmu.c 	shmobile_smp_apmu_cpu_shutdown(cpu);
cpu               141 arch/arm/mach-shmobile/platsmp-apmu.c static int shmobile_smp_apmu_cpu_kill(unsigned int cpu)
cpu               143 arch/arm/mach-shmobile/platsmp-apmu.c 	return apmu_wrap(cpu, apmu_power_off_poll);
cpu               148 arch/arm/mach-shmobile/platsmp-apmu.c static int shmobile_smp_apmu_do_suspend(unsigned long cpu)
cpu               150 arch/arm/mach-shmobile/platsmp-apmu.c 	shmobile_smp_hook(cpu, __pa_symbol(cpu_resume), 0);
cpu               151 arch/arm/mach-shmobile/platsmp-apmu.c 	shmobile_smp_apmu_cpu_shutdown(cpu);
cpu               185 arch/arm/mach-shmobile/platsmp-apmu.c static void apmu_init_cpu(struct resource *res, int cpu, int bit)
cpu               189 arch/arm/mach-shmobile/platsmp-apmu.c 	if ((cpu >= ARRAY_SIZE(apmu_cpus)) || apmu_cpus[cpu].iomem)
cpu               192 arch/arm/mach-shmobile/platsmp-apmu.c 	apmu_cpus[cpu].iomem = ioremap_nocache(res->start, resource_size(res));
cpu               193 arch/arm/mach-shmobile/platsmp-apmu.c 	apmu_cpus[cpu].bit = bit;
cpu               195 arch/arm/mach-shmobile/platsmp-apmu.c 	pr_debug("apmu ioremap %d %d %pr\n", cpu, bit, res);
cpu               198 arch/arm/mach-shmobile/platsmp-apmu.c 	x = readl(apmu_cpus[cpu].iomem + DBGRCR_OFFS);
cpu               200 arch/arm/mach-shmobile/platsmp-apmu.c 	writel(x, apmu_cpus[cpu].iomem + DBGRCR_OFFS);
cpu               208 arch/arm/mach-shmobile/platsmp-apmu.c static void apmu_parse_dt(void (*fn)(struct resource *res, int cpu, int bit))
cpu               259 arch/arm/mach-shmobile/platsmp-apmu.c static int shmobile_smp_apmu_boot_secondary(unsigned int cpu,
cpu               263 arch/arm/mach-shmobile/platsmp-apmu.c 	shmobile_smp_hook(cpu, __pa_symbol(shmobile_boot_apmu), 0);
cpu               265 arch/arm/mach-shmobile/platsmp-apmu.c 	return apmu_wrap(cpu, apmu_power_on);
cpu                21 arch/arm/mach-shmobile/platsmp-scu.c static int shmobile_scu_cpu_prepare(unsigned int cpu)
cpu                24 arch/arm/mach-shmobile/platsmp-scu.c 	shmobile_smp_hook(cpu, __pa_symbol(shmobile_boot_scu),
cpu                48 arch/arm/mach-shmobile/platsmp-scu.c void shmobile_smp_scu_cpu_die(unsigned int cpu)
cpu                51 arch/arm/mach-shmobile/platsmp-scu.c 	shmobile_smp_hook(cpu, 0, 0);
cpu                63 arch/arm/mach-shmobile/platsmp-scu.c static int shmobile_smp_scu_psr_core_disabled(int cpu)
cpu                65 arch/arm/mach-shmobile/platsmp-scu.c 	unsigned long mask = SCU_PM_POWEROFF << (cpu * 8);
cpu                73 arch/arm/mach-shmobile/platsmp-scu.c int shmobile_smp_scu_cpu_kill(unsigned int cpu)
cpu                82 arch/arm/mach-shmobile/platsmp-scu.c 		if (shmobile_smp_scu_psr_core_disabled(cpu))
cpu                19 arch/arm/mach-shmobile/platsmp.c void shmobile_smp_hook(unsigned int cpu, unsigned long fn, unsigned long arg)
cpu                21 arch/arm/mach-shmobile/platsmp.c 	shmobile_smp_fn[cpu] = 0;
cpu                24 arch/arm/mach-shmobile/platsmp.c 	shmobile_smp_mpidr[cpu] = cpu_logical_map(cpu);
cpu                25 arch/arm/mach-shmobile/platsmp.c 	shmobile_smp_fn[cpu] = fn;
cpu                26 arch/arm/mach-shmobile/platsmp.c 	shmobile_smp_arg[cpu] = arg;
cpu                31 arch/arm/mach-shmobile/platsmp.c bool shmobile_smp_cpu_can_disable(unsigned int cpu)
cpu                24 arch/arm/mach-shmobile/smp-emev2.c static int emev2_boot_secondary(unsigned int cpu, struct task_struct *idle)
cpu                26 arch/arm/mach-shmobile/smp-emev2.c 	arch_send_wakeup_ipi_mask(cpumask_of(cpu_logical_map(cpu)));
cpu                26 arch/arm/mach-shmobile/smp-r8a7779.c static int r8a7779_boot_secondary(unsigned int cpu, struct task_struct *idle)
cpu                30 arch/arm/mach-shmobile/smp-r8a7779.c 	cpu = cpu_logical_map(cpu);
cpu                31 arch/arm/mach-shmobile/smp-r8a7779.c 	if (cpu)
cpu                32 arch/arm/mach-shmobile/smp-r8a7779.c 		ret = rcar_sysc_power_up_cpu(cpu);
cpu                47 arch/arm/mach-shmobile/smp-r8a7779.c static int r8a7779_platform_cpu_kill(unsigned int cpu)
cpu                51 arch/arm/mach-shmobile/smp-r8a7779.c 	cpu = cpu_logical_map(cpu);
cpu                52 arch/arm/mach-shmobile/smp-r8a7779.c 	if (cpu)
cpu                53 arch/arm/mach-shmobile/smp-r8a7779.c 		ret = rcar_sysc_power_down_cpu(cpu);
cpu                58 arch/arm/mach-shmobile/smp-r8a7779.c static int r8a7779_cpu_kill(unsigned int cpu)
cpu                60 arch/arm/mach-shmobile/smp-r8a7779.c 	if (shmobile_smp_scu_cpu_kill(cpu))
cpu                61 arch/arm/mach-shmobile/smp-r8a7779.c 		return r8a7779_platform_cpu_kill(cpu);
cpu                27 arch/arm/mach-shmobile/smp-sh73a0.c static int sh73a0_boot_secondary(unsigned int cpu, struct task_struct *idle)
cpu                29 arch/arm/mach-shmobile/smp-sh73a0.c 	unsigned int lcpu = cpu_logical_map(cpu);
cpu                21 arch/arm/mach-socfpga/platsmp.c static int socfpga_boot_secondary(unsigned int cpu, struct task_struct *idle)
cpu                46 arch/arm/mach-socfpga/platsmp.c static int socfpga_a10_boot_secondary(unsigned int cpu, struct task_struct *idle)
cpu                92 arch/arm/mach-socfpga/platsmp.c static void socfpga_cpu_die(unsigned int cpu)
cpu               105 arch/arm/mach-socfpga/platsmp.c static int socfpga_cpu_kill(unsigned int cpu)
cpu                42 arch/arm/mach-spear/generic.h void spear13xx_cpu_die(unsigned int cpu);
cpu                54 arch/arm/mach-spear/hotplug.c static inline void spear13xx_do_lowpower(unsigned int cpu, int *spurious)
cpu                59 arch/arm/mach-spear/hotplug.c 		if (spear_pen_release == cpu) {
cpu                82 arch/arm/mach-spear/hotplug.c void spear13xx_cpu_die(unsigned int cpu)
cpu                90 arch/arm/mach-spear/hotplug.c 	spear13xx_do_lowpower(cpu, &spurious);
cpu                99 arch/arm/mach-spear/hotplug.c 		pr_warn("CPU%u: %u spurious wakeup calls\n", cpu, spurious);
cpu                41 arch/arm/mach-spear/platsmp.c static void spear13xx_secondary_init(unsigned int cpu)
cpu                56 arch/arm/mach-spear/platsmp.c static int spear13xx_boot_secondary(unsigned int cpu, struct task_struct *idle)
cpu                74 arch/arm/mach-spear/platsmp.c 	spear_write_pen_release(cpu);
cpu                30 arch/arm/mach-sti/platsmp.c static int sti_boot_secondary(unsigned int cpu, struct task_struct *idle)
cpu                54 arch/arm/mach-sti/platsmp.c 	int cpu;
cpu                67 arch/arm/mach-sti/platsmp.c 	for_each_possible_cpu(cpu) {
cpu                69 arch/arm/mach-sti/platsmp.c 		np = of_get_cpu_node(cpu, NULL);
cpu                77 arch/arm/mach-sti/platsmp.c 				"property\n", cpu);
cpu                93 arch/arm/mach-sti/platsmp.c 		set_cpu_possible(cpu, true);
cpu                68 arch/arm/mach-sunxi/mc_smp.c #define PRCM_PWR_SWITCH_REG(c, cpu)	(0x140 + 0x10 * (c) + 0x4 * (cpu))
cpu                91 arch/arm/mach-sunxi/mc_smp.c 	int cpu = cluster * SUNXI_CPUS_PER_CLUSTER + core;
cpu                94 arch/arm/mach-sunxi/mc_smp.c 	node = of_cpu_device_node_get(cpu);
cpu                98 arch/arm/mach-sunxi/mc_smp.c 		node = of_get_cpu_node(cpu, NULL);
cpu               116 arch/arm/mach-sunxi/mc_smp.c static int sunxi_cpu_power_switch_set(unsigned int cpu, unsigned int cluster,
cpu               122 arch/arm/mach-sunxi/mc_smp.c 	reg = readl(prcm_base + PRCM_PWR_SWITCH_REG(cluster, cpu));
cpu               126 arch/arm/mach-sunxi/mc_smp.c 				 cluster, cpu);
cpu               130 arch/arm/mach-sunxi/mc_smp.c 		writel(0xff, prcm_base + PRCM_PWR_SWITCH_REG(cluster, cpu));
cpu               132 arch/arm/mach-sunxi/mc_smp.c 		writel(0xfe, prcm_base + PRCM_PWR_SWITCH_REG(cluster, cpu));
cpu               134 arch/arm/mach-sunxi/mc_smp.c 		writel(0xf8, prcm_base + PRCM_PWR_SWITCH_REG(cluster, cpu));
cpu               136 arch/arm/mach-sunxi/mc_smp.c 		writel(0xf0, prcm_base + PRCM_PWR_SWITCH_REG(cluster, cpu));
cpu               138 arch/arm/mach-sunxi/mc_smp.c 		writel(0x00, prcm_base + PRCM_PWR_SWITCH_REG(cluster, cpu));
cpu               141 arch/arm/mach-sunxi/mc_smp.c 		writel(0xff, prcm_base + PRCM_PWR_SWITCH_REG(cluster, cpu));
cpu               159 arch/arm/mach-sunxi/mc_smp.c static int sunxi_cpu_powerup(unsigned int cpu, unsigned int cluster)
cpu               163 arch/arm/mach-sunxi/mc_smp.c 	pr_debug("%s: cluster %u cpu %u\n", __func__, cluster, cpu);
cpu               164 arch/arm/mach-sunxi/mc_smp.c 	if (cpu >= SUNXI_CPUS_PER_CLUSTER || cluster >= SUNXI_NR_CLUSTERS)
cpu               168 arch/arm/mach-sunxi/mc_smp.c 	if (cluster == 0 && cpu == 0)
cpu               173 arch/arm/mach-sunxi/mc_smp.c 	reg &= ~PRCM_CPU_PO_RST_CTRL_CORE(cpu);
cpu               180 arch/arm/mach-sunxi/mc_smp.c 		reg &= ~(R_CPUCFG_CLUSTER_PO_RST_CTRL_CORE(cpu));
cpu               187 arch/arm/mach-sunxi/mc_smp.c 	if (!sunxi_core_is_cortex_a15(cpu, cluster)) {
cpu               189 arch/arm/mach-sunxi/mc_smp.c 		reg &= ~CPUCFG_CX_CTRL_REG0_L1_RST_DISABLE(cpu);
cpu               195 arch/arm/mach-sunxi/mc_smp.c 	reg &= ~CPUCFG_CX_RST_CTRL_DBG_RST(cpu);
cpu               201 arch/arm/mach-sunxi/mc_smp.c 	if (!sunxi_core_is_cortex_a15(cpu, cluster))
cpu               202 arch/arm/mach-sunxi/mc_smp.c 		reg &= ~CPUCFG_CX_RST_CTRL_ETM_RST(cpu);
cpu               207 arch/arm/mach-sunxi/mc_smp.c 	sunxi_cpu_power_switch_set(cpu, cluster, true);
cpu               211 arch/arm/mach-sunxi/mc_smp.c 		if (cpu == 0)
cpu               212 arch/arm/mach-sunxi/mc_smp.c 			cpu = 4;
cpu               217 arch/arm/mach-sunxi/mc_smp.c 	reg &= ~PRCM_PWROFF_GATING_REG_CORE(cpu);
cpu               223 arch/arm/mach-sunxi/mc_smp.c 		if (cpu == 4)
cpu               224 arch/arm/mach-sunxi/mc_smp.c 			cpu = 0;
cpu               229 arch/arm/mach-sunxi/mc_smp.c 	reg |= PRCM_CPU_PO_RST_CTRL_CORE(cpu);
cpu               235 arch/arm/mach-sunxi/mc_smp.c 		reg |= R_CPUCFG_CLUSTER_PO_RST_CTRL_CORE(cpu);
cpu               243 arch/arm/mach-sunxi/mc_smp.c 	reg |= CPUCFG_CX_RST_CTRL_DBG_RST(cpu);
cpu               244 arch/arm/mach-sunxi/mc_smp.c 	reg |= CPUCFG_CX_RST_CTRL_CORE_RST(cpu);
cpu               245 arch/arm/mach-sunxi/mc_smp.c 	if (!sunxi_core_is_cortex_a15(cpu, cluster))
cpu               246 arch/arm/mach-sunxi/mc_smp.c 		reg |= CPUCFG_CX_RST_CTRL_ETM_RST(cpu);
cpu               248 arch/arm/mach-sunxi/mc_smp.c 		reg |= CPUCFG_CX_RST_CTRL_CX_RST(cpu); /* NEON */
cpu               385 arch/arm/mach-sunxi/mc_smp.c static void sunxi_mc_smp_secondary_init(unsigned int cpu)
cpu               388 arch/arm/mach-sunxi/mc_smp.c 	if (cpu == 0)
cpu               394 arch/arm/mach-sunxi/mc_smp.c 	unsigned int mpidr, cpu, cluster;
cpu               397 arch/arm/mach-sunxi/mc_smp.c 	cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
cpu               402 arch/arm/mach-sunxi/mc_smp.c 	if (cluster >= SUNXI_NR_CLUSTERS || cpu >= SUNXI_CPUS_PER_CLUSTER)
cpu               407 arch/arm/mach-sunxi/mc_smp.c 	if (sunxi_mc_smp_cpu_table[cluster][cpu])
cpu               419 arch/arm/mach-sunxi/mc_smp.c 	sunxi_cpu_powerup(cpu, cluster);
cpu               422 arch/arm/mach-sunxi/mc_smp.c 	sunxi_mc_smp_cpu_table[cluster][cpu]++;
cpu               446 arch/arm/mach-sunxi/mc_smp.c 	unsigned int mpidr, cpu, cluster;
cpu               450 arch/arm/mach-sunxi/mc_smp.c 	cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
cpu               452 arch/arm/mach-sunxi/mc_smp.c 	pr_debug("%s: cluster %u cpu %u\n", __func__, cluster, cpu);
cpu               455 arch/arm/mach-sunxi/mc_smp.c 	sunxi_mc_smp_cpu_table[cluster][cpu]--;
cpu               456 arch/arm/mach-sunxi/mc_smp.c 	if (sunxi_mc_smp_cpu_table[cluster][cpu] == 1) {
cpu               462 arch/arm/mach-sunxi/mc_smp.c 	} else if (sunxi_mc_smp_cpu_table[cluster][cpu] > 1) {
cpu               464 arch/arm/mach-sunxi/mc_smp.c 		       cluster, cpu);
cpu               481 arch/arm/mach-sunxi/mc_smp.c static int sunxi_cpu_powerdown(unsigned int cpu, unsigned int cluster)
cpu               484 arch/arm/mach-sunxi/mc_smp.c 	int gating_bit = cpu;
cpu               486 arch/arm/mach-sunxi/mc_smp.c 	pr_debug("%s: cluster %u cpu %u\n", __func__, cluster, cpu);
cpu               487 arch/arm/mach-sunxi/mc_smp.c 	if (cpu >= SUNXI_CPUS_PER_CLUSTER || cluster >= SUNXI_NR_CLUSTERS)
cpu               490 arch/arm/mach-sunxi/mc_smp.c 	if (is_a83t && cpu == 0)
cpu               500 arch/arm/mach-sunxi/mc_smp.c 	sunxi_cpu_power_switch_set(cpu, cluster, false);
cpu               536 arch/arm/mach-sunxi/mc_smp.c 	unsigned int mpidr, cpu, cluster;
cpu               542 arch/arm/mach-sunxi/mc_smp.c 	cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
cpu               547 arch/arm/mach-sunxi/mc_smp.c 		    cpu >= SUNXI_CPUS_PER_CLUSTER))
cpu               565 arch/arm/mach-sunxi/mc_smp.c 		if (sunxi_mc_smp_cpu_table[cluster][cpu])
cpu               569 arch/arm/mach-sunxi/mc_smp.c 		if (reg & CPUCFG_CX_STATUS_STANDBYWFI(cpu))
cpu               579 arch/arm/mach-sunxi/mc_smp.c 	sunxi_cpu_powerdown(cpu, cluster);
cpu               605 arch/arm/mach-sunxi/mc_smp.c 		 __func__, cluster, cpu, ret);
cpu               609 arch/arm/mach-sunxi/mc_smp.c static bool sunxi_mc_smp_cpu_can_disable(unsigned int cpu)
cpu               613 arch/arm/mach-sunxi/mc_smp.c 		if (cpu == 0)
cpu               631 arch/arm/mach-sunxi/mc_smp.c 	unsigned int mpidr, cpu, cluster;
cpu               634 arch/arm/mach-sunxi/mc_smp.c 	cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
cpu               637 arch/arm/mach-sunxi/mc_smp.c 	if (cluster >= SUNXI_NR_CLUSTERS || cpu >= SUNXI_CPUS_PER_CLUSTER) {
cpu               641 arch/arm/mach-sunxi/mc_smp.c 	sunxi_mc_smp_cpu_table[cluster][cpu] = 1;
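
The mc_smp.c entries place the per-core power switch register at 0x140 + 0x10 * cluster + 0x4 * cpu and walk it through a fixed sequence of clamp values (0xff, 0xfe, 0xf8, 0xf0, 0x00) when powering a core up, or back to 0xff when powering it down. A tiny sketch that reproduces the offset math and that ramp; the register write is just printed, and the small delays the kernel inserts between steps are omitted.

/* Offset math and power-switch ramp taken from the mc_smp.c lines above;
 * registers are printed rather than written. */
#include <stdint.h>
#include <stdio.h>

#define PRCM_PWR_SWITCH_REG(c, cpu)	(0x140 + 0x10 * (c) + 0x4 * (cpu))

static void cpu_power_switch_set(unsigned int cpu, unsigned int cluster, int enable)
{
	/* Clamps are released in stages; the kernel udelay()s between writes. */
	static const uint32_t ramp_on[] = { 0xff, 0xfe, 0xf8, 0xf0, 0x00 };
	uint32_t off = PRCM_PWR_SWITCH_REG(cluster, cpu);

	if (enable) {
		for (unsigned int i = 0; i < sizeof(ramp_on) / sizeof(ramp_on[0]); i++)
			printf("writel(0x%02x, prcm + 0x%03x)\n", ramp_on[i], off);
	} else {
		printf("writel(0xff, prcm + 0x%03x)\n", off);  /* clamp everything again */
	}
}

int main(void)
{
	cpu_power_switch_set(2, 1, 1);    /* cluster 1, cpu 2: power on */
	cpu_power_switch_set(2, 1, 0);    /* and off again */
	return 0;
}
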
cpu                24 arch/arm/mach-sunxi/platsmp.c #define CPUCFG_CPU_PWR_CLAMP_STATUS_REG(cpu)	((cpu) * 0x40 + 0x64)
cpu                25 arch/arm/mach-sunxi/platsmp.c #define CPUCFG_CPU_RST_CTRL_REG(cpu)		(((cpu) + 1) * 0x40)
cpu                26 arch/arm/mach-sunxi/platsmp.c #define CPUCFG_CPU_CTRL_REG(cpu)		(((cpu) + 1) * 0x40 + 0x04)
cpu                27 arch/arm/mach-sunxi/platsmp.c #define CPUCFG_CPU_STATUS_REG(cpu)		(((cpu) + 1) * 0x40 + 0x08)
cpu                35 arch/arm/mach-sunxi/platsmp.c #define PRCM_CPU_PWR_CLAMP_REG(cpu)		(((cpu) * 4) + 0x140)
cpu                73 arch/arm/mach-sunxi/platsmp.c static int sun6i_smp_boot_secondary(unsigned int cpu,
cpu                89 arch/arm/mach-sunxi/platsmp.c 	writel(0, cpucfg_membase + CPUCFG_CPU_RST_CTRL_REG(cpu));
cpu                93 arch/arm/mach-sunxi/platsmp.c 	writel(reg & ~BIT(cpu), cpucfg_membase + CPUCFG_GEN_CTRL_REG);
cpu                97 arch/arm/mach-sunxi/platsmp.c 	writel(reg & ~BIT(cpu), cpucfg_membase + CPUCFG_DBG_CTL1_REG);
cpu               101 arch/arm/mach-sunxi/platsmp.c 		writel(0xff >> i, prcm_membase + PRCM_CPU_PWR_CLAMP_REG(cpu));
cpu               106 arch/arm/mach-sunxi/platsmp.c 	writel(reg & ~BIT(cpu), prcm_membase + PRCM_CPU_PWROFF_REG);
cpu               110 arch/arm/mach-sunxi/platsmp.c 	writel(3, cpucfg_membase + CPUCFG_CPU_RST_CTRL_REG(cpu));
cpu               114 arch/arm/mach-sunxi/platsmp.c 	writel(reg | BIT(cpu), cpucfg_membase + CPUCFG_DBG_CTL1_REG);
cpu               158 arch/arm/mach-sunxi/platsmp.c static int sun8i_smp_boot_secondary(unsigned int cpu,
cpu               173 arch/arm/mach-sunxi/platsmp.c 	writel(0, cpucfg_membase + CPUCFG_CPU_RST_CTRL_REG(cpu));
cpu               177 arch/arm/mach-sunxi/platsmp.c 	writel(reg & ~BIT(cpu), cpucfg_membase + CPUCFG_GEN_CTRL_REG);
cpu               181 arch/arm/mach-sunxi/platsmp.c 	writel(reg & ~BIT(cpu), prcm_membase + PRCM_CPU_PWROFF_REG);
cpu               185 arch/arm/mach-sunxi/platsmp.c 	writel(3, cpucfg_membase + CPUCFG_CPU_RST_CTRL_REG(cpu));
cpu                 7 arch/arm/mach-tango/platsmp.c static int tango_boot_secondary(unsigned int cpu, struct task_struct *idle)
cpu                10 arch/arm/mach-tango/platsmp.c 	tango_start_aux_core(cpu);
cpu                21 arch/arm/mach-tango/platsmp.c static int tango_cpu_kill(unsigned int cpu)
cpu                27 arch/arm/mach-tango/platsmp.c 		err = tango_aux_core_kill(cpu);
cpu                35 arch/arm/mach-tango/platsmp.c static void tango_cpu_die(unsigned int cpu)
cpu                37 arch/arm/mach-tango/platsmp.c 	while (tango_aux_core_die(cpu) < 0)
cpu                40 arch/arm/mach-tango/platsmp.c 	panic("cpu %d failed to die\n", cpu);
cpu                12 arch/arm/mach-tegra/common.h extern int tegra_cpu_kill(unsigned int cpu);
cpu                13 arch/arm/mach-tegra/common.h extern void tegra_cpu_die(unsigned int cpu);
cpu               181 arch/arm/mach-tegra/cpuidle-tegra20.c 	if (dev->cpu == 0)
cpu               108 arch/arm/mach-tegra/cpuidle-tegra30.c 	if (dev->cpu == 0) {
cpu                22 arch/arm/mach-tegra/hotplug.c int tegra_cpu_kill(unsigned cpu)
cpu                24 arch/arm/mach-tegra/hotplug.c 	cpu = cpu_logical_map(cpu);
cpu                27 arch/arm/mach-tegra/hotplug.c 	tegra_wait_cpu_in_reset(cpu);
cpu                28 arch/arm/mach-tegra/hotplug.c 	tegra_disable_cpu_clock(cpu);
cpu                38 arch/arm/mach-tegra/hotplug.c void tegra_cpu_die(unsigned int cpu)
cpu                36 arch/arm/mach-tegra/platsmp.c static void tegra_secondary_init(unsigned int cpu)
cpu                38 arch/arm/mach-tegra/platsmp.c 	cpumask_set_cpu(cpu, &tegra_cpu_init_mask);
cpu                42 arch/arm/mach-tegra/platsmp.c static int tegra20_boot_secondary(unsigned int cpu, struct task_struct *idle)
cpu                44 arch/arm/mach-tegra/platsmp.c 	cpu = cpu_logical_map(cpu);
cpu                54 arch/arm/mach-tegra/platsmp.c 	tegra_put_cpu_in_reset(cpu);
cpu                62 arch/arm/mach-tegra/platsmp.c 	flowctrl_write_cpu_halt(cpu, 0);
cpu                64 arch/arm/mach-tegra/platsmp.c 	tegra_enable_cpu_clock(cpu);
cpu                65 arch/arm/mach-tegra/platsmp.c 	flowctrl_write_cpu_csr(cpu, 0); /* Clear flow controller CSR. */
cpu                66 arch/arm/mach-tegra/platsmp.c 	tegra_cpu_out_of_reset(cpu);
cpu                70 arch/arm/mach-tegra/platsmp.c static int tegra30_boot_secondary(unsigned int cpu, struct task_struct *idle)
cpu                75 arch/arm/mach-tegra/platsmp.c 	cpu = cpu_logical_map(cpu);
cpu                76 arch/arm/mach-tegra/platsmp.c 	tegra_put_cpu_in_reset(cpu);
cpu                77 arch/arm/mach-tegra/platsmp.c 	flowctrl_write_cpu_halt(cpu, 0);
cpu                93 arch/arm/mach-tegra/platsmp.c 	if (cpumask_test_cpu(cpu, &tegra_cpu_init_mask)) {
cpu                96 arch/arm/mach-tegra/platsmp.c 			if (tegra_pmc_cpu_is_powered(cpu))
cpu               108 arch/arm/mach-tegra/platsmp.c 	ret = tegra_pmc_cpu_power_on(cpu);
cpu               114 arch/arm/mach-tegra/platsmp.c 	tegra_enable_cpu_clock(cpu);
cpu               118 arch/arm/mach-tegra/platsmp.c 	ret = tegra_pmc_cpu_remove_clamping(cpu);
cpu               124 arch/arm/mach-tegra/platsmp.c 	flowctrl_write_cpu_csr(cpu, 0); /* Clear flow controller CSR. */
cpu               125 arch/arm/mach-tegra/platsmp.c 	tegra_cpu_out_of_reset(cpu);
cpu               129 arch/arm/mach-tegra/platsmp.c static int tegra114_boot_secondary(unsigned int cpu, struct task_struct *idle)
cpu               133 arch/arm/mach-tegra/platsmp.c 	cpu = cpu_logical_map(cpu);
cpu               135 arch/arm/mach-tegra/platsmp.c 	if (cpumask_test_cpu(cpu, &tegra_cpu_init_mask)) {
cpu               142 arch/arm/mach-tegra/platsmp.c 		flowctrl_write_cpu_csr(cpu, 1);
cpu               143 arch/arm/mach-tegra/platsmp.c 		flowctrl_write_cpu_halt(cpu,
cpu               152 arch/arm/mach-tegra/platsmp.c 		ret = tegra_pmc_cpu_power_on(cpu);
cpu               158 arch/arm/mach-tegra/platsmp.c static int tegra_boot_secondary(unsigned int cpu,
cpu               162 arch/arm/mach-tegra/platsmp.c 		return tegra20_boot_secondary(cpu, idle);
cpu               164 arch/arm/mach-tegra/platsmp.c 		return tegra30_boot_secondary(cpu, idle);
cpu               166 arch/arm/mach-tegra/platsmp.c 		return tegra114_boot_secondary(cpu, idle);
cpu               168 arch/arm/mach-tegra/platsmp.c 		return tegra114_boot_secondary(cpu, idle);
cpu                75 arch/arm/mach-tegra/pm.c 	int cpu = smp_processor_id();
cpu                77 arch/arm/mach-tegra/pm.c 	BUG_ON(cpu != 0);
cpu                80 arch/arm/mach-tegra/pm.c 	cpu = cpu_logical_map(cpu);
cpu                86 arch/arm/mach-tegra/pm.c 	flowctrl_cpu_suspend_exit(cpu);
cpu                99 arch/arm/mach-tegra/pm.c 	int cpu = smp_processor_id();
cpu               101 arch/arm/mach-tegra/pm.c 	BUG_ON(cpu != 0);
cpu               104 arch/arm/mach-tegra/pm.c 	cpu = cpu_logical_map(cpu);
cpu               110 arch/arm/mach-tegra/pm.c 	flowctrl_cpu_suspend_enter(cpu);
cpu                52 arch/arm/mach-tegra/sleep.h 	cmp	\rcpu, #0
cpu                53 arch/arm/mach-tegra/sleep.h 	subne	\rd, \rcpu, #1
cpu                61 arch/arm/mach-tegra/sleep.h 	cmp	\rcpu, #0
cpu                62 arch/arm/mach-tegra/sleep.h 	subne	\rd, \rcpu, #1
cpu               128 arch/arm/mach-tegra/sleep.h void tegra20_cpu_shutdown(int cpu);
cpu                69 arch/arm/mach-ux500/platsmp.c static int ux500_boot_secondary(unsigned int cpu, struct task_struct *idle)
cpu                84 arch/arm/mach-ux500/platsmp.c 	arch_send_wakeup_ipi_mask(cpumask_of(cpu));
cpu                89 arch/arm/mach-ux500/platsmp.c void ux500_cpu_die(unsigned int cpu)
cpu               132 arch/arm/mach-ux500/pm.c bool prcmu_is_cpu_in_wfi(int cpu)
cpu               135 arch/arm/mach-ux500/pm.c 		(cpu ? PRCM_ARM_WFI_STANDBY_WFI1 : PRCM_ARM_WFI_STANDBY_WFI0);
cpu                38 arch/arm/mach-vexpress/dcscb.c static int dcscb_cpu_powerup(unsigned int cpu, unsigned int cluster)
cpu                40 arch/arm/mach-vexpress/dcscb.c 	unsigned int rst_hold, cpumask = (1 << cpu);
cpu                42 arch/arm/mach-vexpress/dcscb.c 	pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
cpu                68 arch/arm/mach-vexpress/dcscb.c static void dcscb_cpu_powerdown_prepare(unsigned int cpu, unsigned int cluster)
cpu                72 arch/arm/mach-vexpress/dcscb.c 	pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
cpu                73 arch/arm/mach-vexpress/dcscb.c 	BUG_ON(cluster >= 2 || !((1 << cpu) & dcscb_allcpus_mask[cluster]));
cpu                76 arch/arm/mach-vexpress/dcscb.c 	rst_hold |= (1 << cpu);
cpu                26 arch/arm/mach-vexpress/platsmp.c 	int cpu;
cpu                36 arch/arm/mach-vexpress/platsmp.c 	for_each_possible_cpu(cpu) {
cpu                39 arch/arm/mach-vexpress/platsmp.c 		cpu_node = of_get_cpu_node(cpu, NULL);
cpu                83 arch/arm/mach-vexpress/platsmp.c static void vexpress_cpu_die(unsigned int cpu)
cpu                85 arch/arm/mach-vexpress/platsmp.c 	versatile_immitation_cpu_die(cpu, 0x40);
cpu                60 arch/arm/mach-vexpress/spc.c #define STANDBYWFI_STAT_A15_CPU_MASK(cpu)	(1 << (cpu))
cpu                61 arch/arm/mach-vexpress/spc.c #define STANDBYWFI_STAT_A7_CPU_MASK(cpu)	(1 << (3 + (cpu)))
cpu               158 arch/arm/mach-vexpress/spc.c void ve_spc_cpu_wakeup_irq(u32 cluster, u32 cpu, bool set)
cpu               165 arch/arm/mach-vexpress/spc.c 	mask = BIT(cpu);
cpu               187 arch/arm/mach-vexpress/spc.c void ve_spc_set_resume_addr(u32 cluster, u32 cpu, u32 addr)
cpu               195 arch/arm/mach-vexpress/spc.c 		baseaddr = info->baseaddr + A15_BX_ADDR0 + (cpu << 2);
cpu               197 arch/arm/mach-vexpress/spc.c 		baseaddr = info->baseaddr + A7_BX_ADDR0 + (cpu << 2);
cpu               223 arch/arm/mach-vexpress/spc.c static u32 standbywfi_cpu_mask(u32 cpu, u32 cluster)
cpu               226 arch/arm/mach-vexpress/spc.c 		  STANDBYWFI_STAT_A15_CPU_MASK(cpu)
cpu               227 arch/arm/mach-vexpress/spc.c 		: STANDBYWFI_STAT_A7_CPU_MASK(cpu);
cpu               242 arch/arm/mach-vexpress/spc.c int ve_spc_cpu_in_wfi(u32 cpu, u32 cluster)
cpu               245 arch/arm/mach-vexpress/spc.c 	u32 mask = standbywfi_cpu_mask(cpu, cluster);
cpu               554 arch/arm/mach-vexpress/spc.c 	int cpu, cluster;
cpu               566 arch/arm/mach-vexpress/spc.c 	for_each_possible_cpu(cpu) {
cpu               567 arch/arm/mach-vexpress/spc.c 		struct device *cpu_dev = get_cpu_device(cpu);
cpu               569 arch/arm/mach-vexpress/spc.c 			pr_warn("failed to get cpu%d device\n", cpu);
cpu               574 arch/arm/mach-vexpress/spc.c 			pr_warn("failed to register cpu%d clock\n", cpu);
cpu               578 arch/arm/mach-vexpress/spc.c 			pr_warn("failed to register cpu%d clock lookup\n", cpu);
cpu               587 arch/arm/mach-vexpress/spc.c 			pr_warn("failed to initialise cpu%d opp table\n", cpu);
cpu               590 arch/arm/mach-vexpress/spc.c 			pr_warn("failed to mark OPPs shared for cpu%d\n", cpu);
cpu                13 arch/arm/mach-vexpress/spc.h void ve_spc_cpu_wakeup_irq(u32 cluster, u32 cpu, bool set);
cpu                14 arch/arm/mach-vexpress/spc.h void ve_spc_set_resume_addr(u32 cluster, u32 cpu, u32 addr);
cpu                16 arch/arm/mach-vexpress/spc.h int ve_spc_cpu_in_wfi(u32 cpu, u32 cluster);
cpu                33 arch/arm/mach-vexpress/tc2_pm.c #define RESET_A15_NCORERESET(cpu)	(1 << (2 + (cpu)))
cpu                34 arch/arm/mach-vexpress/tc2_pm.c #define RESET_A7_NCORERESET(cpu)	(1 << (16 + (cpu)))
cpu                48 arch/arm/mach-vexpress/tc2_pm.c static int tc2_pm_cpu_powerup(unsigned int cpu, unsigned int cluster)
cpu                50 arch/arm/mach-vexpress/tc2_pm.c 	pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
cpu                51 arch/arm/mach-vexpress/tc2_pm.c 	if (cluster >= TC2_CLUSTERS || cpu >= tc2_nr_cpus[cluster])
cpu                53 arch/arm/mach-vexpress/tc2_pm.c 	ve_spc_set_resume_addr(cluster, cpu,
cpu                55 arch/arm/mach-vexpress/tc2_pm.c 	ve_spc_cpu_wakeup_irq(cluster, cpu, true);
cpu                68 arch/arm/mach-vexpress/tc2_pm.c static void tc2_pm_cpu_powerdown_prepare(unsigned int cpu, unsigned int cluster)
cpu                70 arch/arm/mach-vexpress/tc2_pm.c 	pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
cpu                71 arch/arm/mach-vexpress/tc2_pm.c 	BUG_ON(cluster >= TC2_CLUSTERS || cpu >= TC2_MAX_CPUS_PER_CLUSTER);
cpu                72 arch/arm/mach-vexpress/tc2_pm.c 	ve_spc_cpu_wakeup_irq(cluster, cpu, true);
cpu               114 arch/arm/mach-vexpress/tc2_pm.c static int tc2_core_in_reset(unsigned int cpu, unsigned int cluster)
cpu               117 arch/arm/mach-vexpress/tc2_pm.c 		  RESET_A7_NCORERESET(cpu)
cpu               118 arch/arm/mach-vexpress/tc2_pm.c 		: RESET_A15_NCORERESET(cpu);
cpu               126 arch/arm/mach-vexpress/tc2_pm.c static int tc2_pm_wait_for_powerdown(unsigned int cpu, unsigned int cluster)
cpu               130 arch/arm/mach-vexpress/tc2_pm.c 	pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
cpu               131 arch/arm/mach-vexpress/tc2_pm.c 	BUG_ON(cluster >= TC2_CLUSTERS || cpu >= TC2_MAX_CPUS_PER_CLUSTER);
cpu               135 arch/arm/mach-vexpress/tc2_pm.c 			 __func__, cpu, cluster,
cpu               146 arch/arm/mach-vexpress/tc2_pm.c 		if (tc2_core_in_reset(cpu, cluster) ||
cpu               147 arch/arm/mach-vexpress/tc2_pm.c 		    ve_spc_cpu_in_wfi(cpu, cluster))
cpu               157 arch/arm/mach-vexpress/tc2_pm.c static void tc2_pm_cpu_suspend_prepare(unsigned int cpu, unsigned int cluster)
cpu               159 arch/arm/mach-vexpress/tc2_pm.c 	ve_spc_set_resume_addr(cluster, cpu, __pa_symbol(mcpm_entry_point));
cpu               162 arch/arm/mach-vexpress/tc2_pm.c static void tc2_pm_cpu_is_up(unsigned int cpu, unsigned int cluster)
cpu               164 arch/arm/mach-vexpress/tc2_pm.c 	pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
cpu               165 arch/arm/mach-vexpress/tc2_pm.c 	BUG_ON(cluster >= TC2_CLUSTERS || cpu >= TC2_MAX_CPUS_PER_CLUSTER);
cpu               166 arch/arm/mach-vexpress/tc2_pm.c 	ve_spc_cpu_wakeup_irq(cluster, cpu, false);
cpu               167 arch/arm/mach-vexpress/tc2_pm.c 	ve_spc_set_resume_addr(cluster, cpu, 0);
cpu               204 arch/arm/mach-vexpress/tc2_pm.c 	unsigned int mpidr, cpu, cluster;
cpu               245 arch/arm/mach-vexpress/tc2_pm.c 	cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
cpu               247 arch/arm/mach-vexpress/tc2_pm.c 	pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
cpu               248 arch/arm/mach-vexpress/tc2_pm.c 	if (cluster >= TC2_CLUSTERS || cpu >= tc2_nr_cpus[cluster]) {
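
The spc.c and tc2_pm.c entries encode per-core status bits whose position depends on the cluster type: StandbyWFI status uses bit cpu for A15 cores and bit (3 + cpu) for A7 cores, while the reset register uses bit (2 + cpu) for A15 and bit (16 + cpu) for A7. A few lines reproducing just that mask selection; which numeric cluster index is the A15 one is an assumption here.

/* Cluster-dependent bit positions from the spc.c / tc2_pm.c lines above.
 * A15_CLUSTER == 0 is an assumption for this sketch. */
#include <stdint.h>
#include <stdio.h>

#define A15_CLUSTER	0

#define STANDBYWFI_STAT_A15_CPU_MASK(cpu)	(1u << (cpu))
#define STANDBYWFI_STAT_A7_CPU_MASK(cpu)	(1u << (3 + (cpu)))
#define RESET_A15_NCORERESET(cpu)		(1u << (2 + (cpu)))
#define RESET_A7_NCORERESET(cpu)		(1u << (16 + (cpu)))

static uint32_t standbywfi_cpu_mask(uint32_t cpu, uint32_t cluster)
{
	return cluster == A15_CLUSTER ? STANDBYWFI_STAT_A15_CPU_MASK(cpu)
				      : STANDBYWFI_STAT_A7_CPU_MASK(cpu);
}

static uint32_t ncorereset_mask(uint32_t cpu, uint32_t cluster)
{
	return cluster == A15_CLUSTER ? RESET_A15_NCORERESET(cpu)
				      : RESET_A7_NCORERESET(cpu);
}

int main(void)
{
	printf("A15 cpu1: wfi 0x%x, reset 0x%x\n",
	       standbywfi_cpu_mask(1, 0), ncorereset_mask(1, 0));
	printf("A7  cpu1: wfi 0x%x, reset 0x%x\n",
	       standbywfi_cpu_mask(1, 1), ncorereset_mask(1, 1));
	return 0;
}
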
cpu                98 arch/arm/mach-zx/platsmp.c static int zx_boot_secondary(unsigned int cpu, struct task_struct *idle)
cpu               103 arch/arm/mach-zx/platsmp.c 		arch_send_wakeup_ipi_mask(cpumask_of(cpu));
cpu               146 arch/arm/mach-zx/platsmp.c static int zx_cpu_kill(unsigned int cpu)
cpu               161 arch/arm/mach-zx/platsmp.c static void zx_cpu_die(unsigned int cpu)
cpu               171 arch/arm/mach-zx/platsmp.c static void zx_secondary_init(unsigned int cpu)
cpu                14 arch/arm/mach-zynq/common.h extern void zynq_slcr_cpu_stop(int cpu);
cpu                15 arch/arm/mach-zynq/common.h extern void zynq_slcr_cpu_start(int cpu);
cpu                16 arch/arm/mach-zynq/common.h extern bool zynq_slcr_cpu_state_read(int cpu);
cpu                17 arch/arm/mach-zynq/common.h extern void zynq_slcr_cpu_state_write(int cpu, bool die);
cpu                24 arch/arm/mach-zynq/common.h extern int zynq_cpun_start(u32 address, int cpu);
cpu                29 arch/arm/mach-zynq/platsmp.c int zynq_cpun_start(u32 address, int cpu)
cpu                42 arch/arm/mach-zynq/platsmp.c 		zynq_slcr_cpu_stop(cpu);
cpu                71 arch/arm/mach-zynq/platsmp.c 		zynq_slcr_cpu_start(cpu);
cpu                76 arch/arm/mach-zynq/platsmp.c 	pr_warn("Can't start CPU%d: Wrong starting address %x\n", cpu, address);
cpu                82 arch/arm/mach-zynq/platsmp.c static int zynq_boot_secondary(unsigned int cpu, struct task_struct *idle)
cpu                84 arch/arm/mach-zynq/platsmp.c 	return zynq_cpun_start(__pa_symbol(secondary_startup_arm), cpu);
cpu               113 arch/arm/mach-zynq/platsmp.c static void zynq_secondary_init(unsigned int cpu)
cpu               119 arch/arm/mach-zynq/platsmp.c static int zynq_cpu_kill(unsigned cpu)
cpu               123 arch/arm/mach-zynq/platsmp.c 	while (zynq_slcr_cpu_state_read(cpu))
cpu               127 arch/arm/mach-zynq/platsmp.c 	zynq_slcr_cpu_stop(cpu);
cpu               138 arch/arm/mach-zynq/platsmp.c static void zynq_cpu_die(unsigned int cpu)
cpu               140 arch/arm/mach-zynq/platsmp.c 	zynq_slcr_cpu_state_write(cpu, true);
cpu               122 arch/arm/mach-zynq/slcr.c void zynq_slcr_cpu_start(int cpu)
cpu               127 arch/arm/mach-zynq/slcr.c 	reg &= ~(SLCR_A9_CPU_RST << cpu);
cpu               129 arch/arm/mach-zynq/slcr.c 	reg &= ~(SLCR_A9_CPU_CLKSTOP << cpu);
cpu               132 arch/arm/mach-zynq/slcr.c 	zynq_slcr_cpu_state_write(cpu, false);
cpu               139 arch/arm/mach-zynq/slcr.c void zynq_slcr_cpu_stop(int cpu)
cpu               144 arch/arm/mach-zynq/slcr.c 	reg |= (SLCR_A9_CPU_CLKSTOP | SLCR_A9_CPU_RST) << cpu;
cpu               157 arch/arm/mach-zynq/slcr.c bool zynq_slcr_cpu_state_read(int cpu)
cpu               162 arch/arm/mach-zynq/slcr.c 	state &= 1 << (31 - cpu);
cpu               175 arch/arm/mach-zynq/slcr.c void zynq_slcr_cpu_state_write(int cpu, bool die)
cpu               180 arch/arm/mach-zynq/slcr.c 	mask = 1 << (31 - cpu);
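
The mach-zynq/slcr.c entries track a per-CPU "dying/dead" flag as a single bit at position (31 - cpu) in one register, alongside per-CPU reset and clock-stop bits shifted by the CPU number. A short sketch of those bit manipulations on plain variables; any register layout beyond what the listing shows (the SLCR_A9_* values) is assumed.

/* Per-CPU state / reset bit handling modelled on the slcr.c lines above.
 * The backing "registers" are plain variables; bit values are assumed. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SLCR_A9_CPU_CLKSTOP	0x10u    /* assumed values; only the shifts */
#define SLCR_A9_CPU_RST		0x01u    /* by 'cpu' come from the listing   */

static uint32_t a9_cpu_reg;              /* reset / clock-stop control */
static uint32_t boot_state_reg;          /* one state bit per CPU at (31 - cpu) */

static void cpu_stop(int cpu)
{
	a9_cpu_reg |= (SLCR_A9_CPU_CLKSTOP | SLCR_A9_CPU_RST) << cpu;
}

static void cpu_start(int cpu)
{
	a9_cpu_reg &= ~(SLCR_A9_CPU_RST << cpu);      /* release reset first */
	a9_cpu_reg &= ~(SLCR_A9_CPU_CLKSTOP << cpu);  /* then ungate the clock */
}

static void cpu_state_write(int cpu, bool die)
{
	uint32_t mask = 1u << (31 - cpu);

	if (die)
		boot_state_reg |= mask;
	else
		boot_state_reg &= ~mask;
}

static bool cpu_state_read(int cpu)
{
	return boot_state_reg & (1u << (31 - cpu));
}

int main(void)
{
	cpu_stop(1);
	cpu_state_write(1, true);
	printf("cpu1 marked dead: %d, ctrl 0x%08x\n", cpu_state_read(1), a9_cpu_reg);
	cpu_start(1);
	cpu_state_write(1, false);
	printf("cpu1 restarted: dead=%d, ctrl 0x%08x\n", cpu_state_read(1), a9_cpu_reg);
	return 0;
}
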
cpu               147 arch/arm/mm/cache-b15-rac.c 	unsigned int cpu;
cpu               150 arch/arm/mm/cache-b15-rac.c 	for_each_possible_cpu(cpu)
cpu               151 arch/arm/mm/cache-b15-rac.c 		enable |= (RAC_DATA_INST_EN_MASK << (cpu * RAC_CPU_SHIFT));
cpu               217 arch/arm/mm/cache-b15-rac.c static int b15_rac_dying_cpu(unsigned int cpu)
cpu               239 arch/arm/mm/cache-b15-rac.c static int b15_rac_dead_cpu(unsigned int cpu)
cpu               292 arch/arm/mm/cache-b15-rac.c 	int ret = 0, cpu;
cpu               353 arch/arm/mm/cache-b15-rac.c 	for_each_possible_cpu(cpu)
cpu               354 arch/arm/mm/cache-b15-rac.c 		en_mask |= ((1 << RACPREFDATA_SHIFT) << (cpu * RAC_CPU_SHIFT));
cpu               305 arch/arm/mm/cache-l2x0-pmu.c 	if (event->cpu < 0)
cpu               316 arch/arm/mm/cache-l2x0-pmu.c 	event->cpu = cpumask_first(&pmu_cpu);
cpu               424 arch/arm/mm/cache-l2x0-pmu.c static int l2x0_pmu_offline_cpu(unsigned int cpu)
cpu               428 arch/arm/mm/cache-l2x0-pmu.c 	if (!cpumask_test_and_clear_cpu(cpu, &pmu_cpu))
cpu               431 arch/arm/mm/cache-l2x0-pmu.c 	target = cpumask_any_but(cpu_online_mask, cpu);
cpu               435 arch/arm/mm/cache-l2x0-pmu.c 	perf_pmu_migrate_context(l2x0_pmu, cpu, target);
cpu               595 arch/arm/mm/cache-l2x0.c static int l2c310_starting_cpu(unsigned int cpu)
cpu               601 arch/arm/mm/cache-l2x0.c static int l2c310_dying_cpu(unsigned int cpu)
cpu               230 arch/arm/mm/cache-uniphier.c 	unsigned int cpu;
cpu               232 arch/arm/mm/cache-uniphier.c 	for_each_possible_cpu(cpu)
cpu               233 arch/arm/mm/cache-uniphier.c 		writel_relaxed(data->way_mask, data->way_ctrl_base + 4 * cpu);
cpu                54 arch/arm/mm/context.c 	int cpu;
cpu                60 arch/arm/mm/context.c 	for_each_online_cpu(cpu) {
cpu                61 arch/arm/mm/context.c 		if (cpu == this_cpu)
cpu                67 arch/arm/mm/context.c 		asid = per_cpu(active_asids, cpu).counter;
cpu                69 arch/arm/mm/context.c 			asid = per_cpu(reserved_asids, cpu);
cpu                71 arch/arm/mm/context.c 			cpumask_set_cpu(cpu, mask);
cpu               136 arch/arm/mm/context.c static void flush_context(unsigned int cpu)
cpu               167 arch/arm/mm/context.c 	int cpu;
cpu               179 arch/arm/mm/context.c 	for_each_possible_cpu(cpu) {
cpu               180 arch/arm/mm/context.c 		if (per_cpu(reserved_asids, cpu) == asid) {
cpu               182 arch/arm/mm/context.c 			per_cpu(reserved_asids, cpu) = newasid;
cpu               189 arch/arm/mm/context.c static u64 new_context(struct mm_struct *mm, unsigned int cpu)
cpu               227 arch/arm/mm/context.c 		flush_context(cpu);
cpu               240 arch/arm/mm/context.c 	unsigned int cpu = smp_processor_id();
cpu               255 arch/arm/mm/context.c 	    && atomic64_xchg(&per_cpu(active_asids, cpu), asid))
cpu               262 arch/arm/mm/context.c 		asid = new_context(mm, cpu);
cpu               266 arch/arm/mm/context.c 	if (cpumask_test_and_clear_cpu(cpu, &tlb_flush_pending)) {
cpu               271 arch/arm/mm/context.c 	atomic64_set(&per_cpu(active_asids, cpu), asid);
cpu               272 arch/arm/mm/context.c 	cpumask_set_cpu(cpu, mm_cpumask(mm));
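
The arch/arm/mm/context.c entries belong to the ASID allocator: each mm's context value combines a generation number with a hardware ASID, per-CPU active and reserved ASIDs are tracked, and when the ASID space is exhausted the generation is bumped and TLBs are flushed. The following is a heavily simplified, single-threaded model of just the generation-plus-rollover idea, with a fixed 8-bit ASID space and none of the per-CPU reserved/active bookkeeping; it is not the kernel algorithm.

/* Simplified model: context = generation | asid; on exhaustion, bump the
 * generation and "flush the TLBs". Illustrative only. */
#include <stdint.h>
#include <stdio.h>

#define ASID_BITS	8
#define NUM_ASIDS	(1u << ASID_BITS)
#define ASID_MASK	((uint64_t)NUM_ASIDS - 1)
#define GEN_STEP	((uint64_t)NUM_ASIDS)	/* generation lives above the ASID bits */

static uint64_t asid_generation = GEN_STEP;
static uint64_t next_asid = 1;			/* ASID 0 stays reserved */

static uint64_t new_context(void)
{
	if (next_asid == NUM_ASIDS) {		/* ASID space exhausted: roll over */
		asid_generation += GEN_STEP;
		next_asid = 1;
		printf("rollover to generation %llu: flush all TLBs\n",
		       (unsigned long long)(asid_generation >> ASID_BITS));
	}
	return asid_generation | next_asid++;
}

static uint64_t check_and_switch_context(uint64_t mm_context)
{
	/* Only contexts from the current generation may be reused. */
	if ((mm_context ^ asid_generation) >> ASID_BITS)
		mm_context = new_context();
	return mm_context;
}

int main(void)
{
	uint64_t ctx = check_and_switch_context(0);	/* first use: allocate */
	int i;

	for (i = 0; i < 300; i++)			/* force one rollover */
		new_context();
	ctx = check_and_switch_context(ctx);		/* stale generation: reallocated */
	printf("asid %llu, generation %llu\n",
	       (unsigned long long)(ctx & ASID_MASK),
	       (unsigned long long)(ctx >> ASID_BITS));
	return 0;
}
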
cpu                43 arch/arm/mm/proc-v7-bugs.c 	int cpu = smp_processor_id();
cpu                45 arch/arm/mm/proc-v7-bugs.c 	if (per_cpu(harden_branch_predictor_fn, cpu))
cpu                55 arch/arm/mm/proc-v7-bugs.c 		per_cpu(harden_branch_predictor_fn, cpu) =
cpu                62 arch/arm/mm/proc-v7-bugs.c 		per_cpu(harden_branch_predictor_fn, cpu) =
cpu                90 arch/arm/mm/proc-v7-bugs.c 			per_cpu(harden_branch_predictor_fn, cpu) =
cpu               101 arch/arm/mm/proc-v7-bugs.c 			per_cpu(harden_branch_predictor_fn, cpu) =
cpu                95 arch/arm/plat-samsung/adc.c 	enum s3c_cpu_type cpu = platform_get_device_id(adc->pdev)->driver_data;
cpu                99 arch/arm/plat-samsung/adc.c 	if (cpu == TYPE_ADCV1 || cpu == TYPE_ADCV2)
cpu               105 arch/arm/plat-samsung/adc.c 		if (cpu == TYPE_ADCV3)
cpu               107 arch/arm/plat-samsung/adc.c 		else if (cpu == TYPE_ADCV11 || cpu == TYPE_ADCV12)
cpu               284 arch/arm/plat-samsung/adc.c 	enum s3c_cpu_type cpu = platform_get_device_id(adc->pdev)->driver_data;
cpu               298 arch/arm/plat-samsung/adc.c 	if (cpu == TYPE_ADCV1 || cpu == TYPE_ADCV11) {
cpu               325 arch/arm/plat-samsung/adc.c 	if (cpu == TYPE_ADCV2 || cpu == TYPE_ADCV3) {
cpu               337 arch/arm/plat-samsung/adc.c 	enum s3c_cpu_type cpu = platform_get_device_id(pdev)->driver_data;
cpu               389 arch/arm/plat-samsung/adc.c 	if (cpu == TYPE_ADCV12)
cpu               391 arch/arm/plat-samsung/adc.c 	if (cpu == TYPE_ADCV2 || cpu == TYPE_ADCV3)
cpu               439 arch/arm/plat-samsung/adc.c 	enum s3c_cpu_type cpu = platform_get_device_id(pdev)->driver_data;
cpu               452 arch/arm/plat-samsung/adc.c 	if (cpu == TYPE_ADCV12)
cpu               454 arch/arm/plat-samsung/adc.c 	if (cpu == TYPE_ADCV2 || cpu == TYPE_ADCV3)
cpu                29 arch/arm/plat-samsung/init.c static struct cpu_table *cpu;
cpu                46 arch/arm/plat-samsung/init.c 	cpu = s3c_lookup_cpu(idcode, cputab, cputab_size);
cpu                48 arch/arm/plat-samsung/init.c 	if (cpu == NULL) {
cpu                53 arch/arm/plat-samsung/init.c 	printk("CPU %s (id 0x%08lx)\n", cpu->name, idcode);
cpu                55 arch/arm/plat-samsung/init.c 	if (cpu->init == NULL) {
cpu                56 arch/arm/plat-samsung/init.c 		printk(KERN_ERR "CPU %s support not enabled\n", cpu->name);
cpu                60 arch/arm/plat-samsung/init.c 	if (cpu->map_io)
cpu                61 arch/arm/plat-samsung/init.c 		cpu->map_io();
cpu                78 arch/arm/plat-samsung/init.c 	if (cpu == NULL)
cpu                81 arch/arm/plat-samsung/init.c 	if (cpu->init_clocks == NULL)
cpu                84 arch/arm/plat-samsung/init.c 		(cpu->init_clocks)(xtal);
cpu               136 arch/arm/plat-samsung/init.c 	if (cpu == NULL)
cpu               139 arch/arm/plat-samsung/init.c 	if (cpu->init_uarts == NULL && IS_ENABLED(CONFIG_SAMSUNG_ATAGS)) {
cpu               142 arch/arm/plat-samsung/init.c 		(cpu->init_uarts)(cfg, no);
cpu               157 arch/arm/plat-samsung/init.c 	if (cpu == NULL) {
cpu               164 arch/arm/plat-samsung/init.c 	ret = (cpu->init)();
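
The plat-samsung/init.c entries drive setup through a static CPU table: the chip idcode is looked up, the matching entry's name is printed, and its map_io / init_clocks / init_uarts / init callbacks are invoked when present. A compact model of that lookup-and-dispatch; the table fields and the id/mask matching rule are plausible assumptions, not copied from the header.

/* Lookup-and-dispatch on a CPU table, in the spirit of the init.c lines
 * above. Fields, values and the mask-based match are illustrative. */
#include <stddef.h>
#include <stdio.h>

struct cpu_table {
	unsigned long idcode;
	unsigned long idmask;
	const char *name;
	int (*init)(void);
};

static int s3c2440_init(void) { printf("s3c2440 init\n"); return 0; }

static struct cpu_table cputab[] = {
	{ .idcode = 0x32440000, .idmask = 0xffff0000,
	  .name = "S3C2440", .init = s3c2440_init },	/* made-up entry */
};

static struct cpu_table *lookup_cpu(unsigned long idcode,
				    struct cpu_table *tab, unsigned int count)
{
	for (; count != 0; count--, tab++)
		if ((idcode & tab->idmask) == tab->idcode)
			return tab;
	return NULL;
}

int main(void)
{
	unsigned long idcode = 0x32440001;
	struct cpu_table *cpu = lookup_cpu(idcode, cputab, 1);

	if (cpu == NULL) {
		printf("Unknown CPU type\n");
		return 1;
	}
	printf("CPU %s (id 0x%08lx)\n", cpu->name, idcode);
	if (cpu->init)
		cpu->init();
	return 0;
}
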
cpu                57 arch/arm/plat-versatile/hotplug.c static inline void versatile_immitation_do_lowpower(unsigned int cpu, int *spurious)
cpu                69 arch/arm/plat-versatile/hotplug.c 		if (versatile_cpu_release == cpu_logical_map(cpu)) {
cpu                92 arch/arm/plat-versatile/hotplug.c void versatile_immitation_cpu_die(unsigned int cpu, unsigned int actrl_mask)
cpu                97 arch/arm/plat-versatile/hotplug.c 	versatile_immitation_do_lowpower(cpu, &spurious);
cpu               101 arch/arm/plat-versatile/hotplug.c 		pr_warn("CPU%u: %u spurious wakeup calls\n", cpu, spurious);
cpu                11 arch/arm/plat-versatile/include/plat/platsmp.h extern void versatile_secondary_init(unsigned int cpu);
cpu                12 arch/arm/plat-versatile/include/plat/platsmp.h extern int  versatile_boot_secondary(unsigned int cpu, struct task_struct *idle);
cpu                13 arch/arm/plat-versatile/include/plat/platsmp.h void versatile_immitation_cpu_die(unsigned int cpu, unsigned int actrl_mask);
cpu                53 arch/arm/plat-versatile/platsmp.c void versatile_secondary_init(unsigned int cpu)
cpu                68 arch/arm/plat-versatile/platsmp.c int versatile_boot_secondary(unsigned int cpu, struct task_struct *idle)
cpu                84 arch/arm/plat-versatile/platsmp.c 	versatile_write_cpu_release(cpu_logical_map(cpu));
cpu                91 arch/arm/plat-versatile/platsmp.c 	arch_send_wakeup_ipi_mask(cpumask_of(cpu));
cpu                62 arch/arm/vfp/vfpmodule.c static bool vfp_state_in_hw(unsigned int cpu, struct thread_info *thread)
cpu                65 arch/arm/vfp/vfpmodule.c 	if (thread->vfpstate.hard.cpu != cpu)
cpu                68 arch/arm/vfp/vfpmodule.c 	return vfp_current_hw_state[cpu] == &thread->vfpstate;
cpu                76 arch/arm/vfp/vfpmodule.c static void vfp_force_reload(unsigned int cpu, struct thread_info *thread)
cpu                78 arch/arm/vfp/vfpmodule.c 	if (vfp_state_in_hw(cpu, thread)) {
cpu                80 arch/arm/vfp/vfpmodule.c 		vfp_current_hw_state[cpu] = NULL;
cpu                83 arch/arm/vfp/vfpmodule.c 	thread->vfpstate.hard.cpu = NR_CPUS;
cpu                93 arch/arm/vfp/vfpmodule.c 	unsigned int cpu;
cpu               103 arch/arm/vfp/vfpmodule.c 	cpu = get_cpu();
cpu               104 arch/arm/vfp/vfpmodule.c 	if (vfp_current_hw_state[cpu] == vfp)
cpu               105 arch/arm/vfp/vfpmodule.c 		vfp_current_hw_state[cpu] = NULL;
cpu               114 arch/arm/vfp/vfpmodule.c 	vfp->hard.cpu = NR_CPUS;
cpu               122 arch/arm/vfp/vfpmodule.c 	unsigned int cpu = get_cpu();
cpu               124 arch/arm/vfp/vfpmodule.c 	if (vfp_current_hw_state[cpu] == vfp)
cpu               125 arch/arm/vfp/vfpmodule.c 		vfp_current_hw_state[cpu] = NULL;
cpu               136 arch/arm/vfp/vfpmodule.c 	thread->vfpstate.hard.cpu = NR_CPUS;
cpu               164 arch/arm/vfp/vfpmodule.c 	unsigned int cpu;
cpu               172 arch/arm/vfp/vfpmodule.c 		cpu = thread->cpu;
cpu               179 arch/arm/vfp/vfpmodule.c 		if ((fpexc & FPEXC_EN) && vfp_current_hw_state[cpu])
cpu               180 arch/arm/vfp/vfpmodule.c 			vfp_save_state(vfp_current_hw_state[cpu], fpexc);
cpu               461 arch/arm/vfp/vfpmodule.c 	} else if (vfp_current_hw_state[ti->cpu]) {
cpu               464 arch/arm/vfp/vfpmodule.c 		vfp_save_state(vfp_current_hw_state[ti->cpu], fpexc);
cpu               470 arch/arm/vfp/vfpmodule.c 	vfp_current_hw_state[ti->cpu] = NULL;
cpu               518 arch/arm/vfp/vfpmodule.c 	unsigned int cpu = get_cpu();
cpu               520 arch/arm/vfp/vfpmodule.c 	if (vfp_state_in_hw(cpu, thread)) {
cpu               537 arch/arm/vfp/vfpmodule.c 	unsigned int cpu = get_cpu();
cpu               539 arch/arm/vfp/vfpmodule.c 	vfp_force_reload(cpu, thread);
cpu               633 arch/arm/vfp/vfpmodule.c static int vfp_dying_cpu(unsigned int cpu)
cpu               635 arch/arm/vfp/vfpmodule.c 	vfp_current_hw_state[cpu] = NULL;
cpu               673 arch/arm/vfp/vfpmodule.c 	unsigned int cpu;
cpu               682 arch/arm/vfp/vfpmodule.c 	cpu = get_cpu();
cpu               691 arch/arm/vfp/vfpmodule.c 	if (vfp_state_in_hw(cpu, thread))
cpu               694 arch/arm/vfp/vfpmodule.c 	else if (vfp_current_hw_state[cpu] != NULL)
cpu               695 arch/arm/vfp/vfpmodule.c 		vfp_save_state(vfp_current_hw_state[cpu], fpexc);
cpu               697 arch/arm/vfp/vfpmodule.c 	vfp_current_hw_state[cpu] = NULL;
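
The vfp/vfpmodule.c entries implement lazy VFP context handling: vfp_current_hw_state[cpu] points at the thread state last loaded into that CPU's VFP hardware, each thread records the CPU it last used VFP on, and state is saved and reloaded only when that ownership no longer matches. A stripped-down model of the ownership check; the structure contents and NR_CPUS are simplified placeholders.

/* Minimal model of "which thread's FP state is live in this CPU's
 * registers", following the vfpmodule.c lines above. */
#include <stddef.h>
#include <stdbool.h>
#include <stdio.h>

#define NR_CPUS 2

struct vfp_state {
	unsigned int cpu;	/* CPU this state was last loaded on */
	const char *owner;	/* for printing only */
};

static struct vfp_state *vfp_current_hw_state[NR_CPUS];

static bool vfp_state_in_hw(unsigned int cpu, struct vfp_state *st)
{
	/* Both directions must agree: the thread last ran VFP on this CPU,
	 * and this CPU still holds that thread's state. */
	if (st->cpu != cpu)
		return false;
	return vfp_current_hw_state[cpu] == st;
}

static void vfp_load(unsigned int cpu, struct vfp_state *st)
{
	if (vfp_state_in_hw(cpu, st))
		return;				/* registers already valid: nothing to do */
	if (vfp_current_hw_state[cpu])
		printf("save state of %s\n", vfp_current_hw_state[cpu]->owner);
	printf("load state of %s on cpu%u\n", st->owner, cpu);
	vfp_current_hw_state[cpu] = st;
	st->cpu = cpu;
}

int main(void)
{
	struct vfp_state a = { NR_CPUS, "A" }, b = { NR_CPUS, "B" };

	vfp_load(0, &a);	/* loads A */
	vfp_load(0, &a);	/* no-op: A already live on cpu0 */
	vfp_load(0, &b);	/* saves A, loads B */
	return 0;
}
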
cpu               136 arch/arm/xen/enlighten.c static int xen_starting_cpu(unsigned int cpu)
cpu               147 arch/arm/xen/enlighten.c 	if (per_cpu(xen_vcpu, cpu) != NULL)
cpu               150 arch/arm/xen/enlighten.c 	pr_info("Xen: initializing cpu%d\n", cpu);
cpu               151 arch/arm/xen/enlighten.c 	vcpup = per_cpu_ptr(xen_vcpu_info, cpu);
cpu               156 arch/arm/xen/enlighten.c 	err = HYPERVISOR_vcpu_op(VCPUOP_register_vcpu_info, xen_vcpu_nr(cpu),
cpu               159 arch/arm/xen/enlighten.c 	per_cpu(xen_vcpu, cpu) = vcpup;
cpu               161 arch/arm/xen/enlighten.c 	xen_setup_runstate_info(cpu);
cpu               168 arch/arm/xen/enlighten.c static int xen_dying_cpu(unsigned int cpu)
cpu               312 arch/arm/xen/enlighten.c 	int cpu;
cpu               362 arch/arm/xen/enlighten.c 	for_each_possible_cpu(cpu)
cpu               363 arch/arm/xen/enlighten.c 		per_cpu(xen_vcpu_id, cpu) = cpu;
cpu               731 arch/arm64/crypto/ghash-ce-glue.c MODULE_DEVICE_TABLE(cpu, ghash_cpu_feature);
cpu                93 arch/arm64/include/asm/acpi.h #define cpu_physical_id(cpu) cpu_logical_map(cpu)
cpu               107 arch/arm64/include/asm/acpi.h struct acpi_madt_generic_interrupt *acpi_cpu_get_madt_gicc(int cpu);
cpu               108 arch/arm64/include/asm/acpi.h static inline u32 get_acpi_id_for_cpu(unsigned int cpu)
cpu               110 arch/arm64/include/asm/acpi.h 	return	acpi_cpu_get_madt_gicc(cpu)->uid;
cpu               122 arch/arm64/include/asm/acpi.h bool acpi_parking_protocol_valid(int cpu);
cpu               124 arch/arm64/include/asm/acpi.h acpi_set_mailbox_entry(int cpu, struct acpi_madt_generic_interrupt *processor);
cpu               126 arch/arm64/include/asm/acpi.h static inline bool acpi_parking_protocol_valid(int cpu) { return false; }
cpu               128 arch/arm64/include/asm/acpi.h acpi_set_mailbox_entry(int cpu, struct acpi_madt_generic_interrupt *processor)
cpu               132 arch/arm64/include/asm/acpi.h static inline const char *acpi_get_enable_method(int cpu)
cpu               137 arch/arm64/include/asm/acpi.h 	if (acpi_parking_protocol_valid(cpu))
cpu               160 arch/arm64/include/asm/acpi.h int acpi_numa_get_nid(unsigned int cpu);
cpu               164 arch/arm64/include/asm/acpi.h static inline int acpi_numa_get_nid(unsigned int cpu) { return NUMA_NO_NODE; }
cpu                16 arch/arm64/include/asm/cpu.h 	struct cpu	cpu;
cpu                63 arch/arm64/include/asm/cpu.h void update_cpu_features(int cpu, struct cpuinfo_arm64 *info,
cpu                47 arch/arm64/include/asm/cpu_ops.h 	bool		(*cpu_can_disable)(unsigned int cpu);
cpu                48 arch/arm64/include/asm/cpu_ops.h 	int		(*cpu_disable)(unsigned int cpu);
cpu                49 arch/arm64/include/asm/cpu_ops.h 	void		(*cpu_die)(unsigned int cpu);
cpu                50 arch/arm64/include/asm/cpu_ops.h 	int		(*cpu_kill)(unsigned int cpu);
cpu                59 arch/arm64/include/asm/cpu_ops.h int __init cpu_read_ops(int cpu);
cpu                 8 arch/arm64/include/asm/cpuidle.h extern int arm_cpuidle_init(unsigned int cpu);
cpu                11 arch/arm64/include/asm/cpuidle.h static inline int arm_cpuidle_init(unsigned int cpu)
cpu                25 arch/arm64/include/asm/hardirq.h #define __inc_irq_stat(cpu, member)	__IRQ_STAT(cpu, member)++
cpu                26 arch/arm64/include/asm/hardirq.h #define __get_irq_stat(cpu, member)	__IRQ_STAT(cpu, member)
cpu                28 arch/arm64/include/asm/hardirq.h u64 smp_irq_stat_cpu(unsigned int cpu);
cpu               111 arch/arm64/include/asm/kvm_asm.h 	get_host_ctxt \ctxt, \vcpu
cpu               112 arch/arm64/include/asm/kvm_asm.h 	ldr	\vcpu, [\ctxt, #HOST_CONTEXT_VCPU]
cpu               113 arch/arm64/include/asm/kvm_asm.h 	kern_hyp_va	\vcpu
cpu               549 arch/arm64/include/asm/kvm_host.h static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
cpu               550 arch/arm64/include/asm/kvm_mmu.h 	int cpu, err;
cpu               552 arch/arm64/include/asm/kvm_mmu.h 	for_each_possible_cpu(cpu) {
cpu               555 arch/arm64/include/asm/kvm_mmu.h 		ptr = per_cpu_ptr(&arm64_ssbd_callback_required, cpu);
cpu               176 arch/arm64/include/asm/mmu_context.h void check_and_switch_context(struct mm_struct *mm, unsigned int cpu);
cpu               215 arch/arm64/include/asm/mmu_context.h 	unsigned int cpu = smp_processor_id();
cpu               226 arch/arm64/include/asm/mmu_context.h 	check_and_switch_context(next, cpu);
cpu                20 arch/arm64/include/asm/numa.h void numa_clear_node(unsigned int cpu);
cpu                36 arch/arm64/include/asm/numa.h void __init early_map_cpu_to_node(unsigned int cpu, int nid);
cpu                37 arch/arm64/include/asm/numa.h void numa_store_cpu_info(unsigned int cpu);
cpu                38 arch/arm64/include/asm/numa.h void numa_add_cpu(unsigned int cpu);
cpu                39 arch/arm64/include/asm/numa.h void numa_remove_cpu(unsigned int cpu);
cpu                43 arch/arm64/include/asm/numa.h static inline void numa_store_cpu_info(unsigned int cpu) { }
cpu                44 arch/arm64/include/asm/numa.h static inline void numa_add_cpu(unsigned int cpu) { }
cpu                45 arch/arm64/include/asm/numa.h static inline void numa_remove_cpu(unsigned int cpu) { }
cpu                47 arch/arm64/include/asm/numa.h static inline void early_map_cpu_to_node(unsigned int cpu, int nid) { }
cpu                11 arch/arm64/include/asm/paravirt.h 	unsigned long long (*steal_clock)(int cpu);
cpu                20 arch/arm64/include/asm/paravirt.h static inline u64 paravirt_steal_clock(int cpu)
cpu                22 arch/arm64/include/asm/paravirt.h 	return pv_ops.time.steal_clock(cpu);
cpu                25 arch/arm64/include/asm/preempt.h #define init_idle_preempt_count(p, cpu) do { \
cpu                49 arch/arm64/include/asm/smp.h #define cpu_logical_map(cpu)    __cpu_logical_map[cpu]
cpu                97 arch/arm64/include/asm/smp.h extern void arch_send_call_function_single_ipi(int cpu);
cpu               111 arch/arm64/include/asm/smp.h extern void __cpu_die(unsigned int cpu);
cpu                36 arch/arm64/include/asm/smp_plat.h 	int cpu;
cpu                37 arch/arm64/include/asm/smp_plat.h 	for (cpu = 0; cpu < nr_cpu_ids; cpu++)
cpu                38 arch/arm64/include/asm/smp_plat.h 		if (cpu_logical_map(cpu) == mpidr)
cpu                39 arch/arm64/include/asm/smp_plat.h 			return cpu;
cpu                32 arch/arm64/kernel/acpi_numa.c int __init acpi_numa_get_nid(unsigned int cpu)
cpu                34 arch/arm64/kernel/acpi_numa.c 	return acpi_early_node_map[cpu];
cpu                39 arch/arm64/kernel/acpi_numa.c 	int cpu;
cpu                41 arch/arm64/kernel/acpi_numa.c 	for (cpu = 0; cpu < nr_cpu_ids; cpu++)
cpu                42 arch/arm64/kernel/acpi_numa.c 		if (uid == get_acpi_id_for_cpu(cpu))
cpu                43 arch/arm64/kernel/acpi_numa.c 			return cpu;
cpu                52 arch/arm64/kernel/acpi_numa.c 	int cpu, pxm, node;
cpu                73 arch/arm64/kernel/acpi_numa.c 	cpu = get_cpu_for_acpi_id(pa->acpi_processor_uid);
cpu                74 arch/arm64/kernel/acpi_numa.c 	if (cpu < 0)
cpu                77 arch/arm64/kernel/acpi_numa.c 	acpi_early_node_map[cpu] = node;
cpu                79 arch/arm64/kernel/acpi_numa.c 		cpu_logical_map(cpu), node);
cpu                29 arch/arm64/kernel/acpi_parking_protocol.c void __init acpi_set_mailbox_entry(int cpu,
cpu                32 arch/arm64/kernel/acpi_parking_protocol.c 	struct cpu_mailbox_entry *cpu_entry = &cpu_mailbox_entries[cpu];
cpu                39 arch/arm64/kernel/acpi_parking_protocol.c bool acpi_parking_protocol_valid(int cpu)
cpu                41 arch/arm64/kernel/acpi_parking_protocol.c 	struct cpu_mailbox_entry *cpu_entry = &cpu_mailbox_entries[cpu];
cpu                46 arch/arm64/kernel/acpi_parking_protocol.c static int acpi_parking_protocol_cpu_init(unsigned int cpu)
cpu                49 arch/arm64/kernel/acpi_parking_protocol.c 		  cpu_mailbox_entries[cpu].mailbox_addr);
cpu                54 arch/arm64/kernel/acpi_parking_protocol.c static int acpi_parking_protocol_cpu_prepare(unsigned int cpu)
cpu                59 arch/arm64/kernel/acpi_parking_protocol.c static int acpi_parking_protocol_cpu_boot(unsigned int cpu)
cpu                61 arch/arm64/kernel/acpi_parking_protocol.c 	struct cpu_mailbox_entry *cpu_entry = &cpu_mailbox_entries[cpu];
cpu               105 arch/arm64/kernel/acpi_parking_protocol.c 	arch_send_wakeup_ipi_mask(cpumask_of(cpu));
cpu               112 arch/arm64/kernel/acpi_parking_protocol.c 	int cpu = smp_processor_id();
cpu               113 arch/arm64/kernel/acpi_parking_protocol.c 	struct cpu_mailbox_entry *cpu_entry = &cpu_mailbox_entries[cpu];
cpu               119 arch/arm64/kernel/armv8_deprecated.c static int run_all_insn_set_hw_mode(unsigned int cpu)
cpu               130 arch/arm64/kernel/armv8_deprecated.c 				cpu, insn->ops->name);
cpu                46 arch/arm64/kernel/cacheinfo.c static int __init_cache_level(unsigned int cpu)
cpu                49 arch/arm64/kernel/cacheinfo.c 	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
cpu                62 arch/arm64/kernel/cacheinfo.c 		fw_level = of_find_last_cache_level(cpu);
cpu                64 arch/arm64/kernel/cacheinfo.c 		fw_level = acpi_find_last_cache_level(cpu);
cpu                81 arch/arm64/kernel/cacheinfo.c static int __populate_cache_leaves(unsigned int cpu)
cpu                85 arch/arm64/kernel/cacheinfo.c 	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
cpu               137 arch/arm64/kernel/cpu_errata.c 	int cpu, slot = -1;
cpu               149 arch/arm64/kernel/cpu_errata.c 	for_each_possible_cpu(cpu) {
cpu               150 arch/arm64/kernel/cpu_errata.c 		if (per_cpu(bp_hardening_data.fn, cpu) == fn) {
cpu               151 arch/arm64/kernel/cpu_errata.c 			slot = per_cpu(bp_hardening_data.hyp_vectors_slot, cpu);
cpu                53 arch/arm64/kernel/cpu_ops.c static const char *__init cpu_read_enable_method(int cpu)
cpu                58 arch/arm64/kernel/cpu_ops.c 		struct device_node *dn = of_get_cpu_node(cpu, NULL);
cpu                61 arch/arm64/kernel/cpu_ops.c 			if (!cpu)
cpu                73 arch/arm64/kernel/cpu_ops.c 			if (cpu != 0)
cpu                79 arch/arm64/kernel/cpu_ops.c 		enable_method = acpi_get_enable_method(cpu);
cpu                87 arch/arm64/kernel/cpu_ops.c 			if (cpu != 0)
cpu                97 arch/arm64/kernel/cpu_ops.c int __init cpu_read_ops(int cpu)
cpu                99 arch/arm64/kernel/cpu_ops.c 	const char *enable_method = cpu_read_enable_method(cpu);
cpu               104 arch/arm64/kernel/cpu_ops.c 	cpu_ops[cpu] = cpu_get_ops(enable_method);
cpu               105 arch/arm64/kernel/cpu_ops.c 	if (!cpu_ops[cpu]) {
cpu               647 arch/arm64/kernel/cpufeature.c static int check_update_ftr_reg(u32 sys_id, int cpu, u64 val, u64 boot)
cpu               656 arch/arm64/kernel/cpufeature.c 			regp->name, boot, cpu, val);
cpu               665 arch/arm64/kernel/cpufeature.c void update_cpu_features(int cpu,
cpu               676 arch/arm64/kernel/cpufeature.c 	taint |= check_update_ftr_reg(SYS_CTR_EL0, cpu,
cpu               684 arch/arm64/kernel/cpufeature.c 	taint |= check_update_ftr_reg(SYS_DCZID_EL0, cpu,
cpu               688 arch/arm64/kernel/cpufeature.c 	taint |= check_update_ftr_reg(SYS_CNTFRQ_EL0, cpu,
cpu               697 arch/arm64/kernel/cpufeature.c 	taint |= check_update_ftr_reg(SYS_ID_AA64DFR0_EL1, cpu,
cpu               699 arch/arm64/kernel/cpufeature.c 	taint |= check_update_ftr_reg(SYS_ID_AA64DFR1_EL1, cpu,
cpu               705 arch/arm64/kernel/cpufeature.c 	taint |= check_update_ftr_reg(SYS_ID_AA64ISAR0_EL1, cpu,
cpu               707 arch/arm64/kernel/cpufeature.c 	taint |= check_update_ftr_reg(SYS_ID_AA64ISAR1_EL1, cpu,
cpu               715 arch/arm64/kernel/cpufeature.c 	taint |= check_update_ftr_reg(SYS_ID_AA64MMFR0_EL1, cpu,
cpu               717 arch/arm64/kernel/cpufeature.c 	taint |= check_update_ftr_reg(SYS_ID_AA64MMFR1_EL1, cpu,
cpu               719 arch/arm64/kernel/cpufeature.c 	taint |= check_update_ftr_reg(SYS_ID_AA64MMFR2_EL1, cpu,
cpu               725 arch/arm64/kernel/cpufeature.c 	taint |= check_update_ftr_reg(SYS_ID_AA64PFR0_EL1, cpu,
cpu               727 arch/arm64/kernel/cpufeature.c 	taint |= check_update_ftr_reg(SYS_ID_AA64PFR1_EL1, cpu,
cpu               730 arch/arm64/kernel/cpufeature.c 	taint |= check_update_ftr_reg(SYS_ID_AA64ZFR0_EL1, cpu,
cpu               740 arch/arm64/kernel/cpufeature.c 		taint |= check_update_ftr_reg(SYS_ID_DFR0_EL1, cpu,
cpu               742 arch/arm64/kernel/cpufeature.c 		taint |= check_update_ftr_reg(SYS_ID_ISAR0_EL1, cpu,
cpu               744 arch/arm64/kernel/cpufeature.c 		taint |= check_update_ftr_reg(SYS_ID_ISAR1_EL1, cpu,
cpu               746 arch/arm64/kernel/cpufeature.c 		taint |= check_update_ftr_reg(SYS_ID_ISAR2_EL1, cpu,
cpu               748 arch/arm64/kernel/cpufeature.c 		taint |= check_update_ftr_reg(SYS_ID_ISAR3_EL1, cpu,
cpu               750 arch/arm64/kernel/cpufeature.c 		taint |= check_update_ftr_reg(SYS_ID_ISAR4_EL1, cpu,
cpu               752 arch/arm64/kernel/cpufeature.c 		taint |= check_update_ftr_reg(SYS_ID_ISAR5_EL1, cpu,
cpu               760 arch/arm64/kernel/cpufeature.c 		taint |= check_update_ftr_reg(SYS_ID_MMFR0_EL1, cpu,
cpu               762 arch/arm64/kernel/cpufeature.c 		taint |= check_update_ftr_reg(SYS_ID_MMFR1_EL1, cpu,
cpu               764 arch/arm64/kernel/cpufeature.c 		taint |= check_update_ftr_reg(SYS_ID_MMFR2_EL1, cpu,
cpu               766 arch/arm64/kernel/cpufeature.c 		taint |= check_update_ftr_reg(SYS_ID_MMFR3_EL1, cpu,
cpu               768 arch/arm64/kernel/cpufeature.c 		taint |= check_update_ftr_reg(SYS_ID_PFR0_EL1, cpu,
cpu               770 arch/arm64/kernel/cpufeature.c 		taint |= check_update_ftr_reg(SYS_ID_PFR1_EL1, cpu,
cpu               772 arch/arm64/kernel/cpufeature.c 		taint |= check_update_ftr_reg(SYS_MVFR0_EL1, cpu,
cpu               774 arch/arm64/kernel/cpufeature.c 		taint |= check_update_ftr_reg(SYS_MVFR1_EL1, cpu,
cpu               776 arch/arm64/kernel/cpufeature.c 		taint |= check_update_ftr_reg(SYS_MVFR2_EL1, cpu,
cpu               781 arch/arm64/kernel/cpufeature.c 		taint |= check_update_ftr_reg(SYS_ZCR_EL1, cpu,
cpu              1044 arch/arm64/kernel/cpufeature.c 	int cpu = smp_processor_id();
cpu              1057 arch/arm64/kernel/cpufeature.c 	remap_fn(cpu, num_online_cpus(), __pa_symbol(swapper_pg_dir));
cpu              1060 arch/arm64/kernel/cpufeature.c 	if (!cpu)
cpu                19 arch/arm64/kernel/cpuidle.c int arm_cpuidle_init(unsigned int cpu)
cpu                23 arch/arm64/kernel/cpuidle.c 	if (cpu_ops[cpu] && cpu_ops[cpu]->cpu_suspend &&
cpu                24 arch/arm64/kernel/cpuidle.c 			cpu_ops[cpu]->cpu_init_idle)
cpu                25 arch/arm64/kernel/cpuidle.c 		ret = cpu_ops[cpu]->cpu_init_idle(cpu);
cpu                39 arch/arm64/kernel/cpuidle.c 	int cpu = smp_processor_id();
cpu                41 arch/arm64/kernel/cpuidle.c 	return cpu_ops[cpu]->cpu_suspend(index);
cpu                50 arch/arm64/kernel/cpuidle.c static int psci_acpi_cpu_init_idle(unsigned int cpu)
cpu                54 arch/arm64/kernel/cpuidle.c 	struct acpi_processor *pr = per_cpu(processors, cpu);
cpu                88 arch/arm64/kernel/cpuidle.c int acpi_processor_ffh_lpi_probe(unsigned int cpu)
cpu                90 arch/arm64/kernel/cpuidle.c 	return psci_acpi_cpu_init_idle(cpu);
cpu               251 arch/arm64/kernel/cpuinfo.c static int cpuid_cpu_online(unsigned int cpu)
cpu               255 arch/arm64/kernel/cpuinfo.c 	struct cpuinfo_arm64 *info = &per_cpu(cpu_data, cpu);
cpu               257 arch/arm64/kernel/cpuinfo.c 	dev = get_cpu_device(cpu);
cpu               272 arch/arm64/kernel/cpuinfo.c static int cpuid_cpu_offline(unsigned int cpu)
cpu               275 arch/arm64/kernel/cpuinfo.c 	struct cpuinfo_arm64 *info = &per_cpu(cpu_data, cpu);
cpu               277 arch/arm64/kernel/cpuinfo.c 	dev = get_cpu_device(cpu);
cpu               290 arch/arm64/kernel/cpuinfo.c 	int cpu, ret;
cpu               292 arch/arm64/kernel/cpuinfo.c 	for_each_possible_cpu(cpu) {
cpu               293 arch/arm64/kernel/cpuinfo.c 		struct cpuinfo_arm64 *info = &per_cpu(cpu_data, cpu);
cpu               308 arch/arm64/kernel/cpuinfo.c 	unsigned int cpu = smp_processor_id();
cpu               324 arch/arm64/kernel/cpuinfo.c 	pr_info("Detected %s I-cache on CPU%d\n", icache_policy_str[l1ip], cpu);
cpu               125 arch/arm64/kernel/debug-monitors.c static int clear_os_lock(unsigned int cpu)
cpu              1404 arch/arm64/kernel/fpsimd.c static int fpsimd_cpu_dead(unsigned int cpu)
cpu              1406 arch/arm64/kernel/fpsimd.c 	per_cpu(fpsimd_last_state.st, cpu) = NULL;
cpu               933 arch/arm64/kernel/hw_breakpoint.c static int hw_breakpoint_reset(unsigned int cpu)
cpu                42 arch/arm64/kernel/irq.c 	int cpu;
cpu                45 arch/arm64/kernel/irq.c 	for_each_possible_cpu(cpu) {
cpu                46 arch/arm64/kernel/irq.c 		p = arch_alloc_vmap_stack(IRQ_STACK_SIZE, cpu_to_node(cpu));
cpu                47 arch/arm64/kernel/irq.c 		per_cpu(irq_stack_ptr, cpu) = p;
cpu                56 arch/arm64/kernel/irq.c 	int cpu;
cpu                58 arch/arm64/kernel/irq.c 	for_each_possible_cpu(cpu)
cpu                59 arch/arm64/kernel/irq.c 		per_cpu(irq_stack_ptr, cpu) = per_cpu(irq_stack, cpu);
cpu                24 arch/arm64/kernel/psci.c static int __init cpu_psci_cpu_init(unsigned int cpu)
cpu                29 arch/arm64/kernel/psci.c static int __init cpu_psci_cpu_prepare(unsigned int cpu)
cpu                32 arch/arm64/kernel/psci.c 		pr_err("no cpu_on method, not booting CPU%d\n", cpu);
cpu                39 arch/arm64/kernel/psci.c static int cpu_psci_cpu_boot(unsigned int cpu)
cpu                41 arch/arm64/kernel/psci.c 	int err = psci_ops.cpu_on(cpu_logical_map(cpu), __pa_symbol(secondary_entry));
cpu                43 arch/arm64/kernel/psci.c 		pr_err("failed to boot CPU%d (%d)\n", cpu, err);
cpu                49 arch/arm64/kernel/psci.c static bool cpu_psci_cpu_can_disable(unsigned int cpu)
cpu                51 arch/arm64/kernel/psci.c 	return !psci_tos_resident_on(cpu);
cpu                54 arch/arm64/kernel/psci.c static int cpu_psci_cpu_disable(unsigned int cpu)
cpu                61 arch/arm64/kernel/psci.c 	if (psci_tos_resident_on(cpu))
cpu                67 arch/arm64/kernel/psci.c static void cpu_psci_cpu_die(unsigned int cpu)
cpu                79 arch/arm64/kernel/psci.c 	pr_crit("unable to power off CPU%u (%d)\n", cpu, ret);
cpu                82 arch/arm64/kernel/psci.c static int cpu_psci_cpu_kill(unsigned int cpu)
cpu                98 arch/arm64/kernel/psci.c 		err = psci_ops.affinity_info(cpu_logical_map(cpu), 0);
cpu               100 arch/arm64/kernel/psci.c 			pr_info("CPU%d killed (polled %d ms)\n", cpu,
cpu               109 arch/arm64/kernel/psci.c 			cpu, err);
cpu                39 arch/arm64/kernel/sdei.c static void _free_sdei_stack(unsigned long * __percpu *ptr, int cpu)
cpu                43 arch/arm64/kernel/sdei.c 	p = per_cpu(*ptr, cpu);
cpu                45 arch/arm64/kernel/sdei.c 		per_cpu(*ptr, cpu) = NULL;
cpu                52 arch/arm64/kernel/sdei.c 	int cpu;
cpu                54 arch/arm64/kernel/sdei.c 	for_each_possible_cpu(cpu) {
cpu                55 arch/arm64/kernel/sdei.c 		_free_sdei_stack(&sdei_stack_normal_ptr, cpu);
cpu                56 arch/arm64/kernel/sdei.c 		_free_sdei_stack(&sdei_stack_critical_ptr, cpu);
cpu                60 arch/arm64/kernel/sdei.c static int _init_sdei_stack(unsigned long * __percpu *ptr, int cpu)
cpu                64 arch/arm64/kernel/sdei.c 	p = arch_alloc_vmap_stack(SDEI_STACK_SIZE, cpu_to_node(cpu));
cpu                67 arch/arm64/kernel/sdei.c 	per_cpu(*ptr, cpu) = p;
cpu                74 arch/arm64/kernel/sdei.c 	int cpu;
cpu                77 arch/arm64/kernel/sdei.c 	for_each_possible_cpu(cpu) {
cpu                78 arch/arm64/kernel/sdei.c 		err = _init_sdei_stack(&sdei_stack_normal_ptr, cpu);
cpu                81 arch/arm64/kernel/sdei.c 		err = _init_sdei_stack(&sdei_stack_critical_ptr, cpu);
cpu               100 arch/arm64/kernel/setup.c bool arch_match_cpu_phys_id(int cpu, u64 phys_id)
cpu               102 arch/arm64/kernel/setup.c 	return phys_id == cpu_logical_map(cpu);
cpu               367 arch/arm64/kernel/setup.c static inline bool cpu_can_disable(unsigned int cpu)
cpu               370 arch/arm64/kernel/setup.c 	if (cpu_ops[cpu] && cpu_ops[cpu]->cpu_can_disable)
cpu               371 arch/arm64/kernel/setup.c 		return cpu_ops[cpu]->cpu_can_disable(cpu);
cpu               384 arch/arm64/kernel/setup.c 		struct cpu *cpu = &per_cpu(cpu_data.cpu, i);
cpu               385 arch/arm64/kernel/setup.c 		cpu->hotpluggable = cpu_can_disable(i);
cpu               386 arch/arm64/kernel/setup.c 		register_cpu(cpu, i);
cpu                79 arch/arm64/kernel/smp.c static int op_cpu_kill(unsigned int cpu);
cpu                81 arch/arm64/kernel/smp.c static inline int op_cpu_kill(unsigned int cpu)
cpu                92 arch/arm64/kernel/smp.c static int boot_secondary(unsigned int cpu, struct task_struct *idle)
cpu                94 arch/arm64/kernel/smp.c 	if (cpu_ops[cpu]->cpu_boot)
cpu                95 arch/arm64/kernel/smp.c 		return cpu_ops[cpu]->cpu_boot(cpu);
cpu               102 arch/arm64/kernel/smp.c int __cpu_up(unsigned int cpu, struct task_struct *idle)
cpu               119 arch/arm64/kernel/smp.c 	ret = boot_secondary(cpu, idle);
cpu               128 arch/arm64/kernel/smp.c 		if (!cpu_online(cpu)) {
cpu               129 arch/arm64/kernel/smp.c 			pr_crit("CPU%u: failed to come online\n", cpu);
cpu               133 arch/arm64/kernel/smp.c 		pr_err("CPU%u: failed to boot: %d\n", cpu, ret);
cpu               149 arch/arm64/kernel/smp.c 					cpu, status);
cpu               153 arch/arm64/kernel/smp.c 			if (!op_cpu_kill(cpu)) {
cpu               154 arch/arm64/kernel/smp.c 				pr_crit("CPU%u: died during early boot\n", cpu);
cpu               157 arch/arm64/kernel/smp.c 			pr_crit("CPU%u: may not have shut down cleanly\n", cpu);
cpu               160 arch/arm64/kernel/smp.c 			pr_crit("CPU%u: is stuck in kernel\n", cpu);
cpu               162 arch/arm64/kernel/smp.c 				pr_crit("CPU%u: does not support 52-bit VAs\n", cpu);
cpu               164 arch/arm64/kernel/smp.c 				pr_crit("CPU%u: does not support %luK granule \n", cpu, PAGE_SIZE / SZ_1K);
cpu               168 arch/arm64/kernel/smp.c 			panic("CPU%u detected unsupported configuration\n", cpu);
cpu               197 arch/arm64/kernel/smp.c 	unsigned int cpu;
cpu               199 arch/arm64/kernel/smp.c 	cpu = task_cpu(current);
cpu               200 arch/arm64/kernel/smp.c 	set_my_cpu_offset(per_cpu_offset(cpu));
cpu               228 arch/arm64/kernel/smp.c 	if (cpu_ops[cpu]->cpu_postboot)
cpu               229 arch/arm64/kernel/smp.c 		cpu_ops[cpu]->cpu_postboot();
cpu               239 arch/arm64/kernel/smp.c 	notify_cpu_starting(cpu);
cpu               241 arch/arm64/kernel/smp.c 	store_cpu_topology(cpu);
cpu               242 arch/arm64/kernel/smp.c 	numa_add_cpu(cpu);
cpu               250 arch/arm64/kernel/smp.c 					 cpu, (unsigned long)mpidr,
cpu               253 arch/arm64/kernel/smp.c 	set_cpu_online(cpu, true);
cpu               265 arch/arm64/kernel/smp.c static int op_cpu_disable(unsigned int cpu)
cpu               271 arch/arm64/kernel/smp.c 	if (!cpu_ops[cpu] || !cpu_ops[cpu]->cpu_die)
cpu               278 arch/arm64/kernel/smp.c 	if (cpu_ops[cpu]->cpu_disable)
cpu               279 arch/arm64/kernel/smp.c 		return cpu_ops[cpu]->cpu_disable(cpu);
cpu               289 arch/arm64/kernel/smp.c 	unsigned int cpu = smp_processor_id();
cpu               292 arch/arm64/kernel/smp.c 	ret = op_cpu_disable(cpu);
cpu               296 arch/arm64/kernel/smp.c 	remove_cpu_topology(cpu);
cpu               297 arch/arm64/kernel/smp.c 	numa_remove_cpu(cpu);
cpu               303 arch/arm64/kernel/smp.c 	set_cpu_online(cpu, false);
cpu               313 arch/arm64/kernel/smp.c static int op_cpu_kill(unsigned int cpu)
cpu               320 arch/arm64/kernel/smp.c 	if (!cpu_ops[cpu]->cpu_kill)
cpu               323 arch/arm64/kernel/smp.c 	return cpu_ops[cpu]->cpu_kill(cpu);
cpu               330 arch/arm64/kernel/smp.c void __cpu_die(unsigned int cpu)
cpu               334 arch/arm64/kernel/smp.c 	if (!cpu_wait_death(cpu, 5)) {
cpu               335 arch/arm64/kernel/smp.c 		pr_crit("CPU%u: cpu didn't die\n", cpu);
cpu               338 arch/arm64/kernel/smp.c 	pr_notice("CPU%u: shutdown\n", cpu);
cpu               346 arch/arm64/kernel/smp.c 	err = op_cpu_kill(cpu);
cpu               349 arch/arm64/kernel/smp.c 			cpu, err);
cpu               358 arch/arm64/kernel/smp.c 	unsigned int cpu = smp_processor_id();
cpu               372 arch/arm64/kernel/smp.c 	cpu_ops[cpu]->cpu_die(cpu);
cpu               384 arch/arm64/kernel/smp.c 	int cpu = smp_processor_id();
cpu               386 arch/arm64/kernel/smp.c 	pr_crit("CPU%d: will not boot\n", cpu);
cpu               389 arch/arm64/kernel/smp.c 	set_cpu_present(cpu, 0);
cpu               394 arch/arm64/kernel/smp.c 	if (cpu_ops[cpu] && cpu_ops[cpu]->cpu_die)
cpu               395 arch/arm64/kernel/smp.c 		cpu_ops[cpu]->cpu_die(cpu);
cpu               472 arch/arm64/kernel/smp.c static bool __init is_mpidr_duplicate(unsigned int cpu, u64 hwid)
cpu               476 arch/arm64/kernel/smp.c 	for (i = 1; (i < cpu) && (i < NR_CPUS); i++)
cpu               486 arch/arm64/kernel/smp.c static int __init smp_cpu_setup(int cpu)
cpu               488 arch/arm64/kernel/smp.c 	if (cpu_read_ops(cpu))
cpu               491 arch/arm64/kernel/smp.c 	if (cpu_ops[cpu]->cpu_init(cpu))
cpu               494 arch/arm64/kernel/smp.c 	set_cpu_possible(cpu, true);
cpu               505 arch/arm64/kernel/smp.c struct acpi_madt_generic_interrupt *acpi_cpu_get_madt_gicc(int cpu)
cpu               507 arch/arm64/kernel/smp.c 	return &cpu_madt_gicc[cpu];
cpu               715 arch/arm64/kernel/smp.c 	unsigned int cpu;
cpu               737 arch/arm64/kernel/smp.c 	for_each_possible_cpu(cpu) {
cpu               739 arch/arm64/kernel/smp.c 		per_cpu(cpu_number, cpu) = cpu;
cpu               741 arch/arm64/kernel/smp.c 		if (cpu == smp_processor_id())
cpu               744 arch/arm64/kernel/smp.c 		if (!cpu_ops[cpu])
cpu               747 arch/arm64/kernel/smp.c 		err = cpu_ops[cpu]->cpu_prepare(cpu);
cpu               751 arch/arm64/kernel/smp.c 		set_cpu_present(cpu, true);
cpu               752 arch/arm64/kernel/smp.c 		numa_store_cpu_info(cpu);
cpu               782 arch/arm64/kernel/smp.c 	unsigned int cpu, i;
cpu               787 arch/arm64/kernel/smp.c 		for_each_online_cpu(cpu)
cpu               789 arch/arm64/kernel/smp.c 				   __get_irq_stat(cpu, ipi_irqs[i]));
cpu               794 arch/arm64/kernel/smp.c u64 smp_irq_stat_cpu(unsigned int cpu)
cpu               800 arch/arm64/kernel/smp.c 		sum += __get_irq_stat(cpu, ipi_irqs[i]);
cpu               810 arch/arm64/kernel/smp.c void arch_send_call_function_single_ipi(int cpu)
cpu               812 arch/arm64/kernel/smp.c 	smp_cross_call(cpumask_of(cpu), IPI_CALL_FUNC);
cpu               853 arch/arm64/kernel/smp.c static void ipi_cpu_crash_stop(unsigned int cpu, struct pt_regs *regs)
cpu               856 arch/arm64/kernel/smp.c 	crash_save_cpu(regs, cpu);
cpu               864 arch/arm64/kernel/smp.c 	if (cpu_ops[cpu]->cpu_die)
cpu               865 arch/arm64/kernel/smp.c 		cpu_ops[cpu]->cpu_die(cpu);
cpu               878 arch/arm64/kernel/smp.c 	unsigned int cpu = smp_processor_id();
cpu               883 arch/arm64/kernel/smp.c 		__inc_irq_stat(cpu, ipi_irqs[ipinr]);
cpu               906 arch/arm64/kernel/smp.c 			ipi_cpu_crash_stop(cpu, regs);
cpu               930 arch/arm64/kernel/smp.c 		WARN_ONCE(!acpi_parking_protocol_valid(cpu),
cpu               932 arch/arm64/kernel/smp.c 			  cpu);
cpu               937 arch/arm64/kernel/smp.c 		pr_crit("CPU%u: Unknown IPI message 0x%x\n", cpu, ipinr);
cpu               946 arch/arm64/kernel/smp.c void smp_send_reschedule(int cpu)
cpu               948 arch/arm64/kernel/smp.c 	smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE);
cpu                43 arch/arm64/kernel/smp_spin_table.c static int smp_spin_table_cpu_init(unsigned int cpu)
cpu                48 arch/arm64/kernel/smp_spin_table.c 	dn = of_get_cpu_node(cpu, NULL);
cpu                56 arch/arm64/kernel/smp_spin_table.c 				   &cpu_release_addr[cpu]);
cpu                59 arch/arm64/kernel/smp_spin_table.c 		       cpu);
cpu                66 arch/arm64/kernel/smp_spin_table.c static int smp_spin_table_cpu_prepare(unsigned int cpu)
cpu                70 arch/arm64/kernel/smp_spin_table.c 	if (!cpu_release_addr[cpu])
cpu                79 arch/arm64/kernel/smp_spin_table.c 	release_addr = ioremap_cache(cpu_release_addr[cpu],
cpu               105 arch/arm64/kernel/smp_spin_table.c static int smp_spin_table_cpu_boot(unsigned int cpu)
cpu               110 arch/arm64/kernel/smp_spin_table.c 	write_pen_release(cpu_logical_map(cpu));
cpu                42 arch/arm64/kernel/suspend.c 	unsigned int cpu = smp_processor_id();
cpu                68 arch/arm64/kernel/suspend.c 		hw_breakpoint_restore(cpu);
cpu                63 arch/arm64/kernel/topology.c static bool __init acpi_cpu_is_threaded(int cpu)
cpu                65 arch/arm64/kernel/topology.c 	int is_threaded = acpi_pptt_cpu_is_thread(cpu);
cpu                83 arch/arm64/kernel/topology.c 	int cpu, topology_id;
cpu                88 arch/arm64/kernel/topology.c 	for_each_possible_cpu(cpu) {
cpu                91 arch/arm64/kernel/topology.c 		topology_id = find_acpi_cpu_topology(cpu, 0);
cpu                95 arch/arm64/kernel/topology.c 		if (acpi_cpu_is_threaded(cpu)) {
cpu                96 arch/arm64/kernel/topology.c 			cpu_topology[cpu].thread_id = topology_id;
cpu                97 arch/arm64/kernel/topology.c 			topology_id = find_acpi_cpu_topology(cpu, 1);
cpu                98 arch/arm64/kernel/topology.c 			cpu_topology[cpu].core_id   = topology_id;
cpu               100 arch/arm64/kernel/topology.c 			cpu_topology[cpu].thread_id  = -1;
cpu               101 arch/arm64/kernel/topology.c 			cpu_topology[cpu].core_id    = topology_id;
cpu               103 arch/arm64/kernel/topology.c 		topology_id = find_acpi_cpu_topology_package(cpu);
cpu               104 arch/arm64/kernel/topology.c 		cpu_topology[cpu].package_id = topology_id;
cpu               106 arch/arm64/kernel/topology.c 		i = acpi_find_last_cache_level(cpu);
cpu               113 arch/arm64/kernel/topology.c 			cache_id = find_acpi_cpu_cache_topology(cpu, i);
cpu               115 arch/arm64/kernel/topology.c 				cpu_topology[cpu].llc_id = cache_id;
cpu               268 arch/arm64/kvm/reset.c 	loaded = (vcpu->cpu != -1);
cpu               112 arch/arm64/mm/context.c 	int cpu;
cpu               124 arch/arm64/mm/context.c 	for_each_possible_cpu(cpu) {
cpu               125 arch/arm64/mm/context.c 		if (per_cpu(reserved_asids, cpu) == asid) {
cpu               127 arch/arm64/mm/context.c 			per_cpu(reserved_asids, cpu) = newasid;
cpu               183 arch/arm64/mm/context.c void check_and_switch_context(struct mm_struct *mm, unsigned int cpu)
cpu               207 arch/arm64/mm/context.c 	old_active_asid = atomic64_read(&per_cpu(active_asids, cpu));
cpu               210 arch/arm64/mm/context.c 	    atomic64_cmpxchg_relaxed(&per_cpu(active_asids, cpu),
cpu               222 arch/arm64/mm/context.c 	if (cpumask_test_and_clear_cpu(cpu, &tlb_flush_pending))
cpu               225 arch/arm64/mm/context.c 	atomic64_set(&per_cpu(active_asids, cpu), asid);
cpu                61 arch/arm64/mm/numa.c static void numa_update_cpu(unsigned int cpu, bool remove)
cpu                63 arch/arm64/mm/numa.c 	int nid = cpu_to_node(cpu);
cpu                69 arch/arm64/mm/numa.c 		cpumask_clear_cpu(cpu, node_to_cpumask_map[nid]);
cpu                71 arch/arm64/mm/numa.c 		cpumask_set_cpu(cpu, node_to_cpumask_map[nid]);
cpu                74 arch/arm64/mm/numa.c void numa_add_cpu(unsigned int cpu)
cpu                76 arch/arm64/mm/numa.c 	numa_update_cpu(cpu, false);
cpu                79 arch/arm64/mm/numa.c void numa_remove_cpu(unsigned int cpu)
cpu                81 arch/arm64/mm/numa.c 	numa_update_cpu(cpu, true);
cpu                84 arch/arm64/mm/numa.c void numa_clear_node(unsigned int cpu)
cpu                86 arch/arm64/mm/numa.c 	numa_remove_cpu(cpu);
cpu                87 arch/arm64/mm/numa.c 	set_cpu_numa_node(cpu, NUMA_NO_NODE);
cpu               118 arch/arm64/mm/numa.c void numa_store_cpu_info(unsigned int cpu)
cpu               120 arch/arm64/mm/numa.c 	set_cpu_numa_node(cpu, cpu_to_node_map[cpu]);
cpu               123 arch/arm64/mm/numa.c void __init early_map_cpu_to_node(unsigned int cpu, int nid)
cpu               129 arch/arm64/mm/numa.c 	cpu_to_node_map[cpu] = nid;
cpu               136 arch/arm64/mm/numa.c 	if (!cpu)
cpu               137 arch/arm64/mm/numa.c 		set_cpu_numa_node(cpu, nid);
cpu               144 arch/arm64/mm/numa.c static int __init early_cpu_to_node(int cpu)
cpu               146 arch/arm64/mm/numa.c 	return cpu_to_node_map[cpu];
cpu               154 arch/arm64/mm/numa.c static void * __init pcpu_fc_alloc(unsigned int cpu, size_t size,
cpu               157 arch/arm64/mm/numa.c 	int nid = early_cpu_to_node(cpu);
cpu               171 arch/arm64/mm/numa.c 	unsigned int cpu;
cpu               186 arch/arm64/mm/numa.c 	for_each_possible_cpu(cpu)
cpu               187 arch/arm64/mm/numa.c 		__per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu];
cpu                41 arch/c6x/include/asm/thread_info.h 	int			cpu;		/* cpu we're on */
cpu                55 arch/c6x/include/asm/thread_info.h 	.cpu		= 0,			\
cpu               463 arch/c6x/kernel/setup.c static struct cpu cpu_devices[NR_CPUS];
cpu                31 arch/csky/include/asm/asid.h #define active_asid(info, cpu)	*per_cpu_ptr((info)->active, cpu)
cpu                34 arch/csky/include/asm/asid.h 		      unsigned int cpu, struct mm_struct *mm);
cpu                43 arch/csky/include/asm/asid.h 				      atomic64_t *pasid, unsigned int cpu,
cpu                64 arch/csky/include/asm/asid.h 	old_active_asid = atomic64_read(&active_asid(info, cpu));
cpu                67 arch/csky/include/asm/asid.h 	    atomic64_cmpxchg_relaxed(&active_asid(info, cpu),
cpu                71 arch/csky/include/asm/asid.h 	asid_new_context(info, pasid, cpu, mm);
cpu                33 arch/csky/include/asm/mmu_context.h void check_and_switch_context(struct mm_struct *mm, unsigned int cpu);
cpu                39 arch/csky/include/asm/mmu_context.h 	unsigned int cpu = smp_processor_id();
cpu                42 arch/csky/include/asm/mmu_context.h 		check_and_switch_context(next, cpu);
cpu                18 arch/csky/include/asm/smp.h void arch_send_call_function_single_ipi(int cpu);
cpu                22 arch/csky/include/asm/smp.h #define raw_smp_processor_id()	(current_thread_info()->cpu)
cpu                26 arch/csky/include/asm/smp.h void __cpu_die(unsigned int cpu);
cpu                24 arch/csky/include/asm/thread_info.h 	unsigned int		cpu;
cpu                32 arch/csky/include/asm/thread_info.h 	.cpu		= 0,			\
cpu                48 arch/csky/kernel/cpu-probe.c 	int cpu;
cpu                50 arch/csky/kernel/cpu-probe.c 	for_each_online_cpu(cpu)
cpu                51 arch/csky/kernel/cpu-probe.c 		smp_call_function_single(cpu, percpu_print, m, true);
cpu              1281 arch/csky/kernel/perf_event.c static int csky_pmu_starting_cpu(unsigned int cpu)
cpu              1287 arch/csky/kernel/perf_event.c static int csky_pmu_dying_cpu(unsigned int cpu)
cpu                91 arch/csky/kernel/smp.c void arch_send_call_function_single_ipi(int cpu)
cpu                93 arch/csky/kernel/smp.c 	send_ipi_message(cpumask_of(cpu), IPI_CALL_FUNC);
cpu               106 arch/csky/kernel/smp.c void smp_send_reschedule(int cpu)
cpu               108 arch/csky/kernel/smp.c 	send_ipi_message(cpumask_of(cpu), IPI_RESCHEDULE);
cpu               139 arch/csky/kernel/smp.c 	int cpu;
cpu               145 arch/csky/kernel/smp.c 		if (of_property_read_u32(node, "reg", &cpu))
cpu               148 arch/csky/kernel/smp.c 		if (cpu >= NR_CPUS)
cpu               151 arch/csky/kernel/smp.c 		set_cpu_possible(cpu, true);
cpu               152 arch/csky/kernel/smp.c 		set_cpu_present(cpu, true);
cpu               164 arch/csky/kernel/smp.c int __cpu_up(unsigned int cpu, struct task_struct *tidle)
cpu               166 arch/csky/kernel/smp.c 	unsigned long mask = 1 << cpu;
cpu               182 arch/csky/kernel/smp.c 		send_arch_ipi(cpumask_of(cpu));
cpu               190 arch/csky/kernel/smp.c 	while (!cpu_online(cpu));
cpu               209 arch/csky/kernel/smp.c 	unsigned int cpu = smp_processor_id();
cpu               230 arch/csky/kernel/smp.c 	cpumask_set_cpu(cpu, mm_cpumask(mm));
cpu               232 arch/csky/kernel/smp.c 	notify_cpu_starting(cpu);
cpu               233 arch/csky/kernel/smp.c 	set_cpu_online(cpu, true);
cpu               235 arch/csky/kernel/smp.c 	pr_info("CPU%u Online: %s...\n", cpu, __func__);
cpu               245 arch/csky/kernel/smp.c 	unsigned int cpu = smp_processor_id();
cpu               247 arch/csky/kernel/smp.c 	set_cpu_online(cpu, false);
cpu               251 arch/csky/kernel/smp.c 	clear_tasks_mm_cpumask(cpu);
cpu               256 arch/csky/kernel/smp.c void __cpu_die(unsigned int cpu)
cpu               258 arch/csky/kernel/smp.c 	if (!cpu_wait_death(cpu, 5)) {
cpu               259 arch/csky/kernel/smp.c 		pr_crit("CPU%u: shutdown failed\n", cpu);
cpu               262 arch/csky/kernel/smp.c 	pr_notice("CPU%u: shutdown\n", cpu);
cpu                16 arch/csky/mm/asid.c #define reserved_asid(info, cpu) *per_cpu_ptr((info)->reserved, cpu)
cpu                57 arch/csky/mm/asid.c 	int cpu;
cpu                69 arch/csky/mm/asid.c 	for_each_possible_cpu(cpu) {
cpu                70 arch/csky/mm/asid.c 		if (reserved_asid(info, cpu) == asid) {
cpu                72 arch/csky/mm/asid.c 			reserved_asid(info, cpu) = newasid;
cpu               138 arch/csky/mm/asid.c 		      unsigned int cpu, struct mm_struct *mm)
cpu               151 arch/csky/mm/asid.c 	if (cpumask_test_and_clear_cpu(cpu, &info->flush_pending))
cpu               154 arch/csky/mm/asid.c 	atomic64_set(&active_asid(info, cpu), asid);
cpu               155 arch/csky/mm/asid.c 	cpumask_set_cpu(cpu, mm_cpumask(mm));
cpu                19 arch/csky/mm/context.c void check_and_switch_context(struct mm_struct *mm, unsigned int cpu)
cpu                21 arch/csky/mm/context.c 	asid_check_context(&asid_info, &mm->context.asid, cpu, mm);
cpu                32 arch/h8300/include/asm/thread_info.h 	int		   cpu;			/* cpu we're on */
cpu                44 arch/h8300/include/asm/thread_info.h 	.cpu =		0,			\
cpu                64 arch/h8300/kernel/asm-offsets.c 	OFFSET(TI_CPU, thread_info, cpu);
cpu               130 arch/h8300/kernel/setup.c 	char *cpu;
cpu               132 arch/h8300/kernel/setup.c 	cpu = CPU;
cpu               138 arch/h8300/kernel/setup.c 		   cpu,
cpu               205 arch/h8300/kernel/setup.c 	struct device_node *cpu;
cpu               208 arch/h8300/kernel/setup.c 	cpu = of_find_compatible_node(NULL, NULL, "renesas,h8300");
cpu               209 arch/h8300/kernel/setup.c 	of_property_read_s32(cpu, "clock-frequency", &freq);
cpu               149 arch/hexagon/include/asm/hexagon_vm.h static inline long __vmintop_affinity(long i, long cpu)
cpu               151 arch/hexagon/include/asm/hexagon_vm.h 	return __vmintop(hvmi_affinity, i, cpu, 0, 0);
cpu                13 arch/hexagon/include/asm/smp.h #define raw_smp_processor_id() (current_thread_info()->cpu)
cpu                25 arch/hexagon/include/asm/smp.h extern void arch_send_call_function_single_ipi(int cpu);
cpu                38 arch/hexagon/include/asm/thread_info.h 	__u32                   cpu;            /* current cpu */
cpu                67 arch/hexagon/include/asm/thread_info.h 	.cpu            = 0,                    \
cpu               116 arch/hexagon/kernel/setup.c 	int cpu = (unsigned long) v - 1;
cpu               119 arch/hexagon/kernel/setup.c 	if (!cpu_online(cpu))
cpu               123 arch/hexagon/kernel/setup.c 	seq_printf(m, "processor\t: %d\n", cpu);
cpu                39 arch/hexagon/kernel/smp.c 				int cpu)
cpu                84 arch/hexagon/kernel/smp.c 	int cpu = smp_processor_id();
cpu                85 arch/hexagon/kernel/smp.c 	struct ipi_data *ipi = &per_cpu(ipi_data, cpu);
cpu                89 arch/hexagon/kernel/smp.c 		__handle_ipi(&ops, ipi, cpu);
cpu                96 arch/hexagon/kernel/smp.c 	unsigned long cpu;
cpu               101 arch/hexagon/kernel/smp.c 	for_each_cpu(cpu, cpumask) {
cpu               102 arch/hexagon/kernel/smp.c 		struct ipi_data *ipi = &per_cpu(ipi_data, cpu);
cpu               106 arch/hexagon/kernel/smp.c 		retval = __vmintop_post(BASE_IPI_IRQ+cpu);
cpu               110 arch/hexagon/kernel/smp.c 				BASE_IPI_IRQ+cpu);
cpu               135 arch/hexagon/kernel/smp.c 	unsigned int cpu;
cpu               156 arch/hexagon/kernel/smp.c 	cpu = smp_processor_id();
cpu               158 arch/hexagon/kernel/smp.c 	setup_irq(BASE_IPI_IRQ + cpu, &ipi_intdesc);
cpu               163 arch/hexagon/kernel/smp.c 	printk(KERN_INFO "%s cpu %d\n", __func__, current_thread_info()->cpu);
cpu               165 arch/hexagon/kernel/smp.c 	notify_cpu_starting(cpu);
cpu               167 arch/hexagon/kernel/smp.c 	set_cpu_online(cpu, true);
cpu               181 arch/hexagon/kernel/smp.c int __cpu_up(unsigned int cpu, struct task_struct *idle)
cpu               186 arch/hexagon/kernel/smp.c 	thread->cpu = cpu;
cpu               192 arch/hexagon/kernel/smp.c 	while (!cpu_online(cpu))
cpu               220 arch/hexagon/kernel/smp.c void smp_send_reschedule(int cpu)
cpu               222 arch/hexagon/kernel/smp.c 	send_ipi(cpumask_of(cpu), IPI_RESCHEDULE);
cpu               233 arch/hexagon/kernel/smp.c void arch_send_call_function_single_ipi(int cpu)
cpu               235 arch/hexagon/kernel/smp.c 	send_ipi(cpumask_of(cpu), IPI_CALL_FUNC);
cpu               112 arch/hexagon/kernel/time.c 	int cpu = smp_processor_id();
cpu               115 arch/hexagon/kernel/time.c 		&per_cpu(clock_events, cpu);
cpu               121 arch/hexagon/kernel/time.c 	dummy_clock_dev->cpumask = cpumask_of(cpu);
cpu               129 arch/hexagon/kernel/time.c 	int cpu = smp_processor_id();
cpu               130 arch/hexagon/kernel/time.c 	struct clock_event_device *ce_dev = &per_cpu(clock_events, cpu);
cpu                48 arch/ia64/include/asm/acpi.h extern unsigned int is_cpu_cpei_target(unsigned int cpu);
cpu                49 arch/ia64/include/asm/acpi.h extern void set_cpei_target_cpu(unsigned int cpu);
cpu                78 arch/ia64/include/asm/acpi.h #define for_each_possible_early_cpu(cpu)  \
cpu                79 arch/ia64/include/asm/acpi.h 	for_each_cpu((cpu), &early_cpu_possible_map)
cpu                84 arch/ia64/include/asm/acpi.h 	int cpu;
cpu                92 arch/ia64/include/asm/acpi.h 	for (cpu = low_cpu; cpu < high_cpu; cpu++) {
cpu                93 arch/ia64/include/asm/acpi.h 		cpumask_set_cpu(cpu, &early_cpu_possible_map);
cpu                94 arch/ia64/include/asm/acpi.h 		if (node_cpuid[cpu].nid == NUMA_NO_NODE) {
cpu                95 arch/ia64/include/asm/acpi.h 			node_cpuid[cpu].nid = next_nid;
cpu                11 arch/ia64/include/asm/cpu.h 	struct cpu cpu;
cpu               124 arch/ia64/include/asm/hw_irq.h extern void __setup_vector_irq(int cpu);
cpu               125 arch/ia64/include/asm/hw_irq.h extern void ia64_send_ipi (int cpu, int vector, int delivery_mode, int redirect);
cpu               130 arch/ia64/include/asm/hw_irq.h extern int irq_prepare_move(int irq, int cpu);
cpu               133 arch/ia64/include/asm/hw_irq.h static inline int irq_prepare_move(int irq, int cpu) { return 0; }
cpu                33 arch/ia64/include/asm/msidef.h #define     MSI_ADDR_DEST_ID_CPU(cpu)	((cpu) << MSI_ADDR_DEST_ID_SHIFT)
cpu                72 arch/ia64/include/asm/numa.h extern void map_cpu_to_node(int cpu, int nid);
cpu                73 arch/ia64/include/asm/numa.h extern void unmap_cpu_from_node(int cpu, int nid);
cpu                74 arch/ia64/include/asm/numa.h extern void numa_clear_node(int cpu);
cpu                77 arch/ia64/include/asm/numa.h #define map_cpu_to_node(cpu, nid)	do{}while(0)
cpu                78 arch/ia64/include/asm/numa.h #define unmap_cpu_from_node(cpu, nid)	do{}while(0)
cpu                80 arch/ia64/include/asm/numa.h #define numa_clear_node(cpu)	do { } while (0)
cpu                70 arch/ia64/include/asm/perfmon.h 	int		(*fmt_validate)(struct task_struct *task, unsigned int flags, int cpu, void *arg);
cpu                71 arch/ia64/include/asm/perfmon.h 	int		(*fmt_getsize)(struct task_struct *task, unsigned int flags, int cpu, void *arg, unsigned long *size);
cpu                72 arch/ia64/include/asm/perfmon.h 	int 		(*fmt_init)(struct task_struct *task, void *buf, unsigned int flags, int cpu, void *arg);
cpu               207 arch/ia64/include/asm/processor.h 	int cpu;
cpu               242 arch/ia64/include/asm/processor.h #define cpu_data(cpu)		(&per_cpu(ia64_cpu_info, cpu))
cpu                52 arch/ia64/include/asm/smp.h #define raw_smp_processor_id() (current_thread_info()->cpu)
cpu               117 arch/ia64/include/asm/smp.h extern void __cpu_die (unsigned int cpu);
cpu               125 arch/ia64/include/asm/smp.h extern void smp_send_reschedule (int cpu);
cpu               129 arch/ia64/include/asm/smp.h extern void arch_send_call_function_single_ipi(int cpu);
cpu                27 arch/ia64/include/asm/thread_info.h 	__u32 cpu;			/* current CPU */
cpu                50 arch/ia64/include/asm/thread_info.h 	.cpu		= 0,			\
cpu                42 arch/ia64/include/asm/topology.h #define topology_physical_package_id(cpu)	(cpu_data(cpu)->socket_id)
cpu                43 arch/ia64/include/asm/topology.h #define topology_core_id(cpu)			(cpu_data(cpu)->core_id)
cpu                44 arch/ia64/include/asm/topology.h #define topology_core_cpumask(cpu)		(&cpu_core_map[cpu])
cpu                45 arch/ia64/include/asm/topology.h #define topology_sibling_cpumask(cpu)		(&per_cpu(cpu_sibling_map, cpu))
cpu               112 arch/ia64/include/asm/uv/uv_hub.h #define uv_cpu_hub_info(cpu)	(&per_cpu(__uv_hub_info, cpu))
cpu               261 arch/ia64/include/asm/uv/uv_hub.h static inline int uv_cpu_to_blade_id(int cpu)
cpu               291 arch/ia64/include/asm/uv/uv_hub.h static inline int uv_cpu_to_pnode(int cpu)
cpu                71 arch/ia64/include/uapi/asm/perfmon_default_smpl.h         unsigned short  cpu;                    /* cpu on which the overflow occurred */
cpu               219 arch/ia64/kernel/acpi.c unsigned int is_cpu_cpei_target(unsigned int cpu)
cpu               225 arch/ia64/kernel/acpi.c 	if (logical_id == cpu)
cpu               231 arch/ia64/kernel/acpi.c void set_cpei_target_cpu(unsigned int cpu)
cpu               233 arch/ia64/kernel/acpi.c 	acpi_cpei_phys_cpuid = cpu_physical_id(cpu);
cpu               674 arch/ia64/kernel/acpi.c 		int cpu, i = 1;
cpu               675 arch/ia64/kernel/acpi.c 		for (cpu = 0; cpu < smp_boot_data.cpu_count; cpu++)
cpu               676 arch/ia64/kernel/acpi.c 			if (smp_boot_data.cpu_phys_id[cpu] !=
cpu               679 arch/ia64/kernel/acpi.c 				    smp_boot_data.cpu_phys_id[cpu];
cpu               714 arch/ia64/kernel/acpi.c int acpi_map_cpu2node(acpi_handle handle, int cpu, int physid)
cpu               724 arch/ia64/kernel/acpi.c 	node_cpuid[cpu].phys_id = physid;
cpu               725 arch/ia64/kernel/acpi.c 	node_cpuid[cpu].nid = acpi_get_node(handle);
cpu               786 arch/ia64/kernel/acpi.c 	int cpu;
cpu               789 arch/ia64/kernel/acpi.c 	cpu = cpumask_first(&tmp_map);
cpu               790 arch/ia64/kernel/acpi.c 	if (cpu >= nr_cpu_ids)
cpu               793 arch/ia64/kernel/acpi.c 	acpi_map_cpu2node(handle, cpu, physid);
cpu               795 arch/ia64/kernel/acpi.c 	set_cpu_present(cpu, true);
cpu               796 arch/ia64/kernel/acpi.c 	ia64_cpu_to_sapicid[cpu] = physid;
cpu               800 arch/ia64/kernel/acpi.c 	*pcpu = cpu;
cpu               812 arch/ia64/kernel/acpi.c int acpi_unmap_cpu(int cpu)
cpu               814 arch/ia64/kernel/acpi.c 	ia64_cpu_to_sapicid[cpu] = -1;
cpu               815 arch/ia64/kernel/acpi.c 	set_cpu_present(cpu, false);
cpu                40 arch/ia64/kernel/asm-offsets.c 	DEFINE(TI_CPU, offsetof(struct thread_info, cpu));
cpu                41 arch/ia64/kernel/crash.c 	int cpu = smp_processor_id();
cpu                42 arch/ia64/kernel/crash.c 	struct elf_prstatus *prstatus = &per_cpu(elf_prstatus, cpu);
cpu                55 arch/ia64/kernel/crash.c 	buf = (u64 *) per_cpu_ptr(crash_notes, cpu);
cpu                61 arch/ia64/kernel/err_inject.c 	u32 cpu=dev->id;						\
cpu                62 arch/ia64/kernel/err_inject.c 	return sprintf(buf, "%lx\n", name[cpu]);			\
cpu                70 arch/ia64/kernel/err_inject.c 	unsigned int cpu=dev->id;					\
cpu                71 arch/ia64/kernel/err_inject.c 	name[cpu] = simple_strtoull(buf, NULL, 16);			\
cpu                84 arch/ia64/kernel/err_inject.c 	unsigned int cpu=dev->id;
cpu                88 arch/ia64/kernel/err_inject.c 	printk(KERN_DEBUG "pal_mc_err_inject for cpu%d:\n", cpu);
cpu                89 arch/ia64/kernel/err_inject.c 	printk(KERN_DEBUG "err_type_info=%lx,\n", err_type_info[cpu]);
cpu                90 arch/ia64/kernel/err_inject.c 	printk(KERN_DEBUG "err_struct_info=%lx,\n", err_struct_info[cpu]);
cpu                92 arch/ia64/kernel/err_inject.c 			  err_data_buffer[cpu].data1,
cpu                93 arch/ia64/kernel/err_inject.c 			  err_data_buffer[cpu].data2,
cpu                94 arch/ia64/kernel/err_inject.c 			  err_data_buffer[cpu].data3);
cpu               100 arch/ia64/kernel/err_inject.c 		status[cpu]=ia64_pal_mc_error_inject_phys(err_type_info[cpu],
cpu               101 arch/ia64/kernel/err_inject.c 					err_struct_info[cpu],
cpu               102 arch/ia64/kernel/err_inject.c 					ia64_tpa(&err_data_buffer[cpu]),
cpu               103 arch/ia64/kernel/err_inject.c 					&capabilities[cpu],
cpu               104 arch/ia64/kernel/err_inject.c 			 		&resources[cpu]);
cpu               107 arch/ia64/kernel/err_inject.c 		status[cpu]=ia64_pal_mc_error_inject_virt(err_type_info[cpu],
cpu               108 arch/ia64/kernel/err_inject.c 					err_struct_info[cpu],
cpu               109 arch/ia64/kernel/err_inject.c 					ia64_tpa(&err_data_buffer[cpu]),
cpu               110 arch/ia64/kernel/err_inject.c 					&capabilities[cpu],
cpu               111 arch/ia64/kernel/err_inject.c 			 		&resources[cpu]);
cpu               114 arch/ia64/kernel/err_inject.c 		status[cpu] = -EINVAL;
cpu               119 arch/ia64/kernel/err_inject.c 	printk(KERN_DEBUG "Returns: status=%d,\n", (int)status[cpu]);
cpu               120 arch/ia64/kernel/err_inject.c 	printk(KERN_DEBUG "capabilities=%lx,\n", capabilities[cpu]);
cpu               121 arch/ia64/kernel/err_inject.c 	printk(KERN_DEBUG "resources=%lx\n", resources[cpu]);
cpu               133 arch/ia64/kernel/err_inject.c 	unsigned int cpu=dev->id;
cpu               134 arch/ia64/kernel/err_inject.c 	return sprintf(buf, "%lx\n", phys_addr[cpu]);
cpu               141 arch/ia64/kernel/err_inject.c 	unsigned int cpu=dev->id;
cpu               153 arch/ia64/kernel/err_inject.c 	phys_addr[cpu] = ia64_tpa(virt_addr);
cpu               164 arch/ia64/kernel/err_inject.c 	unsigned int cpu=dev->id;
cpu               167 arch/ia64/kernel/err_inject.c 			err_data_buffer[cpu].data1,
cpu               168 arch/ia64/kernel/err_inject.c 			err_data_buffer[cpu].data2,
cpu               169 arch/ia64/kernel/err_inject.c 			err_data_buffer[cpu].data3);
cpu               177 arch/ia64/kernel/err_inject.c 	unsigned int cpu=dev->id;
cpu               182 arch/ia64/kernel/err_inject.c 		 err_data_buffer[cpu].data1,
cpu               183 arch/ia64/kernel/err_inject.c 		 err_data_buffer[cpu].data2,
cpu               184 arch/ia64/kernel/err_inject.c 		 err_data_buffer[cpu].data3,
cpu               185 arch/ia64/kernel/err_inject.c 		 cpu);
cpu               188 arch/ia64/kernel/err_inject.c 			&err_data_buffer[cpu].data1,
cpu               189 arch/ia64/kernel/err_inject.c 			&err_data_buffer[cpu].data2,
cpu               190 arch/ia64/kernel/err_inject.c 			&err_data_buffer[cpu].data3);
cpu               227 arch/ia64/kernel/err_inject.c static int err_inject_add_dev(unsigned int cpu)
cpu               229 arch/ia64/kernel/err_inject.c 	struct device *sys_dev = get_cpu_device(cpu);
cpu               234 arch/ia64/kernel/err_inject.c static int err_inject_remove_dev(unsigned int cpu)
cpu               236 arch/ia64/kernel/err_inject.c 	struct device *sys_dev = get_cpu_device(cpu);
cpu               330 arch/ia64/kernel/iosapic.c 	int cpu, dest, rte_index;
cpu               337 arch/ia64/kernel/iosapic.c 	cpu = cpumask_first_and(cpu_online_mask, mask);
cpu               338 arch/ia64/kernel/iosapic.c 	if (cpu >= nr_cpu_ids)
cpu               341 arch/ia64/kernel/iosapic.c 	if (irq_prepare_move(irq, cpu))
cpu               344 arch/ia64/kernel/iosapic.c 	dest = cpu_physical_id(cpu);
cpu               620 arch/ia64/kernel/iosapic.c 	static int cpu = -1;
cpu               686 arch/ia64/kernel/iosapic.c 		if (++cpu >= nr_cpu_ids)
cpu               687 arch/ia64/kernel/iosapic.c 			cpu = 0;
cpu               688 arch/ia64/kernel/iosapic.c 	} while (!cpu_online(cpu) || !cpumask_test_cpu(cpu, &domain));
cpu               690 arch/ia64/kernel/iosapic.c 	return cpu_physical_id(cpu);
cpu                62 arch/ia64/kernel/irq_ia64.c static cpumask_t vector_allocation_domain(int cpu);
cpu               127 arch/ia64/kernel/irq_ia64.c 	int cpu;
cpu               140 arch/ia64/kernel/irq_ia64.c 	for_each_cpu(cpu, &mask)
cpu               141 arch/ia64/kernel/irq_ia64.c 		per_cpu(vector_irq, cpu)[vector] = irq;
cpu               162 arch/ia64/kernel/irq_ia64.c 	int vector, cpu;
cpu               170 arch/ia64/kernel/irq_ia64.c 	for_each_cpu_and(cpu, &cfg->domain, cpu_online_mask)
cpu               171 arch/ia64/kernel/irq_ia64.c 		per_cpu(vector_irq, cpu)[vector] = -1;
cpu               191 arch/ia64/kernel/irq_ia64.c 	int vector, cpu;
cpu               197 arch/ia64/kernel/irq_ia64.c 	for_each_online_cpu(cpu) {
cpu               198 arch/ia64/kernel/irq_ia64.c 		domain = vector_allocation_domain(cpu);
cpu               235 arch/ia64/kernel/irq_ia64.c void __setup_vector_irq(int cpu)
cpu               241 arch/ia64/kernel/irq_ia64.c 		per_cpu(vector_irq, cpu)[vector] = -1;
cpu               244 arch/ia64/kernel/irq_ia64.c 		if (!cpumask_test_cpu(cpu, &irq_cfg[irq].domain))
cpu               247 arch/ia64/kernel/irq_ia64.c 		per_cpu(vector_irq, cpu)[vector] = irq;
cpu               258 arch/ia64/kernel/irq_ia64.c static cpumask_t vector_allocation_domain(int cpu)
cpu               261 arch/ia64/kernel/irq_ia64.c 		return *cpumask_of(cpu);
cpu               265 arch/ia64/kernel/irq_ia64.c static int __irq_prepare_move(int irq, int cpu)
cpu               273 arch/ia64/kernel/irq_ia64.c 	if (cfg->vector == IRQ_VECTOR_UNASSIGNED || !cpu_online(cpu))
cpu               275 arch/ia64/kernel/irq_ia64.c 	if (cpumask_test_cpu(cpu, &cfg->domain))
cpu               277 arch/ia64/kernel/irq_ia64.c 	domain = vector_allocation_domain(cpu);
cpu               289 arch/ia64/kernel/irq_ia64.c int irq_prepare_move(int irq, int cpu)
cpu               295 arch/ia64/kernel/irq_ia64.c 	ret = __irq_prepare_move(irq, cpu);
cpu               371 arch/ia64/kernel/irq_ia64.c static cpumask_t vector_allocation_domain(int cpu)
cpu               395 arch/ia64/kernel/irq_ia64.c 	int irq, vector, cpu;
cpu               400 arch/ia64/kernel/irq_ia64.c 	for_each_online_cpu(cpu) {
cpu               401 arch/ia64/kernel/irq_ia64.c 		domain = vector_allocation_domain(cpu);
cpu               649 arch/ia64/kernel/irq_ia64.c ia64_send_ipi (int cpu, int vector, int delivery_mode, int redirect)
cpu               655 arch/ia64/kernel/irq_ia64.c 	phys_cpu_id = cpu_physical_id(cpu);
cpu               736 arch/ia64/kernel/mca.c ia64_mca_wakeup(int cpu)
cpu               738 arch/ia64/kernel/mca.c 	ia64_send_ipi(cpu, IA64_MCA_WAKEUP_VECTOR, IA64_IPI_DM_INT, 0);
cpu               752 arch/ia64/kernel/mca.c 	int cpu;
cpu               755 arch/ia64/kernel/mca.c 	for_each_online_cpu(cpu) {
cpu               756 arch/ia64/kernel/mca.c 		if (ia64_mc_info.imi_rendez_checkin[cpu] == IA64_MCA_RENDEZ_CHECKIN_DONE)
cpu               757 arch/ia64/kernel/mca.c 			ia64_mca_wakeup(cpu);
cpu               779 arch/ia64/kernel/mca.c 	int cpu = smp_processor_id();
cpu               788 arch/ia64/kernel/mca.c 	ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_DONE;
cpu               802 arch/ia64/kernel/mca.c 	ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_NOTDONE;
cpu               887 arch/ia64/kernel/mca.c 			task_thread_info(previous_current)->cpu);
cpu               986 arch/ia64/kernel/mca.c 	int cpu = smp_processor_id();
cpu               988 arch/ia64/kernel/mca.c 	previous_current = curr_task(cpu);
cpu               989 arch/ia64/kernel/mca.c 	ia64_set_curr_task(cpu, current);
cpu              1228 arch/ia64/kernel/mca.c 	int cpu = smp_processor_id();
cpu              1230 arch/ia64/kernel/mca.c 	if (!ia64_idtrs[cpu])
cpu              1235 arch/ia64/kernel/mca.c 		p = ia64_idtrs[cpu] + (iord - 1) * IA64_TR_ALLOC_MAX;
cpu              1285 arch/ia64/kernel/mca.c 	int recover, cpu = smp_processor_id();
cpu              1293 arch/ia64/kernel/mca.c 		monarch_cpu = cpu;
cpu              1296 arch/ia64/kernel/mca.c 		cpumask_set_cpu(cpu, &mca_cpu);
cpu              1300 arch/ia64/kernel/mca.c 		"monarch=%ld\n", sos->proc_state_param, cpu, sos->monarch);
cpu              1306 arch/ia64/kernel/mca.c 	ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_CONCURRENT_MCA;
cpu              1308 arch/ia64/kernel/mca.c 		ia64_wait_for_slaves(cpu, "MCA");
cpu              1319 arch/ia64/kernel/mca.c 		while (cpumask_test_cpu(cpu, &mca_cpu))
cpu              1363 arch/ia64/kernel/mca.c 				ia64_set_curr_task(cpu, previous_current);
cpu              1364 arch/ia64/kernel/mca.c 				ia64_mc_info.imi_rendez_checkin[cpu]
cpu              1370 arch/ia64/kernel/mca.c 	ia64_set_curr_task(cpu, previous_current);
cpu              1371 arch/ia64/kernel/mca.c 	ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_NOTDONE;
cpu              1666 arch/ia64/kernel/mca.c 	int cpu = smp_processor_id();
cpu              1673 arch/ia64/kernel/mca.c 		sos->proc_state_param, cpu, sos->monarch);
cpu              1686 arch/ia64/kernel/mca.c 		        __func__, cpu);
cpu              1698 arch/ia64/kernel/mca.c 			       __func__, cpu);
cpu              1704 arch/ia64/kernel/mca.c 		ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_INIT;
cpu              1727 arch/ia64/kernel/mca.c 		mprintk("Slave on cpu %d returning to normal service.\n", cpu);
cpu              1728 arch/ia64/kernel/mca.c 		ia64_set_curr_task(cpu, previous_current);
cpu              1729 arch/ia64/kernel/mca.c 		ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_NOTDONE;
cpu              1734 arch/ia64/kernel/mca.c 	monarch_cpu = cpu;
cpu              1745 arch/ia64/kernel/mca.c 	ia64_wait_for_slaves(cpu, "INIT");
cpu              1753 arch/ia64/kernel/mca.c 	mprintk("\nINIT dump complete.  Monarch on cpu %d returning to normal service.\n", cpu);
cpu              1755 arch/ia64/kernel/mca.c 	ia64_set_curr_task(cpu, previous_current);
cpu              1807 arch/ia64/kernel/mca.c 		const char *type, int cpu)
cpu              1816 arch/ia64/kernel/mca.c 	ti->cpu = cpu;
cpu              1819 arch/ia64/kernel/mca.c 	cpumask_set_cpu(cpu, &p->cpus_mask);
cpu              1840 arch/ia64/kernel/mca.c 	int cpu = smp_processor_id();
cpu              1847 arch/ia64/kernel/mca.c 	if (__per_cpu_mca[cpu]) {
cpu              1848 arch/ia64/kernel/mca.c 		data = __va(__per_cpu_mca[cpu]);
cpu              1858 arch/ia64/kernel/mca.c 					cpu);
cpu              1861 arch/ia64/kernel/mca.c 		"MCA", cpu);
cpu              1863 arch/ia64/kernel/mca.c 		"INIT", cpu);
cpu              1864 arch/ia64/kernel/mca.c 	__this_cpu_write(ia64_mca_data, (__per_cpu_mca[cpu] = __pa(data)));
cpu              1886 arch/ia64/kernel/mca.c static int ia64_mca_cpu_online(unsigned int cpu)
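
A toy model of the MCA rendezvous bookkeeping that the mca.c entries above revolve around: each CPU records a check-in state in a per-CPU array, the monarch waits until every other online CPU has checked in, then clears the states on the way out. The state names and two-state enum are simplifications of the kernel's IA64_MCA_RENDEZ_CHECKIN_* values.

/* Single-threaded sketch of monarch/slave check-in tracking. */
#include <stdio.h>

#define NR_CPUS 4

enum rendez_state { NOTDONE, DONE };

static enum rendez_state rendez_checkin[NR_CPUS];

static void slave_checkin(int cpu)
{
	rendez_checkin[cpu] = DONE;                 /* CHECKIN_DONE analogue */
}

static int all_slaves_checked_in(int monarch)
{
	int cpu;

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		if (cpu != monarch && rendez_checkin[cpu] != DONE)
			return 0;
	return 1;
}

int main(void)
{
	int monarch = 0, cpu;

	for (cpu = 1; cpu < NR_CPUS; cpu++)
		slave_checkin(cpu);
	printf("all slaves checked in: %d\n", all_slaves_checked_in(monarch));

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		rendez_checkin[cpu] = NOTDONE;      /* reset on exit, as the handler does */
	return 0;
}
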
cpu                21 arch/ia64/kernel/msi_ia64.c 	int cpu = cpumask_first_and(cpu_mask, cpu_online_mask);
cpu                24 arch/ia64/kernel/msi_ia64.c 	if (irq_prepare_move(irq, cpu))
cpu                31 arch/ia64/kernel/msi_ia64.c 	addr |= MSI_ADDR_DEST_ID_CPU(cpu_physical_id(cpu));
cpu                40 arch/ia64/kernel/msi_ia64.c 	cpumask_copy(irq_data_get_affinity_mask(idata), cpumask_of(cpu));
cpu               122 arch/ia64/kernel/msi_ia64.c 	int cpu = cpumask_first_and(mask, cpu_online_mask);
cpu               124 arch/ia64/kernel/msi_ia64.c 	if (irq_prepare_move(irq, cpu))
cpu               132 arch/ia64/kernel/msi_ia64.c 	msg.address_lo |= MSI_ADDR_DEST_ID_CPU(cpu_physical_id(cpu));
cpu                21 arch/ia64/kernel/numa.c void map_cpu_to_node(int cpu, int nid)
cpu                25 arch/ia64/kernel/numa.c 		cpu_to_node_map[cpu] = 0;
cpu                29 arch/ia64/kernel/numa.c 	oldnid = cpu_to_node_map[cpu];
cpu                30 arch/ia64/kernel/numa.c 	if (cpumask_test_cpu(cpu, &node_to_cpu_mask[oldnid])) {
cpu                37 arch/ia64/kernel/numa.c 	cpu_to_node_map[cpu] = nid;
cpu                38 arch/ia64/kernel/numa.c 	cpumask_set_cpu(cpu, &node_to_cpu_mask[nid]);
cpu                42 arch/ia64/kernel/numa.c void unmap_cpu_from_node(int cpu, int nid)
cpu                44 arch/ia64/kernel/numa.c 	WARN_ON(!cpumask_test_cpu(cpu, &node_to_cpu_mask[nid]));
cpu                45 arch/ia64/kernel/numa.c 	WARN_ON(cpu_to_node_map[cpu] != nid);
cpu                46 arch/ia64/kernel/numa.c 	cpu_to_node_map[cpu] = 0;
cpu                47 arch/ia64/kernel/numa.c 	cpumask_clear_cpu(cpu, &node_to_cpu_mask[nid]);
cpu                59 arch/ia64/kernel/numa.c 	int cpu, i, node;
cpu                64 arch/ia64/kernel/numa.c 	for_each_possible_early_cpu(cpu) {
cpu                67 arch/ia64/kernel/numa.c 			if (cpu_physical_id(cpu) == node_cpuid[i].phys_id) {
cpu                71 arch/ia64/kernel/numa.c 		map_cpu_to_node(cpu, node);
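
A user-space sketch of the two structures that numa.c's map_cpu_to_node()/unmap_cpu_from_node() entries above keep consistent: a cpu-to-node array and a per-node CPU bitmask. Sizes and the plain unsigned long bitmask are illustrative stand-ins for the kernel's cpumask_t.

/* Keep cpu_to_node_map[] and node_to_cpu_mask[] in step. */
#include <stdio.h>

#define NR_CPUS   8
#define MAX_NODES 2

static int cpu_to_node_map[NR_CPUS];
static unsigned long node_to_cpu_mask[MAX_NODES];

static void map_cpu_to_node(int cpu, int nid)
{
	cpu_to_node_map[cpu] = nid;
	node_to_cpu_mask[nid] |= 1UL << cpu;
}

static void unmap_cpu_from_node(int cpu, int nid)
{
	cpu_to_node_map[cpu] = 0;
	node_to_cpu_mask[nid] &= ~(1UL << cpu);
}

int main(void)
{
	map_cpu_to_node(3, 1);
	printf("cpu3 -> node %d, node1 mask 0x%lx\n",
	       cpu_to_node_map[3], node_to_cpu_mask[1]);
	unmap_cpu_from_node(3, 1);
	return 0;
}
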
cpu               924 arch/ia64/kernel/palinfo.c static int palinfo_add_proc(unsigned int cpu)
cpu               930 arch/ia64/kernel/palinfo.c 	sprintf(cpustr, "cpu%d", cpu);
cpu               936 arch/ia64/kernel/palinfo.c 	f.req_cpu = cpu;
cpu              1159 arch/ia64/kernel/perfmon.c pfm_buf_fmt_getsize(pfm_buffer_fmt_t *fmt, struct task_struct *task, unsigned int flags, int cpu, void *arg, unsigned long *size)
cpu              1162 arch/ia64/kernel/perfmon.c 	if (fmt->fmt_getsize) ret = (*fmt->fmt_getsize)(task, flags, cpu, arg, size);
cpu              1169 arch/ia64/kernel/perfmon.c 		     int cpu, void *arg)
cpu              1172 arch/ia64/kernel/perfmon.c 	if (fmt->fmt_validate) ret = (*fmt->fmt_validate)(task, flags, cpu, arg);
cpu              1178 arch/ia64/kernel/perfmon.c 		     int cpu, void *arg)
cpu              1181 arch/ia64/kernel/perfmon.c 	if (fmt->fmt_init) ret = (*fmt->fmt_init)(task, buf, flags, cpu, arg);
cpu              1284 arch/ia64/kernel/perfmon.c pfm_reserve_session(struct task_struct *task, int is_syswide, unsigned int cpu)
cpu              1297 arch/ia64/kernel/perfmon.c 		cpu));
cpu              1309 arch/ia64/kernel/perfmon.c 		if (pfm_sessions.pfs_sys_session[cpu]) goto error_conflict;
cpu              1311 arch/ia64/kernel/perfmon.c 		DPRINT(("reserving system wide session on CPU%u currently on CPU%u\n", cpu, smp_processor_id()));
cpu              1313 arch/ia64/kernel/perfmon.c 		pfm_sessions.pfs_sys_session[cpu] = task;
cpu              1327 arch/ia64/kernel/perfmon.c 		cpu));
cpu              1340 arch/ia64/kernel/perfmon.c   		task_pid_nr(pfm_sessions.pfs_sys_session[cpu]),
cpu              1341 arch/ia64/kernel/perfmon.c 		cpu));
cpu              1350 arch/ia64/kernel/perfmon.c pfm_unreserve_session(pfm_context_t *ctx, int is_syswide, unsigned int cpu)
cpu              1363 arch/ia64/kernel/perfmon.c 		cpu));
cpu              1367 arch/ia64/kernel/perfmon.c 		pfm_sessions.pfs_sys_session[cpu] = NULL;
cpu              1387 arch/ia64/kernel/perfmon.c 		cpu));
cpu              2369 arch/ia64/kernel/perfmon.c 		     unsigned int cpu, pfarg_context_t *arg)
cpu              2390 arch/ia64/kernel/perfmon.c 	ret = pfm_buf_fmt_validate(fmt, task, ctx_flags, cpu, fmt_arg);
cpu              2392 arch/ia64/kernel/perfmon.c 	DPRINT(("[%d] after validate(0x%x,%d,%p)=%d\n", task_pid_nr(task), ctx_flags, cpu, fmt_arg, ret));
cpu              2403 arch/ia64/kernel/perfmon.c 	ret = pfm_buf_fmt_getsize(fmt, task, ctx_flags, cpu, fmt_arg, &size);
cpu              2416 arch/ia64/kernel/perfmon.c 	ret = pfm_buf_fmt_init(fmt, task, ctx->ctx_smpl_hdr, ctx_flags, cpu, fmt_arg);
cpu              5591 arch/ia64/kernel/perfmon.c 	int cpu;
cpu              5600 arch/ia64/kernel/perfmon.c 	cpu = (long)v - 1;
cpu              5616 arch/ia64/kernel/perfmon.c 		cpu, pfm_stats[cpu].pfm_ovfl_intr_count,
cpu              5617 arch/ia64/kernel/perfmon.c 		cpu, pfm_stats[cpu].pfm_ovfl_intr_cycles,
cpu              5618 arch/ia64/kernel/perfmon.c 		cpu, pfm_stats[cpu].pfm_ovfl_intr_cycles_min,
cpu              5619 arch/ia64/kernel/perfmon.c 		cpu, pfm_stats[cpu].pfm_ovfl_intr_cycles_max,
cpu              5620 arch/ia64/kernel/perfmon.c 		cpu, pfm_stats[cpu].pfm_smpl_handler_calls,
cpu              5621 arch/ia64/kernel/perfmon.c 		cpu, pfm_stats[cpu].pfm_smpl_handler_cycles,
cpu              5622 arch/ia64/kernel/perfmon.c 		cpu, pfm_stats[cpu].pfm_spurious_ovfl_intr_count,
cpu              5623 arch/ia64/kernel/perfmon.c 		cpu, pfm_stats[cpu].pfm_replay_ovfl_intr_count,
cpu              5624 arch/ia64/kernel/perfmon.c 		cpu, pfm_get_cpu_data(pfm_syst_info, cpu) & PFM_CPUINFO_SYST_WIDE ? 1 : 0,
cpu              5625 arch/ia64/kernel/perfmon.c 		cpu, pfm_get_cpu_data(pfm_syst_info, cpu) & PFM_CPUINFO_DCR_PP ? 1 : 0,
cpu              5626 arch/ia64/kernel/perfmon.c 		cpu, pfm_get_cpu_data(pfm_syst_info, cpu) & PFM_CPUINFO_EXCL_IDLE ? 1 : 0,
cpu              5627 arch/ia64/kernel/perfmon.c 		cpu, pfm_get_cpu_data(pmu_owner, cpu) ? pfm_get_cpu_data(pmu_owner, cpu)->pid: -1,
cpu              5628 arch/ia64/kernel/perfmon.c 		cpu, pfm_get_cpu_data(pmu_ctx, cpu),
cpu              5629 arch/ia64/kernel/perfmon.c 		cpu, pfm_get_cpu_data(pmu_activation_number, cpu));
cpu              5640 arch/ia64/kernel/perfmon.c 			cpu, psr,
cpu              5641 arch/ia64/kernel/perfmon.c 			cpu, ia64_get_pmc(0));
cpu              5648 arch/ia64/kernel/perfmon.c 				cpu, i, ia64_get_pmc(i),
cpu              5649 arch/ia64/kernel/perfmon.c 				cpu, i, ia64_get_pmd(i));
cpu                42 arch/ia64/kernel/perfmon_default_smpl.c default_validate(struct task_struct *task, unsigned int flags, int cpu, void *data)
cpu                52 arch/ia64/kernel/perfmon_default_smpl.c 	DPRINT(("[%d] validate flags=0x%x CPU%d\n", task_pid_nr(task), flags, cpu));
cpu                65 arch/ia64/kernel/perfmon_default_smpl.c default_get_size(struct task_struct *task, unsigned int flags, int cpu, void *data, unsigned long *size)
cpu                78 arch/ia64/kernel/perfmon_default_smpl.c default_init(struct task_struct *task, void *buf, unsigned int flags, int cpu, void *data)
cpu               170 arch/ia64/kernel/perfmon_default_smpl.c 	ent->cpu       = smp_processor_id();
cpu               650 arch/ia64/kernel/process.c 	int cpu;
cpu               652 arch/ia64/kernel/process.c 	for_each_online_cpu(cpu) {
cpu               653 arch/ia64/kernel/process.c 		if (cpu != smp_processor_id())
cpu               654 arch/ia64/kernel/process.c 			cpu_down(cpu);
cpu               238 arch/ia64/kernel/sal.c 	int cpu;
cpu               245 arch/ia64/kernel/sal.c 	cpu = get_cpu();
cpu               252 arch/ia64/kernel/sal.c 	ia64_send_ipi(cpu, IA64_TIMER_VECTOR, IA64_IPI_DM_INT, 0);
cpu                97 arch/ia64/kernel/salinfo.c 	int			cpu;
cpu               237 arch/ia64/kernel/salinfo.c 			data_saved->cpu = smp_processor_id();
cpu               288 arch/ia64/kernel/salinfo.c 	int i, n, cpu = -1;
cpu               306 arch/ia64/kernel/salinfo.c 			cpu = n;
cpu               313 arch/ia64/kernel/salinfo.c 	if (cpu == -1)
cpu               319 arch/ia64/kernel/salinfo.c 	data->cpu_check = cpu;
cpu               323 arch/ia64/kernel/salinfo.c 	snprintf(cmd, sizeof(cmd), "read %d\n", cpu);
cpu               396 arch/ia64/kernel/salinfo.c salinfo_log_new_read(int cpu, struct salinfo_data *data)
cpu               407 arch/ia64/kernel/salinfo.c 		if (data_saved->buffer && data_saved->cpu == cpu) {
cpu               424 arch/ia64/kernel/salinfo.c 		work_on_cpu_safe(cpu, salinfo_log_read_cpu, data);
cpu               427 arch/ia64/kernel/salinfo.c 		cpumask_clear_cpu(cpu, &data->cpu_event);
cpu               463 arch/ia64/kernel/salinfo.c salinfo_log_clear(struct salinfo_data *data, int cpu)
cpu               469 arch/ia64/kernel/salinfo.c 	if (!cpumask_test_cpu(cpu, &data->cpu_event)) {
cpu               473 arch/ia64/kernel/salinfo.c 	cpumask_clear_cpu(cpu, &data->cpu_event);
cpu               482 arch/ia64/kernel/salinfo.c 		work_on_cpu_safe(cpu, salinfo_log_clear_cpu, data);
cpu               484 arch/ia64/kernel/salinfo.c 	salinfo_log_new_read(cpu, data);
cpu               487 arch/ia64/kernel/salinfo.c 		cpumask_set_cpu(cpu, &data->cpu_event);
cpu               501 arch/ia64/kernel/salinfo.c 	int cpu;
cpu               509 arch/ia64/kernel/salinfo.c 	if (sscanf(cmd, "read %d", &cpu) == 1) {
cpu               510 arch/ia64/kernel/salinfo.c 		salinfo_log_new_read(cpu, data);
cpu               511 arch/ia64/kernel/salinfo.c 	} else if (sscanf(cmd, "clear %d", &cpu) == 1) {
cpu               513 arch/ia64/kernel/salinfo.c 		if ((ret = salinfo_log_clear(data, cpu)))
cpu               515 arch/ia64/kernel/salinfo.c 	} else if (sscanf(cmd, "oemdata %d %d", &cpu, &offset) == 2) {
cpu               527 arch/ia64/kernel/salinfo.c 			count = work_on_cpu_safe(cpu, salinfo_platform_oemdata_cpu,
cpu               545 arch/ia64/kernel/salinfo.c static int salinfo_cpu_online(unsigned int cpu)
cpu               552 arch/ia64/kernel/salinfo.c 		cpumask_set_cpu(cpu, &data->cpu_event);
cpu               559 arch/ia64/kernel/salinfo.c static int salinfo_cpu_pre_down(unsigned int cpu)
cpu               571 arch/ia64/kernel/salinfo.c 			if (data_saved->buffer && data_saved->cpu == cpu)
cpu               574 arch/ia64/kernel/salinfo.c 		cpumask_clear_cpu(cpu, &data->cpu_event);
cpu               659 arch/ia64/kernel/setup.c #	define cpunum	c->cpu
cpu               836 arch/ia64/kernel/setup.c 	c->cpu = smp_processor_id();
cpu               171 arch/ia64/kernel/smp.c 	unsigned int cpu;
cpu               173 arch/ia64/kernel/smp.c 	for_each_cpu(cpu, mask) {
cpu               174 arch/ia64/kernel/smp.c 			send_IPI_single(cpu, op);
cpu               210 arch/ia64/kernel/smp.c 	unsigned int cpu, self_cpu;
cpu               212 arch/ia64/kernel/smp.c 	for_each_online_cpu(cpu) {
cpu               213 arch/ia64/kernel/smp.c 		if (cpu != self_cpu) {
cpu               214 arch/ia64/kernel/smp.c 			if(kdump_status[cpu] == 0)
cpu               215 arch/ia64/kernel/smp.c 				ia64_send_ipi(cpu, 0, IA64_IPI_DM_INIT, 0);
cpu               224 arch/ia64/kernel/smp.c smp_send_reschedule (int cpu)
cpu               226 arch/ia64/kernel/smp.c 	ia64_send_ipi(cpu, IA64_IPI_RESCHEDULE, IA64_IPI_DM_INT, 0);
cpu               234 arch/ia64/kernel/smp.c smp_send_local_flush_tlb (int cpu)
cpu               236 arch/ia64/kernel/smp.c 	ia64_send_ipi(cpu, IA64_IPI_LOCAL_TLB_FLUSH, IA64_IPI_DM_INT, 0);
cpu               260 arch/ia64/kernel/smp.c 	int mycpu, cpu, flush_mycpu = 0;
cpu               265 arch/ia64/kernel/smp.c 	for_each_cpu(cpu, &cpumask)
cpu               266 arch/ia64/kernel/smp.c 		counts[cpu] = local_tlb_flush_counts[cpu].count & 0xffff;
cpu               269 arch/ia64/kernel/smp.c 	for_each_cpu(cpu, &cpumask) {
cpu               270 arch/ia64/kernel/smp.c 		if (cpu == mycpu)
cpu               273 arch/ia64/kernel/smp.c 			smp_send_local_flush_tlb(cpu);
cpu               279 arch/ia64/kernel/smp.c 	for_each_cpu(cpu, &cpumask)
cpu               280 arch/ia64/kernel/smp.c 		while(counts[cpu] == (local_tlb_flush_counts[cpu].count & 0xffff))
cpu               319 arch/ia64/kernel/smp.c void arch_send_call_function_single_ipi(int cpu)
cpu               321 arch/ia64/kernel/smp.c 	send_IPI_single(cpu, IPI_CALL_FUNC_SINGLE);
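
A single-threaded model of the flush-counter handshake visible in the ia64 smp.c entries above: snapshot each target CPU's local flush count, ask it to flush, then spin until the count changes. In this sketch the "IPI" simply bumps the counter directly so the wait loop terminates; the counter width and CPU count are assumptions.

/* Snapshot, kick, and wait on per-CPU flush counters. */
#include <stdio.h>

#define NR_CPUS 4

static unsigned int local_tlb_flush_count[NR_CPUS];

static void fake_local_flush_tlb(int cpu)
{
	local_tlb_flush_count[cpu]++;               /* done by the target CPU in reality */
}

int main(void)
{
	unsigned int counts[NR_CPUS];
	int cpu, mycpu = 0;

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		counts[cpu] = local_tlb_flush_count[cpu] & 0xffff;

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		if (cpu != mycpu)
			fake_local_flush_tlb(cpu);  /* stands in for the flush IPI */

	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		if (cpu == mycpu)
			continue;
		while (counts[cpu] == (local_tlb_flush_count[cpu] & 0xffff))
			;                           /* spin until the remote flush lands */
		printf("cpu %d flushed\n", cpu);
	}
	return 0;
}
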
cpu               461 arch/ia64/kernel/smpboot.c do_boot_cpu (int sapicid, int cpu, struct task_struct *idle)
cpu               466 arch/ia64/kernel/smpboot.c 	Dprintk("Sending wakeup vector %lu to AP 0x%x/0x%x.\n", ap_wakeup_vector, cpu, sapicid);
cpu               468 arch/ia64/kernel/smpboot.c 	set_brendez_area(cpu);
cpu               469 arch/ia64/kernel/smpboot.c 	ia64_send_ipi(cpu, ap_wakeup_vector, IA64_IPI_DM_INT, 0);
cpu               476 arch/ia64/kernel/smpboot.c 		if (cpumask_test_cpu(cpu, &cpu_callin_map))
cpu               483 arch/ia64/kernel/smpboot.c 	if (!cpumask_test_cpu(cpu, &cpu_callin_map)) {
cpu               484 arch/ia64/kernel/smpboot.c 		printk(KERN_ERR "Processor 0x%x/0x%x is stuck.\n", cpu, sapicid);
cpu               485 arch/ia64/kernel/smpboot.c 		ia64_cpu_to_sapicid[cpu] = -1;
cpu               486 arch/ia64/kernel/smpboot.c 		set_cpu_online(cpu, false);  /* was set in smp_callin() */
cpu               508 arch/ia64/kernel/smpboot.c 	int sapicid, cpu, i;
cpu               511 arch/ia64/kernel/smpboot.c 	for (cpu = 0; cpu < NR_CPUS; cpu++) {
cpu               512 arch/ia64/kernel/smpboot.c 		ia64_cpu_to_sapicid[cpu] = -1;
cpu               518 arch/ia64/kernel/smpboot.c 	for (cpu = 1, i = 0; i < smp_boot_data.cpu_count; i++) {
cpu               522 arch/ia64/kernel/smpboot.c 		set_cpu_present(cpu, true);
cpu               523 arch/ia64/kernel/smpboot.c 		set_cpu_possible(cpu, true);
cpu               524 arch/ia64/kernel/smpboot.c 		ia64_cpu_to_sapicid[cpu] = sapicid;
cpu               525 arch/ia64/kernel/smpboot.c 		cpu++;
cpu               550 arch/ia64/kernel/smpboot.c 	current_thread_info()->cpu = 0;
cpu               574 arch/ia64/kernel/smpboot.c clear_cpu_sibling_map(int cpu)
cpu               578 arch/ia64/kernel/smpboot.c 	for_each_cpu(i, &per_cpu(cpu_sibling_map, cpu))
cpu               579 arch/ia64/kernel/smpboot.c 		cpumask_clear_cpu(cpu, &per_cpu(cpu_sibling_map, i));
cpu               580 arch/ia64/kernel/smpboot.c 	for_each_cpu(i, &cpu_core_map[cpu])
cpu               581 arch/ia64/kernel/smpboot.c 		cpumask_clear_cpu(cpu, &cpu_core_map[i]);
cpu               583 arch/ia64/kernel/smpboot.c 	per_cpu(cpu_sibling_map, cpu) = cpu_core_map[cpu] = CPU_MASK_NONE;
cpu               587 arch/ia64/kernel/smpboot.c remove_siblinginfo(int cpu)
cpu               591 arch/ia64/kernel/smpboot.c 	if (cpu_data(cpu)->threads_per_core == 1 &&
cpu               592 arch/ia64/kernel/smpboot.c 	    cpu_data(cpu)->cores_per_socket == 1) {
cpu               593 arch/ia64/kernel/smpboot.c 		cpumask_clear_cpu(cpu, &cpu_core_map[cpu]);
cpu               594 arch/ia64/kernel/smpboot.c 		cpumask_clear_cpu(cpu, &per_cpu(cpu_sibling_map, cpu));
cpu               598 arch/ia64/kernel/smpboot.c 	last = (cpumask_weight(&cpu_core_map[cpu]) == 1 ? 1 : 0);
cpu               601 arch/ia64/kernel/smpboot.c 	clear_cpu_sibling_map(cpu);
cpu               606 arch/ia64/kernel/smpboot.c int migrate_platform_irqs(unsigned int cpu)
cpu               616 arch/ia64/kernel/smpboot.c 	if (cpe_vector > 0 && is_cpu_cpei_target(cpu)) {
cpu               617 arch/ia64/kernel/smpboot.c 		printk ("CPU (%d) is CPEI Target\n", cpu);
cpu               639 arch/ia64/kernel/smpboot.c 			printk ("Unable to retarget CPEI, offline cpu [%d] failed\n", cpu);
cpu               649 arch/ia64/kernel/smpboot.c 	int cpu = smp_processor_id();
cpu               654 arch/ia64/kernel/smpboot.c 	if (cpu == 0 && !bsp_remove_ok) {
cpu               659 arch/ia64/kernel/smpboot.c 	set_cpu_online(cpu, false);
cpu               661 arch/ia64/kernel/smpboot.c 	if (migrate_platform_irqs(cpu)) {
cpu               662 arch/ia64/kernel/smpboot.c 		set_cpu_online(cpu, true);
cpu               666 arch/ia64/kernel/smpboot.c 	remove_siblinginfo(cpu);
cpu               669 arch/ia64/kernel/smpboot.c 	cpumask_clear_cpu(cpu, &cpu_callin_map);
cpu               673 arch/ia64/kernel/smpboot.c void __cpu_die(unsigned int cpu)
cpu               679 arch/ia64/kernel/smpboot.c 		if (per_cpu(cpu_state, cpu) == CPU_DEAD)
cpu               681 arch/ia64/kernel/smpboot.c 			printk ("CPU %d is now offline\n", cpu);
cpu               686 arch/ia64/kernel/smpboot.c  	printk(KERN_ERR "CPU %u didn't die...\n", cpu);
cpu               693 arch/ia64/kernel/smpboot.c 	int cpu;
cpu               700 arch/ia64/kernel/smpboot.c 	for_each_online_cpu(cpu) {
cpu               701 arch/ia64/kernel/smpboot.c 		bogosum += cpu_data(cpu)->loops_per_jiffy;
cpu               708 arch/ia64/kernel/smpboot.c static inline void set_cpu_sibling_map(int cpu)
cpu               713 arch/ia64/kernel/smpboot.c 		if ((cpu_data(cpu)->socket_id == cpu_data(i)->socket_id)) {
cpu               714 arch/ia64/kernel/smpboot.c 			cpumask_set_cpu(i, &cpu_core_map[cpu]);
cpu               715 arch/ia64/kernel/smpboot.c 			cpumask_set_cpu(cpu, &cpu_core_map[i]);
cpu               716 arch/ia64/kernel/smpboot.c 			if (cpu_data(cpu)->core_id == cpu_data(i)->core_id) {
cpu               718 arch/ia64/kernel/smpboot.c 						&per_cpu(cpu_sibling_map, cpu));
cpu               719 arch/ia64/kernel/smpboot.c 				cpumask_set_cpu(cpu,
cpu               727 arch/ia64/kernel/smpboot.c __cpu_up(unsigned int cpu, struct task_struct *tidle)
cpu               732 arch/ia64/kernel/smpboot.c 	sapicid = ia64_cpu_to_sapicid[cpu];
cpu               740 arch/ia64/kernel/smpboot.c 	if (cpumask_test_cpu(cpu, &cpu_callin_map))
cpu               743 arch/ia64/kernel/smpboot.c 	per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
cpu               745 arch/ia64/kernel/smpboot.c 	ret = do_boot_cpu(sapicid, cpu, tidle);
cpu               749 arch/ia64/kernel/smpboot.c 	if (cpu_data(cpu)->threads_per_core == 1 &&
cpu               750 arch/ia64/kernel/smpboot.c 	    cpu_data(cpu)->cores_per_socket == 1) {
cpu               751 arch/ia64/kernel/smpboot.c 		cpumask_set_cpu(cpu, &per_cpu(cpu_sibling_map, cpu));
cpu               752 arch/ia64/kernel/smpboot.c 		cpumask_set_cpu(cpu, &cpu_core_map[cpu]);
cpu               756 arch/ia64/kernel/smpboot.c 	set_cpu_sibling_map(cpu);
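
A sketch of the sibling/core map construction shown in the smpboot.c entries above: CPUs sharing a socket_id land in each other's core map, and CPUs that also share a core_id become thread siblings. The kernel walks only online CPUs and uses cpumask_set_cpu(); here the maps are plain bitmasks and the topology table is an assumed example.

/* Build core and sibling masks from socket/core ids. */
#include <stdio.h>

#define NR_CPUS 4

struct cpu_topo { int socket_id; int core_id; };

static struct cpu_topo topo[NR_CPUS] = {
	{0, 0}, {0, 0}, {0, 1}, {1, 0},
};

static unsigned long cpu_core_map[NR_CPUS];
static unsigned long cpu_sibling_map[NR_CPUS];

static void set_cpu_sibling_map(int cpu)
{
	int i;

	for (i = 0; i < NR_CPUS; i++) {
		if (topo[cpu].socket_id == topo[i].socket_id) {
			cpu_core_map[cpu] |= 1UL << i;
			cpu_core_map[i]   |= 1UL << cpu;
			if (topo[cpu].core_id == topo[i].core_id) {
				cpu_sibling_map[cpu] |= 1UL << i;
				cpu_sibling_map[i]   |= 1UL << cpu;
			}
		}
	}
}

int main(void)
{
	int cpu;

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		set_cpu_sibling_map(cpu);
	for (cpu = 0; cpu < NR_CPUS; cpu++)
		printf("cpu%d core_map=0x%lx sibling_map=0x%lx\n",
		       cpu, cpu_core_map[cpu], cpu_sibling_map[cpu]);
	return 0;
}
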
cpu               221 arch/ia64/kernel/time.c 	int cpu = smp_processor_id();
cpu               232 arch/ia64/kernel/time.c 	if (cpu) {
cpu               233 arch/ia64/kernel/time.c 		unsigned long hi = 1UL << ia64_fls(cpu);
cpu               234 arch/ia64/kernel/time.c 		shift = (2*(cpu - hi) + 1) * delta/hi/2;
cpu                50 arch/ia64/kernel/topology.c 		sysfs_cpus[num].cpu.hotpluggable = 1;
cpu                52 arch/ia64/kernel/topology.c 	return register_cpu(&sysfs_cpus[num].cpu, num);
cpu                58 arch/ia64/kernel/topology.c 	unregister_cpu(&sysfs_cpus[num].cpu);
cpu                65 arch/ia64/kernel/topology.c 	return register_cpu(&sysfs_cpus[num].cpu, num);
cpu               138 arch/ia64/kernel/topology.c static void cache_shared_cpu_map_setup(unsigned int cpu,
cpu               145 arch/ia64/kernel/topology.c 	if (cpu_data(cpu)->threads_per_core <= 1 &&
cpu               146 arch/ia64/kernel/topology.c 		cpu_data(cpu)->cores_per_socket <= 1) {
cpu               147 arch/ia64/kernel/topology.c 		cpumask_set_cpu(cpu, &this_leaf->shared_cpu_map);
cpu               160 arch/ia64/kernel/topology.c 			if (cpu_data(cpu)->socket_id == cpu_data(j)->socket_id
cpu               173 arch/ia64/kernel/topology.c static void cache_shared_cpu_map_setup(unsigned int cpu,
cpu               176 arch/ia64/kernel/topology.c 	cpumask_set_cpu(cpu, &this_leaf->shared_cpu_map);
cpu               295 arch/ia64/kernel/topology.c static void cpu_cache_sysfs_exit(unsigned int cpu)
cpu               297 arch/ia64/kernel/topology.c 	kfree(all_cpu_cache_info[cpu].cache_leaves);
cpu               298 arch/ia64/kernel/topology.c 	all_cpu_cache_info[cpu].cache_leaves = NULL;
cpu               299 arch/ia64/kernel/topology.c 	all_cpu_cache_info[cpu].num_cache_leaves = 0;
cpu               300 arch/ia64/kernel/topology.c 	memset(&all_cpu_cache_info[cpu].kobj, 0, sizeof(struct kobject));
cpu               304 arch/ia64/kernel/topology.c static int cpu_cache_sysfs_init(unsigned int cpu)
cpu               333 arch/ia64/kernel/topology.c 			cache_shared_cpu_map_setup(cpu,
cpu               339 arch/ia64/kernel/topology.c 	all_cpu_cache_info[cpu].cache_leaves = this_cache;
cpu               340 arch/ia64/kernel/topology.c 	all_cpu_cache_info[cpu].num_cache_leaves = num_cache_leaves;
cpu               342 arch/ia64/kernel/topology.c 	memset(&all_cpu_cache_info[cpu].kobj, 0, sizeof(struct kobject));
cpu               348 arch/ia64/kernel/topology.c static int cache_add_dev(unsigned int cpu)
cpu               350 arch/ia64/kernel/topology.c 	struct device *sys_dev = get_cpu_device(cpu);
cpu               355 arch/ia64/kernel/topology.c 	if (all_cpu_cache_info[cpu].kobj.parent)
cpu               359 arch/ia64/kernel/topology.c 	retval = cpu_cache_sysfs_init(cpu);
cpu               363 arch/ia64/kernel/topology.c 	retval = kobject_init_and_add(&all_cpu_cache_info[cpu].kobj,
cpu               367 arch/ia64/kernel/topology.c 		cpu_cache_sysfs_exit(cpu);
cpu               371 arch/ia64/kernel/topology.c 	for (i = 0; i < all_cpu_cache_info[cpu].num_cache_leaves; i++) {
cpu               372 arch/ia64/kernel/topology.c 		this_object = LEAF_KOBJECT_PTR(cpu,i);
cpu               375 arch/ia64/kernel/topology.c 					      &all_cpu_cache_info[cpu].kobj,
cpu               379 arch/ia64/kernel/topology.c 				kobject_put(&(LEAF_KOBJECT_PTR(cpu,j)->kobj));
cpu               381 arch/ia64/kernel/topology.c 			kobject_put(&all_cpu_cache_info[cpu].kobj);
cpu               382 arch/ia64/kernel/topology.c 			cpu_cache_sysfs_exit(cpu);
cpu               387 arch/ia64/kernel/topology.c 	kobject_uevent(&all_cpu_cache_info[cpu].kobj, KOBJ_ADD);
cpu               392 arch/ia64/kernel/topology.c static int cache_remove_dev(unsigned int cpu)
cpu               396 arch/ia64/kernel/topology.c 	for (i = 0; i < all_cpu_cache_info[cpu].num_cache_leaves; i++)
cpu               397 arch/ia64/kernel/topology.c 		kobject_put(&(LEAF_KOBJECT_PTR(cpu,i)->kobj));
cpu               399 arch/ia64/kernel/topology.c 	if (all_cpu_cache_info[cpu].kobj.parent) {
cpu               400 arch/ia64/kernel/topology.c 		kobject_put(&all_cpu_cache_info[cpu].kobj);
cpu               401 arch/ia64/kernel/topology.c 		memset(&all_cpu_cache_info[cpu].kobj,
cpu               406 arch/ia64/kernel/topology.c 	cpu_cache_sysfs_exit(cpu);
cpu                55 arch/ia64/kernel/traps.c 	int cpu = get_cpu();
cpu                57 arch/ia64/kernel/traps.c 	if (die.lock_owner != cpu) {
cpu                60 arch/ia64/kernel/traps.c 		die.lock_owner = cpu;
cpu                47 arch/ia64/mm/contig.c 	unsigned int cpu;
cpu                58 arch/ia64/mm/contig.c 	for_each_possible_cpu(cpu) {
cpu                59 arch/ia64/mm/contig.c 		void *src = cpu == 0 ? cpu0_data : __phys_per_cpu_start;
cpu                62 arch/ia64/mm/contig.c 		__per_cpu_offset[cpu] = (char *)cpu_data - __per_cpu_start;
cpu                63 arch/ia64/mm/contig.c 		per_cpu(local_per_cpu_offset, cpu) = __per_cpu_offset[cpu];
cpu                74 arch/ia64/mm/contig.c 		if (cpu == 0)
cpu               109 arch/ia64/mm/contig.c 	unsigned int cpu;
cpu               118 arch/ia64/mm/contig.c 	for_each_possible_cpu(cpu)
cpu               119 arch/ia64/mm/contig.c 		gi->cpu_map[gi->nr_units++] = cpu;
cpu               100 arch/ia64/mm/discontig.c 	int cpu, n = 0;
cpu               102 arch/ia64/mm/discontig.c 	for_each_possible_early_cpu(cpu)
cpu               103 arch/ia64/mm/discontig.c 		if (node == node_cpuid[cpu].nid)
cpu               139 arch/ia64/mm/discontig.c 	int cpu;
cpu               141 arch/ia64/mm/discontig.c 	for_each_possible_early_cpu(cpu) {
cpu               142 arch/ia64/mm/discontig.c 		void *src = cpu == 0 ? __cpu0_per_cpu : __phys_per_cpu_start;
cpu               144 arch/ia64/mm/discontig.c 		if (node != node_cpuid[cpu].nid)
cpu               148 arch/ia64/mm/discontig.c 		__per_cpu_offset[cpu] = (char *)__va(cpu_data) -
cpu               160 arch/ia64/mm/discontig.c 		if (cpu == 0)
cpu               187 arch/ia64/mm/discontig.c 	unsigned int cpu;
cpu               198 arch/ia64/mm/discontig.c 	for_each_possible_cpu(cpu)
cpu               200 arch/ia64/mm/discontig.c 			   (void *)(__per_cpu_offset[cpu] + __per_cpu_start));
cpu               206 arch/ia64/mm/discontig.c 		for_each_possible_cpu(cpu)
cpu               207 arch/ia64/mm/discontig.c 			if (node == node_cpuid[cpu].nid)
cpu               208 arch/ia64/mm/discontig.c 				cpu_map[unit++] = cpu;
cpu               233 arch/ia64/mm/discontig.c 		cpu = cpu_map[unit];
cpu               234 arch/ia64/mm/discontig.c 		node = node_cpuid[cpu].nid;
cpu               244 arch/ia64/mm/discontig.c 		gi->base_offset		= __per_cpu_offset[cpu] + base_offset;
cpu               400 arch/ia64/mm/discontig.c 	int cpu, node;
cpu               406 arch/ia64/mm/discontig.c 	for_each_possible_early_cpu(cpu) {
cpu               407 arch/ia64/mm/discontig.c 		node = node_cpuid[cpu].nid;
cpu               408 arch/ia64/mm/discontig.c 		per_cpu(ia64_cpu_info, cpu).node_data =
cpu               414 arch/ia64/mm/discontig.c 		cpu = 0;
cpu               415 arch/ia64/mm/discontig.c 		node = node_cpuid[cpu].nid;
cpu               529 arch/ia64/mm/discontig.c 	int cpu;
cpu               534 arch/ia64/mm/discontig.c 		for_each_possible_early_cpu(cpu)
cpu               535 arch/ia64/mm/discontig.c 			per_cpu(local_per_cpu_offset, cpu) = __per_cpu_offset[cpu];
cpu                91 arch/ia64/mm/numa.c void numa_clear_node(int cpu)
cpu                93 arch/ia64/mm/numa.c 	unmap_cpu_from_node(cpu, NUMA_NO_NODE);
cpu                81 arch/ia64/mm/tlb.c 	int i, cpu;
cpu                99 arch/ia64/mm/tlb.c 	cpu = get_cpu(); /* prevent preemption/migration */
cpu               101 arch/ia64/mm/tlb.c 		if (i != cpu)
cpu               377 arch/ia64/mm/tlb.c 	int cpu = smp_processor_id();
cpu               398 arch/ia64/mm/tlb.c 		per_cpu(ia64_tr_num, cpu) = 8;
cpu               401 arch/ia64/mm/tlb.c 	per_cpu(ia64_tr_num, cpu) = vm_info_1.pal_vm_info_1_s.max_itr_entry+1;
cpu               402 arch/ia64/mm/tlb.c 	if (per_cpu(ia64_tr_num, cpu) >
cpu               404 arch/ia64/mm/tlb.c 		per_cpu(ia64_tr_num, cpu) =
cpu               406 arch/ia64/mm/tlb.c 	if (per_cpu(ia64_tr_num, cpu) > IA64_TR_ALLOC_MAX) {
cpu               408 arch/ia64/mm/tlb.c 		per_cpu(ia64_tr_num, cpu) = IA64_TR_ALLOC_MAX;
cpu               460 arch/ia64/mm/tlb.c 	int cpu = smp_processor_id();
cpu               462 arch/ia64/mm/tlb.c 	if (!ia64_idtrs[cpu]) {
cpu               463 arch/ia64/mm/tlb.c 		ia64_idtrs[cpu] = kmalloc_array(2 * IA64_TR_ALLOC_MAX,
cpu               466 arch/ia64/mm/tlb.c 		if (!ia64_idtrs[cpu])
cpu               472 arch/ia64/mm/tlb.c 		p = ia64_idtrs[cpu];
cpu               473 arch/ia64/mm/tlb.c 		for (i = IA64_TR_ALLOC_BASE; i <= per_cpu(ia64_tr_used, cpu);
cpu               484 arch/ia64/mm/tlb.c 		p = ia64_idtrs[cpu] + IA64_TR_ALLOC_MAX;
cpu               485 arch/ia64/mm/tlb.c 		for (i = IA64_TR_ALLOC_BASE; i <= per_cpu(ia64_tr_used, cpu);
cpu               496 arch/ia64/mm/tlb.c 	for (i = IA64_TR_ALLOC_BASE; i < per_cpu(ia64_tr_num, cpu); i++) {
cpu               499 arch/ia64/mm/tlb.c 			if (!((ia64_idtrs[cpu] + i)->pte & 0x1))
cpu               503 arch/ia64/mm/tlb.c 			if (!((ia64_idtrs[cpu] + IA64_TR_ALLOC_MAX + i)->pte & 0x1))
cpu               507 arch/ia64/mm/tlb.c 			if (!((ia64_idtrs[cpu] + i)->pte & 0x1) &&
cpu               508 arch/ia64/mm/tlb.c 			    !((ia64_idtrs[cpu] + IA64_TR_ALLOC_MAX + i)->pte & 0x1))
cpu               517 arch/ia64/mm/tlb.c 	if (i >= per_cpu(ia64_tr_num, cpu))
cpu               521 arch/ia64/mm/tlb.c 	if (i > per_cpu(ia64_tr_used, cpu))
cpu               522 arch/ia64/mm/tlb.c 		per_cpu(ia64_tr_used, cpu) = i;
cpu               528 arch/ia64/mm/tlb.c 		p = ia64_idtrs[cpu] + i;
cpu               537 arch/ia64/mm/tlb.c 		p = ia64_idtrs[cpu] + IA64_TR_ALLOC_MAX + i;
cpu               560 arch/ia64/mm/tlb.c 	int cpu = smp_processor_id();
cpu               564 arch/ia64/mm/tlb.c 	if (slot < IA64_TR_ALLOC_BASE || slot >= per_cpu(ia64_tr_num, cpu))
cpu               568 arch/ia64/mm/tlb.c 		p = ia64_idtrs[cpu] + slot;
cpu               577 arch/ia64/mm/tlb.c 		p = ia64_idtrs[cpu] + IA64_TR_ALLOC_MAX + slot;
cpu               585 arch/ia64/mm/tlb.c 	for (i = per_cpu(ia64_tr_used, cpu); i >= IA64_TR_ALLOC_BASE; i--) {
cpu               586 arch/ia64/mm/tlb.c 		if (((ia64_idtrs[cpu] + i)->pte & 0x1) ||
cpu               587 arch/ia64/mm/tlb.c 		    ((ia64_idtrs[cpu] + IA64_TR_ALLOC_MAX + i)->pte & 0x1))
cpu               590 arch/ia64/mm/tlb.c 	per_cpu(ia64_tr_used, cpu) = i;
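
A model of the translation-register slot search in the mm/tlb.c entries above: each CPU owns an array with the instruction-TR images first and the data-TR images after, a slot is free when bit 0 of its pte image is clear, and a per-CPU high-water mark of used slots is maintained. Constants and the single-CPU array are illustrative assumptions.

/* Find a TR slot whose ITR and DTR halves are both unused. */
#include <stdio.h>

#define TR_ALLOC_BASE 1
#define TR_ALLOC_MAX  8

struct tr_entry { unsigned long pte; };

static struct tr_entry idtrs[2 * TR_ALLOC_MAX];     /* one CPU's ITR+DTR slots */
static int tr_used = TR_ALLOC_BASE;

static int find_free_tr_slot(void)
{
	int i;

	for (i = TR_ALLOC_BASE; i < TR_ALLOC_MAX; i++)
		if (!(idtrs[i].pte & 0x1) &&
		    !(idtrs[TR_ALLOC_MAX + i].pte & 0x1))
			return i;
	return -1;
}

int main(void)
{
	int slot = find_free_tr_slot();

	if (slot >= 0) {
		idtrs[slot].pte |= 0x1;             /* mark the ITR half in use */
		if (slot > tr_used)
			tr_used = slot;             /* track the high-water mark */
	}
	printf("allocated slot %d, high-water %d\n", slot, tr_used);
	return 0;
}
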
cpu                89 arch/ia64/uv/kernel/setup.c 	int nid, cpu, m_val, n_val;
cpu               105 arch/ia64/uv/kernel/setup.c 	for_each_present_cpu(cpu) {
cpu               106 arch/ia64/uv/kernel/setup.c 		nid = cpu_to_node(cpu);
cpu               107 arch/ia64/uv/kernel/setup.c 		uv_cpu_hub_info(cpu)->lowmem_remap_base = lowmem_redir_base;
cpu               108 arch/ia64/uv/kernel/setup.c 		uv_cpu_hub_info(cpu)->lowmem_remap_top =
cpu               110 arch/ia64/uv/kernel/setup.c 		uv_cpu_hub_info(cpu)->m_val = m_val;
cpu               111 arch/ia64/uv/kernel/setup.c 		uv_cpu_hub_info(cpu)->n_val = n_val;
cpu               112 arch/ia64/uv/kernel/setup.c 		uv_cpu_hub_info(cpu)->pnode_mask = (1 << n_val) -1;
cpu               113 arch/ia64/uv/kernel/setup.c 		uv_cpu_hub_info(cpu)->gpa_mask = (1 << (m_val + n_val)) - 1;
cpu               114 arch/ia64/uv/kernel/setup.c 		uv_cpu_hub_info(cpu)->gnode_upper = gnode_upper;
cpu               115 arch/ia64/uv/kernel/setup.c 		uv_cpu_hub_info(cpu)->global_mmr_base = mmr_base;
cpu               116 arch/ia64/uv/kernel/setup.c 		uv_cpu_hub_info(cpu)->coherency_domain_number = 0;/* ZZZ */
cpu               117 arch/ia64/uv/kernel/setup.c 		printk(KERN_DEBUG "UV cpu %d, nid %d\n", cpu, nid);
cpu                32 arch/m68k/include/asm/thread_info.h 	__u32			cpu;		/* should always be 0 on m68k */
cpu               400 arch/m68k/kernel/setup_mm.c 	const char *cpu, *mmu, *fpu;
cpu               410 arch/m68k/kernel/setup_mm.c 		cpu = "68020";
cpu               413 arch/m68k/kernel/setup_mm.c 		cpu = "68030";
cpu               416 arch/m68k/kernel/setup_mm.c 		cpu = "68040";
cpu               419 arch/m68k/kernel/setup_mm.c 		cpu = "68060";
cpu               422 arch/m68k/kernel/setup_mm.c 		cpu = "ColdFire";
cpu               425 arch/m68k/kernel/setup_mm.c 		cpu = "680x0";
cpu               473 arch/m68k/kernel/setup_mm.c 		   cpu, mmu, fpu,
cpu               177 arch/m68k/kernel/setup_no.c 	char *cpu, *mmu, *fpu;
cpu               180 arch/m68k/kernel/setup_no.c 	cpu = CPU_NAME;
cpu               191 arch/m68k/kernel/setup_no.c 		      cpu, mmu, fpu,
cpu                96 arch/microblaze/include/asm/cpuinfo.h void set_cpuinfo_static(struct cpuinfo *ci, struct device_node *cpu);
cpu                97 arch/microblaze/include/asm/cpuinfo.h void set_cpuinfo_pvr_full(struct cpuinfo *ci, struct device_node *cpu);
cpu                99 arch/microblaze/include/asm/cpuinfo.h static inline unsigned int fcpu(struct device_node *cpu, char *n)
cpu               103 arch/microblaze/include/asm/cpuinfo.h 	of_property_read_u32(cpu, n, &val);
cpu                70 arch/microblaze/include/asm/thread_info.h 	__u32			cpu; /* current CPU */
cpu                84 arch/microblaze/include/asm/thread_info.h 	.cpu		= 0,			\
cpu                33 arch/microblaze/kernel/cpu/cpuinfo-pvr-full.c void set_cpuinfo_pvr_full(struct cpuinfo *ci, struct device_node *cpu)
cpu                23 arch/microblaze/kernel/cpu/cpuinfo-static.c void __init set_cpuinfo_static(struct cpuinfo *ci, struct device_node *cpu)
cpu                28 arch/microblaze/kernel/cpu/cpuinfo-static.c 		(fcpu(cpu, "xlnx,use-barrel") ? PVR0_USE_BARREL_MASK : 0) |
cpu                29 arch/microblaze/kernel/cpu/cpuinfo-static.c 		(fcpu(cpu, "xlnx,use-msr-instr") ? PVR2_USE_MSR_INSTR : 0) |
cpu                30 arch/microblaze/kernel/cpu/cpuinfo-static.c 		(fcpu(cpu, "xlnx,use-pcmp-instr") ? PVR2_USE_PCMP_INSTR : 0) |
cpu                31 arch/microblaze/kernel/cpu/cpuinfo-static.c 		(fcpu(cpu, "xlnx,use-div") ? PVR0_USE_DIV_MASK : 0);
cpu                43 arch/microblaze/kernel/cpu/cpuinfo-static.c 	ci->use_mult = fcpu(cpu, "xlnx,use-hw-mul");
cpu                51 arch/microblaze/kernel/cpu/cpuinfo-static.c 	ci->use_fpu = fcpu(cpu, "xlnx,use-fpu");
cpu                59 arch/microblaze/kernel/cpu/cpuinfo-static.c 		(fcpu(cpu, "xlnx,unaligned-exceptions") ?
cpu                61 arch/microblaze/kernel/cpu/cpuinfo-static.c 		(fcpu(cpu, "xlnx,ill-opcode-exception") ?
cpu                63 arch/microblaze/kernel/cpu/cpuinfo-static.c 		(fcpu(cpu, "xlnx,iopb-bus-exception") ?
cpu                65 arch/microblaze/kernel/cpu/cpuinfo-static.c 		(fcpu(cpu, "xlnx,dopb-bus-exception") ?
cpu                67 arch/microblaze/kernel/cpu/cpuinfo-static.c 		(fcpu(cpu, "xlnx,div-zero-exception") ?
cpu                69 arch/microblaze/kernel/cpu/cpuinfo-static.c 		(fcpu(cpu, "xlnx,fpu-exception") ? PVR2_FPU_EXC_MASK : 0) |
cpu                70 arch/microblaze/kernel/cpu/cpuinfo-static.c 		(fcpu(cpu, "xlnx,fsl-exception") ? PVR2_USE_EXTEND_FSL : 0);
cpu                72 arch/microblaze/kernel/cpu/cpuinfo-static.c 	ci->use_icache = fcpu(cpu, "xlnx,use-icache");
cpu                73 arch/microblaze/kernel/cpu/cpuinfo-static.c 	ci->icache_tagbits = fcpu(cpu, "xlnx,addr-tag-bits");
cpu                74 arch/microblaze/kernel/cpu/cpuinfo-static.c 	ci->icache_write = fcpu(cpu, "xlnx,allow-icache-wr");
cpu                75 arch/microblaze/kernel/cpu/cpuinfo-static.c 	ci->icache_line_length = fcpu(cpu, "xlnx,icache-line-len") << 2;
cpu                77 arch/microblaze/kernel/cpu/cpuinfo-static.c 		if (fcpu(cpu, "xlnx,icache-use-fsl"))
cpu                82 arch/microblaze/kernel/cpu/cpuinfo-static.c 	ci->icache_size = fcpu(cpu, "i-cache-size");
cpu                83 arch/microblaze/kernel/cpu/cpuinfo-static.c 	ci->icache_base = fcpu(cpu, "i-cache-baseaddr");
cpu                84 arch/microblaze/kernel/cpu/cpuinfo-static.c 	ci->icache_high = fcpu(cpu, "i-cache-highaddr");
cpu                86 arch/microblaze/kernel/cpu/cpuinfo-static.c 	ci->use_dcache = fcpu(cpu, "xlnx,use-dcache");
cpu                87 arch/microblaze/kernel/cpu/cpuinfo-static.c 	ci->dcache_tagbits = fcpu(cpu, "xlnx,dcache-addr-tag");
cpu                88 arch/microblaze/kernel/cpu/cpuinfo-static.c 	ci->dcache_write = fcpu(cpu, "xlnx,allow-dcache-wr");
cpu                89 arch/microblaze/kernel/cpu/cpuinfo-static.c 	ci->dcache_line_length = fcpu(cpu, "xlnx,dcache-line-len") << 2;
cpu                91 arch/microblaze/kernel/cpu/cpuinfo-static.c 		if (fcpu(cpu, "xlnx,dcache-use-fsl"))
cpu                96 arch/microblaze/kernel/cpu/cpuinfo-static.c 	ci->dcache_size = fcpu(cpu, "d-cache-size");
cpu                97 arch/microblaze/kernel/cpu/cpuinfo-static.c 	ci->dcache_base = fcpu(cpu, "d-cache-baseaddr");
cpu                98 arch/microblaze/kernel/cpu/cpuinfo-static.c 	ci->dcache_high = fcpu(cpu, "d-cache-highaddr");
cpu                99 arch/microblaze/kernel/cpu/cpuinfo-static.c 	ci->dcache_wb = fcpu(cpu, "xlnx,dcache-use-writeback");
cpu               101 arch/microblaze/kernel/cpu/cpuinfo-static.c 	ci->use_dopb = fcpu(cpu, "xlnx,d-opb");
cpu               102 arch/microblaze/kernel/cpu/cpuinfo-static.c 	ci->use_iopb = fcpu(cpu, "xlnx,i-opb");
cpu               103 arch/microblaze/kernel/cpu/cpuinfo-static.c 	ci->use_dlmb = fcpu(cpu, "xlnx,d-lmb");
cpu               104 arch/microblaze/kernel/cpu/cpuinfo-static.c 	ci->use_ilmb = fcpu(cpu, "xlnx,i-lmb");
cpu               106 arch/microblaze/kernel/cpu/cpuinfo-static.c 	ci->num_fsl = fcpu(cpu, "xlnx,fsl-links");
cpu               107 arch/microblaze/kernel/cpu/cpuinfo-static.c 	ci->irq_edge = fcpu(cpu, "xlnx,interrupt-is-edge");
cpu               108 arch/microblaze/kernel/cpu/cpuinfo-static.c 	ci->irq_positive = fcpu(cpu, "xlnx,edge-is-positive");
cpu               111 arch/microblaze/kernel/cpu/cpuinfo-static.c 	ci->hw_debug = fcpu(cpu, "xlnx,debug-enabled");
cpu               112 arch/microblaze/kernel/cpu/cpuinfo-static.c 	ci->num_pc_brk = fcpu(cpu, "xlnx,number-of-pc-brk");
cpu               113 arch/microblaze/kernel/cpu/cpuinfo-static.c 	ci->num_rd_brk = fcpu(cpu, "xlnx,number-of-rd-addr-brk");
cpu               114 arch/microblaze/kernel/cpu/cpuinfo-static.c 	ci->num_wr_brk = fcpu(cpu, "xlnx,number-of-wr-addr-brk");
cpu               116 arch/microblaze/kernel/cpu/cpuinfo-static.c 	ci->pvr_user1 = fcpu(cpu, "xlnx,pvr-user1");
cpu               117 arch/microblaze/kernel/cpu/cpuinfo-static.c 	ci->pvr_user2 = fcpu(cpu, "xlnx,pvr-user2");
cpu               119 arch/microblaze/kernel/cpu/cpuinfo-static.c 	ci->mmu = fcpu(cpu, "xlnx,use-mmu");
cpu               120 arch/microblaze/kernel/cpu/cpuinfo-static.c 	ci->mmu_privins = fcpu(cpu, "xlnx,mmu-privileged-instr");
cpu               121 arch/microblaze/kernel/cpu/cpuinfo-static.c 	ci->endian = fcpu(cpu, "xlnx,endianness");
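
A user-space stand-in for the fcpu() device-tree helper used throughout the cpuinfo-static.c entries above: look up a named property of the CPU node and return its u32 value, with a missing property reading as 0. The property table below replaces the real device tree and its names/values are assumptions.

/* Property lookup modeled after fcpu()/of_property_read_u32(). */
#include <stdio.h>
#include <string.h>

struct dt_prop { const char *name; unsigned int val; };

static const struct dt_prop cpu_node[] = {
	{ "xlnx,use-barrel", 1 },
	{ "xlnx,use-hw-mul", 2 },
	{ "i-cache-size",    32768 },
};

static unsigned int fcpu(const char *name)
{
	size_t i;

	for (i = 0; i < sizeof(cpu_node) / sizeof(cpu_node[0]); i++)
		if (!strcmp(cpu_node[i].name, name))
			return cpu_node[i].val;
	return 0;                                   /* absent property reads as 0 */
}

int main(void)
{
	printf("use-barrel=%u hw-mul=%u icache=%u missing=%u\n",
	       fcpu("xlnx,use-barrel"), fcpu("xlnx,use-hw-mul"),
	       fcpu("i-cache-size"), fcpu("xlnx,no-such-prop"));
	return 0;
}
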
cpu                88 arch/microblaze/kernel/cpu/cpuinfo.c static struct device_node *cpu;
cpu                92 arch/microblaze/kernel/cpu/cpuinfo.c 	cpu = of_get_cpu_node(0, NULL);
cpu                93 arch/microblaze/kernel/cpu/cpuinfo.c 	if (!cpu)
cpu               102 arch/microblaze/kernel/cpu/cpuinfo.c 		set_cpuinfo_static(&cpuinfo, cpu);
cpu               109 arch/microblaze/kernel/cpu/cpuinfo.c 		set_cpuinfo_static(&cpuinfo, cpu);
cpu               110 arch/microblaze/kernel/cpu/cpuinfo.c 		set_cpuinfo_pvr_full(&cpuinfo, cpu);
cpu               114 arch/microblaze/kernel/cpu/cpuinfo.c 		set_cpuinfo_static(&cpuinfo, cpu);
cpu               121 arch/microblaze/kernel/cpu/cpuinfo.c 	of_node_put(cpu);
cpu               128 arch/microblaze/kernel/cpu/cpuinfo.c 	clk = of_clk_get(cpu, 0);
cpu               132 arch/microblaze/kernel/cpu/cpuinfo.c 		cpuinfo.cpu_clock_freq = fcpu(cpu, "timebase-frequency");
cpu                61 arch/mips/ar7/clock.c 	struct tnetd7300_clock cpu;
cpu                82 arch/mips/ar7/clock.c 	struct tnetd7200_clock cpu;
cpu               249 arch/mips/ar7/clock.c 			&clocks->cpu, bootcr, AR7_AFE_CLOCK);
cpu               351 arch/mips/ar7/clock.c 		tnetd7200_set_clock(cpu_base, &clocks->cpu,
cpu               364 arch/mips/ar7/clock.c 			tnetd7200_set_clock(cpu_base, &clocks->cpu,
cpu               297 arch/mips/bcm63xx/cpu.c 	unsigned int cpu = smp_processor_id();
cpu               306 arch/mips/bcm63xx/cpu.c 			__cpu_name[cpu] = "Broadcom BCM6338";
cpu                28 arch/mips/bcm63xx/irq.c static void (*dispatch_internal)(int cpu);
cpu                53 arch/mips/bcm63xx/irq.c static inline int enable_irq_for_cpu(int cpu, struct irq_data *d,
cpu                56 arch/mips/bcm63xx/irq.c 	bool enable = cpu_online(cpu);
cpu                60 arch/mips/bcm63xx/irq.c 		enable &= cpumask_test_cpu(cpu, m);
cpu                62 arch/mips/bcm63xx/irq.c 		enable &= cpumask_test_cpu(cpu, irq_data_get_affinity_mask(d));
cpu                75 arch/mips/bcm63xx/irq.c void __dispatch_internal_##width(int cpu)				\
cpu                81 arch/mips/bcm63xx/irq.c 	unsigned int *next = &i[cpu];					\
cpu                89 arch/mips/bcm63xx/irq.c 		val = bcm_readl(irq_stat_addr[cpu] + src * sizeof(u32)); \
cpu                90 arch/mips/bcm63xx/irq.c 		val &= bcm_readl(irq_mask_addr[cpu] + src * sizeof(u32)); \
cpu               119 arch/mips/bcm63xx/irq.c 	int cpu;							\
cpu               122 arch/mips/bcm63xx/irq.c 	for_each_present_cpu(cpu) {					\
cpu               123 arch/mips/bcm63xx/irq.c 		if (!irq_mask_addr[cpu])				\
cpu               126 arch/mips/bcm63xx/irq.c 		val = bcm_readl(irq_mask_addr[cpu] + reg * sizeof(u32));\
cpu               128 arch/mips/bcm63xx/irq.c 		bcm_writel(val, irq_mask_addr[cpu] + reg * sizeof(u32));\
cpu               141 arch/mips/bcm63xx/irq.c 	int cpu;							\
cpu               144 arch/mips/bcm63xx/irq.c 	for_each_present_cpu(cpu) {					\
cpu               145 arch/mips/bcm63xx/irq.c 		if (!irq_mask_addr[cpu])				\
cpu               148 arch/mips/bcm63xx/irq.c 		val = bcm_readl(irq_mask_addr[cpu] + reg * sizeof(u32));\
cpu               149 arch/mips/bcm63xx/irq.c 		if (enable_irq_for_cpu(cpu, d, m))			\
cpu               153 arch/mips/bcm63xx/irq.c 		bcm_writel(val, irq_mask_addr[cpu] + reg * sizeof(u32));\
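
A sketch of the per-CPU enable decision made by enable_irq_for_cpu() in the bcm63xx/irq.c entries above: an interrupt is enabled on a CPU only if that CPU is online and present in the IRQ's affinity mask, and the result is applied bit by bit to that CPU's mask-register image. Masks are plain bitmasks and register images are arrays; both are illustrative assumptions.

/* Affinity-aware update of per-CPU interrupt mask images. */
#include <stdio.h>

#define NR_CPUS 2

static unsigned long online_mask = 0x3;             /* both CPUs online */
static unsigned long irq_mask_reg[NR_CPUS];         /* per-CPU mask register image */

static int enable_irq_for_cpu(int cpu, unsigned long affinity)
{
	return ((online_mask >> cpu) & 1) && ((affinity >> cpu) & 1);
}

static void update_irq_mask(int irq_bit, unsigned long affinity)
{
	int cpu;

	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		if (enable_irq_for_cpu(cpu, affinity))
			irq_mask_reg[cpu] |= 1UL << irq_bit;
		else
			irq_mask_reg[cpu] &= ~(1UL << irq_bit);
	}
}

int main(void)
{
	update_irq_mask(/*irq_bit=*/5, /*affinity=*/0x1);   /* CPU0 only */
	printf("cpu0 mask=0x%lx cpu1 mask=0x%lx\n",
	       irq_mask_reg[0], irq_mask_reg[1]);
	return 0;
}
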
cpu               133 arch/mips/cavium-octeon/octeon-irq.c static int octeon_coreid_for_cpu(int cpu)
cpu               136 arch/mips/cavium-octeon/octeon-irq.c 	return cpu_logical_map(cpu);
cpu               265 arch/mips/cavium-octeon/octeon-irq.c 	int cpu;
cpu               271 arch/mips/cavium-octeon/octeon-irq.c 		cpu = cd->current_cpu;
cpu               273 arch/mips/cavium-octeon/octeon-irq.c 			cpu = cpumask_next(cpu, mask);
cpu               274 arch/mips/cavium-octeon/octeon-irq.c 			if (cpu >= nr_cpu_ids) {
cpu               275 arch/mips/cavium-octeon/octeon-irq.c 				cpu = -1;
cpu               277 arch/mips/cavium-octeon/octeon-irq.c 			} else if (cpumask_test_cpu(cpu, cpu_online_mask)) {
cpu               282 arch/mips/cavium-octeon/octeon-irq.c 		cpu = cpumask_first(mask);
cpu               284 arch/mips/cavium-octeon/octeon-irq.c 		cpu = smp_processor_id();
cpu               286 arch/mips/cavium-octeon/octeon-irq.c 	cd->current_cpu = cpu;
cpu               287 arch/mips/cavium-octeon/octeon-irq.c 	return cpu;
cpu               295 arch/mips/cavium-octeon/octeon-irq.c 	int cpu = next_cpu_for_irq(data);
cpu               296 arch/mips/cavium-octeon/octeon-irq.c 	int coreid = octeon_coreid_for_cpu(cpu);
cpu               300 arch/mips/cavium-octeon/octeon-irq.c 	raw_spinlock_t *lock = &per_cpu(octeon_irq_ciu_spinlock, cpu);
cpu               306 arch/mips/cavium-octeon/octeon-irq.c 		pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu);
cpu               315 arch/mips/cavium-octeon/octeon-irq.c 		pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu);
cpu               395 arch/mips/cavium-octeon/octeon-irq.c 	int cpu;
cpu               401 arch/mips/cavium-octeon/octeon-irq.c 	for_each_online_cpu(cpu) {
cpu               402 arch/mips/cavium-octeon/octeon-irq.c 		int coreid = octeon_coreid_for_cpu(cpu);
cpu               403 arch/mips/cavium-octeon/octeon-irq.c 		lock = &per_cpu(octeon_irq_ciu_spinlock, cpu);
cpu               405 arch/mips/cavium-octeon/octeon-irq.c 			pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu);
cpu               407 arch/mips/cavium-octeon/octeon-irq.c 			pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu);
cpu               428 arch/mips/cavium-octeon/octeon-irq.c 	int cpu;
cpu               434 arch/mips/cavium-octeon/octeon-irq.c 	for_each_online_cpu(cpu) {
cpu               435 arch/mips/cavium-octeon/octeon-irq.c 		int coreid = octeon_coreid_for_cpu(cpu);
cpu               436 arch/mips/cavium-octeon/octeon-irq.c 		lock = &per_cpu(octeon_irq_ciu_spinlock, cpu);
cpu               438 arch/mips/cavium-octeon/octeon-irq.c 			pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu);
cpu               440 arch/mips/cavium-octeon/octeon-irq.c 			pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu);
cpu               464 arch/mips/cavium-octeon/octeon-irq.c 	int cpu = next_cpu_for_irq(data);
cpu               475 arch/mips/cavium-octeon/octeon-irq.c 		int index = octeon_coreid_for_cpu(cpu) * 2;
cpu               476 arch/mips/cavium-octeon/octeon-irq.c 		set_bit(cd->bit, &per_cpu(octeon_irq_ciu0_en_mirror, cpu));
cpu               479 arch/mips/cavium-octeon/octeon-irq.c 		int index = octeon_coreid_for_cpu(cpu) * 2 + 1;
cpu               480 arch/mips/cavium-octeon/octeon-irq.c 		set_bit(cd->bit, &per_cpu(octeon_irq_ciu1_en_mirror, cpu));
cpu               491 arch/mips/cavium-octeon/octeon-irq.c 	int cpu = next_cpu_for_irq(data);
cpu               492 arch/mips/cavium-octeon/octeon-irq.c 	int index = octeon_coreid_for_cpu(cpu);
cpu               507 arch/mips/cavium-octeon/octeon-irq.c 	int cpu = next_cpu_for_irq(data);
cpu               508 arch/mips/cavium-octeon/octeon-irq.c 	int index = octeon_coreid_for_cpu(cpu);
cpu               520 arch/mips/cavium-octeon/octeon-irq.c 	int cpu = next_cpu_for_irq(data);
cpu               521 arch/mips/cavium-octeon/octeon-irq.c 	int index = octeon_coreid_for_cpu(cpu);
cpu               532 arch/mips/cavium-octeon/octeon-irq.c 	int cpu;
cpu               539 arch/mips/cavium-octeon/octeon-irq.c 	for_each_online_cpu(cpu) {
cpu               540 arch/mips/cavium-octeon/octeon-irq.c 		int coreid = octeon_coreid_for_cpu(cpu);
cpu               613 arch/mips/cavium-octeon/octeon-irq.c 	int cpu;
cpu               621 arch/mips/cavium-octeon/octeon-irq.c 		for_each_online_cpu(cpu) {
cpu               622 arch/mips/cavium-octeon/octeon-irq.c 			int index = octeon_coreid_for_cpu(cpu) * 2;
cpu               624 arch/mips/cavium-octeon/octeon-irq.c 				&per_cpu(octeon_irq_ciu0_en_mirror, cpu));
cpu               628 arch/mips/cavium-octeon/octeon-irq.c 		for_each_online_cpu(cpu) {
cpu               629 arch/mips/cavium-octeon/octeon-irq.c 			int index = octeon_coreid_for_cpu(cpu) * 2 + 1;
cpu               631 arch/mips/cavium-octeon/octeon-irq.c 				&per_cpu(octeon_irq_ciu1_en_mirror, cpu));
cpu               643 arch/mips/cavium-octeon/octeon-irq.c 	int cpu;
cpu               651 arch/mips/cavium-octeon/octeon-irq.c 		for_each_online_cpu(cpu) {
cpu               652 arch/mips/cavium-octeon/octeon-irq.c 			int index = octeon_coreid_for_cpu(cpu) * 2;
cpu               654 arch/mips/cavium-octeon/octeon-irq.c 				&per_cpu(octeon_irq_ciu0_en_mirror, cpu));
cpu               658 arch/mips/cavium-octeon/octeon-irq.c 		for_each_online_cpu(cpu) {
cpu               659 arch/mips/cavium-octeon/octeon-irq.c 			int index = octeon_coreid_for_cpu(cpu) * 2 + 1;
cpu               661 arch/mips/cavium-octeon/octeon-irq.c 				&per_cpu(octeon_irq_ciu1_en_mirror, cpu));
cpu               759 arch/mips/cavium-octeon/octeon-irq.c 	int cpu = smp_processor_id();
cpu               763 arch/mips/cavium-octeon/octeon-irq.c 	if (!cpumask_test_cpu(cpu, mask))
cpu               772 arch/mips/cavium-octeon/octeon-irq.c 		cpumask_clear_cpu(cpu, &new_affinity);
cpu               784 arch/mips/cavium-octeon/octeon-irq.c 	int cpu;
cpu               805 arch/mips/cavium-octeon/octeon-irq.c 	for_each_online_cpu(cpu) {
cpu               806 arch/mips/cavium-octeon/octeon-irq.c 		int coreid = octeon_coreid_for_cpu(cpu);
cpu               808 arch/mips/cavium-octeon/octeon-irq.c 		lock = &per_cpu(octeon_irq_ciu_spinlock, cpu);
cpu               812 arch/mips/cavium-octeon/octeon-irq.c 			pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu);
cpu               814 arch/mips/cavium-octeon/octeon-irq.c 			pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu);
cpu               816 arch/mips/cavium-octeon/octeon-irq.c 		if (cpumask_test_cpu(cpu, dest) && enable_one) {
cpu               846 arch/mips/cavium-octeon/octeon-irq.c 	int cpu;
cpu               858 arch/mips/cavium-octeon/octeon-irq.c 		for_each_online_cpu(cpu) {
cpu               859 arch/mips/cavium-octeon/octeon-irq.c 			unsigned long *pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu);
cpu               860 arch/mips/cavium-octeon/octeon-irq.c 			int index = octeon_coreid_for_cpu(cpu) * 2;
cpu               861 arch/mips/cavium-octeon/octeon-irq.c 			if (cpumask_test_cpu(cpu, dest) && enable_one) {
cpu               871 arch/mips/cavium-octeon/octeon-irq.c 		for_each_online_cpu(cpu) {
cpu               872 arch/mips/cavium-octeon/octeon-irq.c 			unsigned long *pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu);
cpu               873 arch/mips/cavium-octeon/octeon-irq.c 			int index = octeon_coreid_for_cpu(cpu) * 2 + 1;
cpu               874 arch/mips/cavium-octeon/octeon-irq.c 			if (cpumask_test_cpu(cpu, dest) && enable_one) {
cpu               891 arch/mips/cavium-octeon/octeon-irq.c 	int cpu;
cpu               902 arch/mips/cavium-octeon/octeon-irq.c 	for_each_online_cpu(cpu) {
cpu               903 arch/mips/cavium-octeon/octeon-irq.c 		int index = octeon_coreid_for_cpu(cpu);
cpu               905 arch/mips/cavium-octeon/octeon-irq.c 		if (cpumask_test_cpu(cpu, dest) && enable_one) {
cpu              1071 arch/mips/cavium-octeon/octeon-irq.c 	int cpu = octeon_cpu_for_coreid(coreid);
cpu              1072 arch/mips/cavium-octeon/octeon-irq.c 	raw_spinlock_t *lock = &per_cpu(octeon_irq_ciu_spinlock, cpu);
cpu              1075 arch/mips/cavium-octeon/octeon-irq.c 	pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu);
cpu              1093 arch/mips/cavium-octeon/octeon-irq.c 	int cpu = octeon_cpu_for_coreid(coreid);
cpu              1095 arch/mips/cavium-octeon/octeon-irq.c 	set_bit(coreid, &per_cpu(octeon_irq_ciu1_en_mirror, cpu));
cpu              1654 arch/mips/cavium-octeon/octeon-irq.c 	int cpu = next_cpu_for_irq(data);
cpu              1655 arch/mips/cavium-octeon/octeon-irq.c 	int coreid = octeon_coreid_for_cpu(cpu);
cpu              1715 arch/mips/cavium-octeon/octeon-irq.c 	int cpu;
cpu              1722 arch/mips/cavium-octeon/octeon-irq.c 	for_each_online_cpu(cpu) {
cpu              1724 arch/mips/cavium-octeon/octeon-irq.c 			octeon_coreid_for_cpu(cpu)) + (0x1000ull * cd->line);
cpu              1731 arch/mips/cavium-octeon/octeon-irq.c 	int cpu;
cpu              1736 arch/mips/cavium-octeon/octeon-irq.c 	for_each_online_cpu(cpu) {
cpu              1738 arch/mips/cavium-octeon/octeon-irq.c 			octeon_coreid_for_cpu(cpu));
cpu              1745 arch/mips/cavium-octeon/octeon-irq.c 	int cpu;
cpu              1750 arch/mips/cavium-octeon/octeon-irq.c 	for_each_online_cpu(cpu) {
cpu              1752 arch/mips/cavium-octeon/octeon-irq.c 			octeon_coreid_for_cpu(cpu));
cpu              1783 arch/mips/cavium-octeon/octeon-irq.c 	int cpu;
cpu              1794 arch/mips/cavium-octeon/octeon-irq.c 	for_each_online_cpu(cpu) {
cpu              1796 arch/mips/cavium-octeon/octeon-irq.c 		if (cpumask_test_cpu(cpu, dest) && enable_one) {
cpu              1799 arch/mips/cavium-octeon/octeon-irq.c 				octeon_coreid_for_cpu(cpu)) +
cpu              1803 arch/mips/cavium-octeon/octeon-irq.c 				octeon_coreid_for_cpu(cpu)) +
cpu              2383 arch/mips/cavium-octeon/octeon-irq.c 	int cpu;
cpu              2390 arch/mips/cavium-octeon/octeon-irq.c 	cpu = next_cpu_for_irq(data);
cpu              2401 arch/mips/cavium-octeon/octeon-irq.c 	isc_ctl.s.idt = per_cpu(octeon_irq_ciu3_idt_ip2, cpu);
cpu              2495 arch/mips/cavium-octeon/octeon-irq.c 	int cpu;
cpu              2506 arch/mips/cavium-octeon/octeon-irq.c 	cpu = cpumask_first(dest);
cpu              2507 arch/mips/cavium-octeon/octeon-irq.c 	if (cpu >= nr_cpu_ids)
cpu              2508 arch/mips/cavium-octeon/octeon-irq.c 		cpu = smp_processor_id();
cpu              2509 arch/mips/cavium-octeon/octeon-irq.c 	cd->current_cpu = cpu;
cpu              2518 arch/mips/cavium-octeon/octeon-irq.c 	isc_ctl.s.idt = per_cpu(octeon_irq_ciu3_idt_ip2, cpu);
cpu              2628 arch/mips/cavium-octeon/octeon-irq.c static unsigned int octeon_irq_ciu3_mbox_intsn_for_cpu(int cpu, unsigned int mbox)
cpu              2630 arch/mips/cavium-octeon/octeon-irq.c 	int local_core = octeon_coreid_for_cpu(cpu) & 0x3f;
cpu              2668 arch/mips/cavium-octeon/octeon-irq.c void octeon_ciu3_mbox_send(int cpu, unsigned int mbox)
cpu              2678 arch/mips/cavium-octeon/octeon-irq.c 	intsn = octeon_irq_ciu3_mbox_intsn_for_cpu(cpu, mbox);
cpu              2679 arch/mips/cavium-octeon/octeon-irq.c 	ciu3_info = per_cpu(octeon_ciu3_info, cpu);
cpu              2689 arch/mips/cavium-octeon/octeon-irq.c static void octeon_irq_ciu3_mbox_set_enable(struct irq_data *data, int cpu, bool en)
cpu              2697 arch/mips/cavium-octeon/octeon-irq.c 	intsn = octeon_irq_ciu3_mbox_intsn_for_cpu(cpu, mbox);
cpu              2698 arch/mips/cavium-octeon/octeon-irq.c 	ciu3_info = per_cpu(octeon_ciu3_info, cpu);
cpu              2708 arch/mips/cavium-octeon/octeon-irq.c 		unsigned int idt = per_cpu(octeon_irq_ciu3_idt_ip3, cpu);
cpu              2720 arch/mips/cavium-octeon/octeon-irq.c 	int cpu;
cpu              2725 arch/mips/cavium-octeon/octeon-irq.c 	for_each_online_cpu(cpu)
cpu              2726 arch/mips/cavium-octeon/octeon-irq.c 		octeon_irq_ciu3_mbox_set_enable(data, cpu, true);
cpu              2731 arch/mips/cavium-octeon/octeon-irq.c 	int cpu;
cpu              2736 arch/mips/cavium-octeon/octeon-irq.c 	for_each_online_cpu(cpu)
cpu              2737 arch/mips/cavium-octeon/octeon-irq.c 		octeon_irq_ciu3_mbox_set_enable(data, cpu, false);
cpu                90 arch/mips/cavium-octeon/setup.c 	int cpu = smp_processor_id();
cpu                93 arch/mips/cavium-octeon/setup.c 	set_cpu_online(cpu, false);
cpu               198 arch/mips/cavium-octeon/setup.c 	int cpu;
cpu               209 arch/mips/cavium-octeon/setup.c 	for_each_online_cpu(cpu)
cpu               210 arch/mips/cavium-octeon/setup.c 		cvmx_write_csr(CVMX_CIU_WDOGX(cpu_logical_map(cpu)), 0);
cpu               276 arch/mips/cavium-octeon/setup.c 	int cpu;
cpu               279 arch/mips/cavium-octeon/setup.c 	for_each_online_cpu(cpu)
cpu               280 arch/mips/cavium-octeon/setup.c 		cvmx_write_csr(CVMX_CIU_WDOGX(cpu_logical_map(cpu)), 0);
cpu               434 arch/mips/cavium-octeon/setup.c 	int cpu;
cpu               435 arch/mips/cavium-octeon/setup.c 	for_each_online_cpu(cpu)
cpu               436 arch/mips/cavium-octeon/setup.c 		cvmx_write_csr(CVMX_CIU_WDOGX(cpu_logical_map(cpu)), 0);
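
The three setup.c fragments above repeat one idiom: walk every online CPU and zero its per-core watchdog CSR, translating the Linux CPU number into the Octeon core id first. A minimal sketch of that loop, reusing only names that appear in the fragments (an illustration with a stand-in function name, not the kernel's exact helper; the CVMX CSR accessors come from the Octeon platform headers):

#include <linux/cpumask.h>

/* Illustrative sketch: silence the CIU watchdog of every online CPU.
 * cpu_logical_map() turns a logical CPU number into the hardware core
 * id that indexes the CVMX_CIU_WDOGX register.
 */
static void example_quiet_watchdogs(void)
{
	int cpu;

	for_each_online_cpu(cpu)
		cvmx_write_csr(CVMX_CIU_WDOGX(cpu_logical_map(cpu)), 0);
}
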
cpu                99 arch/mips/cavium-octeon/smp.c void octeon_send_ipi_single(int cpu, unsigned int action)
cpu               101 arch/mips/cavium-octeon/smp.c 	int coreid = cpu_logical_map(cpu);
cpu               209 arch/mips/cavium-octeon/smp.c static int octeon_boot_secondary(int cpu, struct task_struct *idle)
cpu               213 arch/mips/cavium-octeon/smp.c 	pr_info("SMP: Booting CPU%02d (CoreId %2d)...\n", cpu,
cpu               214 arch/mips/cavium-octeon/smp.c 		cpu_logical_map(cpu));
cpu               218 arch/mips/cavium-octeon/smp.c 	octeon_processor_boot = cpu_logical_map(cpu);
cpu               291 arch/mips/cavium-octeon/smp.c 	unsigned int cpu = smp_processor_id();
cpu               293 arch/mips/cavium-octeon/smp.c 	if (cpu == 0)
cpu               299 arch/mips/cavium-octeon/smp.c 	set_cpu_online(cpu, false);
cpu               309 arch/mips/cavium-octeon/smp.c static void octeon_cpu_die(unsigned int cpu)
cpu               311 arch/mips/cavium-octeon/smp.c 	int coreid = cpu_logical_map(cpu);
cpu               315 arch/mips/cavium-octeon/smp.c 	while (per_cpu(cpu_state, cpu) != CPU_DEAD)
cpu               349 arch/mips/cavium-octeon/smp.c 	int cpu = cpu_number_map(cvmx_get_core_num());
cpu               353 arch/mips/cavium-octeon/smp.c 	per_cpu(cpu_state, cpu) = CPU_DEAD;
cpu               366 arch/mips/cavium-octeon/smp.c static int octeon_update_boot_vector(unsigned int cpu)
cpu               369 arch/mips/cavium-octeon/smp.c 	int coreid = cpu_logical_map(cpu);
cpu               476 arch/mips/cavium-octeon/smp.c static void octeon_78xx_send_ipi_single(int cpu, unsigned int action)
cpu               482 arch/mips/cavium-octeon/smp.c 			octeon_ciu3_mbox_send(cpu, i);
cpu               490 arch/mips/cavium-octeon/smp.c 	unsigned int cpu;
cpu               492 arch/mips/cavium-octeon/smp.c 	for_each_cpu(cpu, mask)
cpu               493 arch/mips/cavium-octeon/smp.c 		octeon_78xx_send_ipi_single(cpu, action);
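
octeon_78xx_send_ipi_single() above forwards an IPI action word to octeon_ciu3_mbox_send(), and the mask variant simply iterates the destination cpumask. A hedged sketch of how such a pair fits together; treating each bit of the action word as one mailbox is an assumption about the decomposition, and the example_* names are stand-ins:

#include <linux/cpumask.h>

/* Illustrative sketch: raise one CIU3 mailbox per set action bit on a
 * single CPU, then fan the same action out over a cpumask.
 */
static void example_78xx_send_ipi_single(int cpu, unsigned int action)
{
	unsigned int i;

	for (i = 0; i < 4; i++)
		if (action & (1u << i))
			octeon_ciu3_mbox_send(cpu, i);
}

static void example_78xx_send_ipi_mask(const struct cpumask *mask,
				       unsigned int action)
{
	unsigned int cpu;

	for_each_cpu(cpu, mask)
		example_78xx_send_ipi_single(cpu, action);
}
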
cpu                68 arch/mips/fw/cfe/cfe_api.c int cfe_cpu_start(int cpu, void (*fn) (void), long sp, long gp, long a1)
cpu                77 arch/mips/fw/cfe/cfe_api.c 	xiocb.plist.xiocb_cpuctl.cpu_number = cpu;
cpu                89 arch/mips/fw/cfe/cfe_api.c int cfe_cpu_stop(int cpu)
cpu                98 arch/mips/fw/cfe/cfe_api.c 	xiocb.plist.xiocb_cpuctl.cpu_number = cpu;
cpu                10 arch/mips/include/asm/amon.h int amon_cpu_avail(int cpu);
cpu                11 arch/mips/include/asm/amon.h int amon_cpu_start(int cpu, unsigned long pc, unsigned long sp,
cpu                36 arch/mips/include/asm/bugs.h 	unsigned int cpu = smp_processor_id();
cpu                38 arch/mips/include/asm/bugs.h 	cpu_data[cpu].udelay_val = loops_per_jiffy;
cpu                24 arch/mips/include/asm/cdmm.h 	unsigned int		cpu;
cpu                12 arch/mips/include/asm/cmp.h extern void cmp_boot_secondary(int cpu, struct task_struct *t);
cpu                17 arch/mips/include/asm/cmp.h extern void cmp_send_ipi(int cpu, unsigned int action);
cpu                85 arch/mips/include/asm/fw/cfe/cfe_api.h int cfe_cpu_start(int cpu, void (*fn) (void), long sp, long gp, long a1);
cpu                86 arch/mips/include/asm/fw/cfe/cfe_api.h int cfe_cpu_stop(int cpu);
cpu                39 arch/mips/include/asm/irq.h static inline bool on_irq_stack(int cpu, unsigned long sp)
cpu                41 arch/mips/include/asm/irq.h 	unsigned long low = (unsigned long)irq_stack[cpu];
cpu               817 arch/mips/include/asm/kvm_host.h 	int (*vcpu_load)(struct kvm_vcpu *vcpu, int cpu);
cpu               818 arch/mips/include/asm/kvm_host.h 	int (*vcpu_put)(struct kvm_vcpu *vcpu, int cpu);
cpu               899 arch/mips/include/asm/kvm_host.h void kvm_mips_suspend_mm(int cpu);
cpu               900 arch/mips/include/asm/kvm_host.h void kvm_mips_resume_mm(int cpu);
cpu              1142 arch/mips/include/asm/kvm_host.h static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
cpu                17 arch/mips/include/asm/mach-ip27/topology.h #define cpu_to_node(cpu)	(sn_cpu_info[(cpu)].p_nodeid)
cpu                 7 arch/mips/include/asm/mach-loongson64/topology.h #define cpu_to_node(cpu)	(cpu_logical_map(cpu) >> 2)
cpu                24 arch/mips/include/asm/mach-pnx833x/pnx833x.h #define PNX_FIELD(cpu, val, reg, field) \
cpu                25 arch/mips/include/asm/mach-pnx833x/pnx833x.h 		(((val) & PNX##cpu##_##reg##_##field##_MASK) >> \
cpu                26 arch/mips/include/asm/mach-pnx833x/pnx833x.h 			PNX##cpu##_##reg##_##field##_SHIFT)
cpu                37 arch/mips/include/asm/mach-pnx833x/pnx833x.h #define PNX_WRITEFIELD(cpu, val, reg, field) \
cpu                38 arch/mips/include/asm/mach-pnx833x/pnx833x.h 	(PNX##cpu##_##reg = (PNX##cpu##_##reg & ~(PNX##cpu##_##reg##_##field##_MASK)) | \
cpu                39 arch/mips/include/asm/mach-pnx833x/pnx833x.h 						((val) << PNX##cpu##_##reg##_##field##_SHIFT))
cpu               396 arch/mips/include/asm/mips-cm.h static inline unsigned int mips_cm_vp_id(unsigned int cpu)
cpu               398 arch/mips/include/asm/mips-cm.h 	unsigned int core = cpu_core(&cpu_data[cpu]);
cpu               399 arch/mips/include/asm/mips-cm.h 	unsigned int vp = cpu_vpe_id(&cpu_data[cpu]);
cpu               452 arch/mips/include/asm/mips-cm.h static inline void mips_cm_lock_other_cpu(unsigned int cpu, unsigned int block)
cpu               454 arch/mips/include/asm/mips-cm.h 	struct cpuinfo_mips *d = &cpu_data[cpu];
cpu                94 arch/mips/include/asm/mmu_context.h static inline u64 asid_version_mask(unsigned int cpu)
cpu                96 arch/mips/include/asm/mmu_context.h 	unsigned long asid_mask = cpu_asid_mask(&cpu_data[cpu]);
cpu               101 arch/mips/include/asm/mmu_context.h static inline u64 asid_first_version(unsigned int cpu)
cpu               103 arch/mips/include/asm/mmu_context.h 	return ~asid_version_mask(cpu) + 1;
cpu               106 arch/mips/include/asm/mmu_context.h static inline u64 cpu_context(unsigned int cpu, const struct mm_struct *mm)
cpu               111 arch/mips/include/asm/mmu_context.h 	return mm->context.asid[cpu];
cpu               114 arch/mips/include/asm/mmu_context.h static inline void set_cpu_context(unsigned int cpu,
cpu               120 arch/mips/include/asm/mmu_context.h 		mm->context.asid[cpu] = ctx;
cpu               123 arch/mips/include/asm/mmu_context.h #define asid_cache(cpu)		(cpu_data[cpu].asid_cache)
cpu               124 arch/mips/include/asm/mmu_context.h #define cpu_asid(cpu, mm) \
cpu               125 arch/mips/include/asm/mmu_context.h 	(cpu_context((cpu), (mm)) & cpu_asid_mask(&cpu_data[cpu]))
cpu               161 arch/mips/include/asm/mmu_context.h 	unsigned int cpu = smp_processor_id();
cpu               172 arch/mips/include/asm/mmu_context.h 	cpumask_clear_cpu(cpu, mm_cpumask(prev));
cpu               173 arch/mips/include/asm/mmu_context.h 	cpumask_set_cpu(cpu, mm_cpumask(next));
cpu               195 arch/mips/include/asm/mmu_context.h 	unsigned int cpu;
cpu               201 arch/mips/include/asm/mmu_context.h 	cpu = smp_processor_id();
cpu               202 arch/mips/include/asm/mmu_context.h 	ctx = cpu_context(cpu, mm);
cpu               216 arch/mips/include/asm/mmu_context.h 		write_c0_memorymapid(ctx & cpu_asid_mask(&cpu_data[cpu]));
cpu               223 arch/mips/include/asm/mmu_context.h 	} else if (cpumask_test_cpu(cpu, mm_cpumask(mm))) {
cpu               230 arch/mips/include/asm/mmu_context.h 		write_c0_entryhi(cpu_asid(cpu, mm));
cpu               234 arch/mips/include/asm/mmu_context.h 		set_cpu_context(cpu, mm, 0);
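
The mmu_context.h fragments spell out the MIPS ASID scheme: cpu_context() stores an mm's per-CPU ASID plus generation, asid_cache() is the CPU's running counter, asid_version_mask() isolates the generation bits, and cpu_asid() masks them off before the value reaches EntryHi. A hedged sketch of the check-and-refresh step those macros support (a simplification of the real allocation path; the increment-by-one and the stand-in function name are assumptions):

#include <asm/mmu_context.h>

/* Illustrative sketch: keep the mm's ASID if its generation matches this
 * CPU's asid_cache; otherwise allocate the next ASID, flushing the TLB
 * when the non-generation bits wrap to zero.
 */
static void example_refresh_asid(struct mm_struct *mm, unsigned int cpu)
{
	if ((cpu_context(cpu, mm) ^ asid_cache(cpu)) &
	    asid_version_mask(cpu)) {
		u64 asid = asid_cache(cpu) + 1;

		if (!(asid & cpu_asid_mask(&cpu_data[cpu])))
			local_flush_tlb_all();	/* ASIDs wrapped */

		asid_cache(cpu) = asid;
		set_cpu_context(cpu, mm, asid);
	}

	write_c0_entryhi(cpu_asid(cpu, mm));
}
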
cpu               229 arch/mips/include/asm/netlogic/xlp-hal/pic.h 	int sch, int vec, int dt, int db, int cpu)
cpu               236 arch/mips/include/asm/netlogic/xlp-hal/pic.h 			(cpu & 0x3ff);
cpu               257 arch/mips/include/asm/netlogic/xlp-hal/pic.h 	int sch, int vec, int cpu)
cpu               261 arch/mips/include/asm/netlogic/xlp-hal/pic.h 							1, 0, cpu);
cpu               264 arch/mips/include/asm/netlogic/xlp-hal/pic.h 			(cpu >> 4),		/* thread group */
cpu               265 arch/mips/include/asm/netlogic/xlp-hal/pic.h 			1 << (cpu & 0xf));	/* thread mask */
cpu               287 arch/mips/include/asm/netlogic/xlp-hal/pic.h nlm_pic_set_timer(uint64_t base, int timer, uint64_t value, int irq, int cpu)
cpu               295 arch/mips/include/asm/netlogic/xlp-hal/pic.h 		en, 0, 0, irq, cpu);
cpu               349 arch/mips/include/asm/netlogic/xlr/fmn.h 	struct xlr_fmn_info cpu[8];
cpu               288 arch/mips/include/asm/netlogic/xlr/pic.h nlm_pic_set_timer(uint64_t base, int timer, uint64_t value, int irq, int cpu)
cpu               299 arch/mips/include/asm/netlogic/xlr/pic.h 	nlm_pic_init_irt(base, PIC_IRT_TIMER_INDEX(timer), irq, cpu, 0);
cpu               311 arch/mips/include/asm/octeon/octeon.h void octeon_ciu3_mbox_send(int cpu, unsigned int mbox);
cpu                15 arch/mips/include/asm/r4k-timer.h extern void synchronise_count_master(int cpu);
cpu                16 arch/mips/include/asm/r4k-timer.h extern void synchronise_count_slave(int cpu);
cpu                20 arch/mips/include/asm/r4k-timer.h static inline void synchronise_count_master(int cpu)
cpu                24 arch/mips/include/asm/r4k-timer.h static inline void synchronise_count_slave(int cpu)
cpu               361 arch/mips/include/asm/sibyte/bcm1480_regs.h #define A_BCM1480_IMR_MAPPER(cpu)	(A_BCM1480_IMR_CPU0_BASE+(cpu)*BCM1480_IMR_REGISTER_SPACING)
cpu               362 arch/mips/include/asm/sibyte/bcm1480_regs.h #define A_BCM1480_IMR_REGISTER(cpu, reg) (A_BCM1480_IMR_MAPPER(cpu)+(reg))
cpu               401 arch/mips/include/asm/sibyte/bcm1480_regs.h #define A_BCM1480_IMR_ALIAS_MAILBOX(cpu)     (A_BCM1480_IMR_ALIAS_MAILBOX_CPU0_BASE + \
cpu               402 arch/mips/include/asm/sibyte/bcm1480_regs.h 					(cpu)*BCM1480_IMR_ALIAS_MAILBOX_SPACING)
cpu               403 arch/mips/include/asm/sibyte/bcm1480_regs.h #define A_BCM1480_IMR_ALIAS_MAILBOX_REGISTER(cpu, reg) (A_BCM1480_IMR_ALIAS_MAILBOX(cpu)+(reg))
cpu               417 arch/mips/include/asm/sibyte/bcm1480_regs.h #define A_BCM1480_MAILBOX_REGISTER(num, reg, cpu) \
cpu               420 arch/mips/include/asm/sibyte/bcm1480_regs.h      (cpu * BCM1480_IMR_REGISTER_SPACING) + \
cpu                36 arch/mips/include/asm/sibyte/sb1250.h extern void sb1250_mask_irq(int cpu, int irq);
cpu                37 arch/mips/include/asm/sibyte/sb1250.h extern void sb1250_unmask_irq(int cpu, int irq);
cpu                40 arch/mips/include/asm/sibyte/sb1250.h extern void bcm1480_mask_irq(int cpu, int irq);
cpu                41 arch/mips/include/asm/sibyte/sb1250.h extern void bcm1480_unmask_irq(int cpu, int irq);
cpu               703 arch/mips/include/asm/sibyte/sb1250_regs.h #define A_IMR_MAPPER(cpu) (A_IMR_CPU0_BASE+(cpu)*IMR_REGISTER_SPACING)
cpu               704 arch/mips/include/asm/sibyte/sb1250_regs.h #define A_IMR_REGISTER(cpu, reg) (A_IMR_MAPPER(cpu)+(reg))
cpu               729 arch/mips/include/asm/sibyte/sb1250_regs.h #define A_MAILBOX_REGISTER(reg,cpu) \
cpu               730 arch/mips/include/asm/sibyte/sb1250_regs.h     (A_IMR_CPU0_BASE + (cpu * IMR_REGISTER_SPACING) + reg)
cpu                25 arch/mips/include/asm/smp-ops.h 	void (*send_ipi_single)(int cpu, unsigned int action);
cpu                29 arch/mips/include/asm/smp-ops.h 	int (*boot_secondary)(int cpu, struct task_struct *idle);
cpu                35 arch/mips/include/asm/smp-ops.h 	void (*cpu_die)(unsigned int cpu);
cpu                51 arch/mips/include/asm/smp-ops.h extern void mips_smp_send_ipi_single(int cpu, unsigned int action);
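
smp-ops.h describes the per-platform SMP method table that asm/smp.h (next group) dispatches through: callbacks to send IPIs, boot a secondary, and tear a CPU down. A hedged sketch of a minimal do-nothing table in that shape, in the spirit of the smp-up.c stubs indexed further below; the struct and registration names (plat_smp_ops, register_smp_ops) follow the usual MIPS convention and are assumptions here, as are the example_* stubs:

static void example_send_ipi_single(int cpu, unsigned int action)
{
	/* Placeholder: nothing to poke on a single-CPU platform. */
}

static int example_boot_secondary(int cpu, struct task_struct *idle)
{
	return 0;	/* Placeholder: no secondaries to start. */
}

static const struct plat_smp_ops example_smp_ops = {
	.send_ipi_single = example_send_ipi_single,
	.boot_secondary	 = example_boot_secondary,
};

/* Platform setup code would hand this table to register_smp_ops(). */
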
cpu                35 arch/mips/include/asm/smp.h 	return current_thread_info()->cpu;
cpu                43 arch/mips/include/asm/smp.h #define cpu_number_map(cpu)  __cpu_number_map[cpu]
cpu                47 arch/mips/include/asm/smp.h #define cpu_logical_map(cpu)  __cpu_logical_map[cpu]
cpu                69 arch/mips/include/asm/smp.h static inline void smp_send_reschedule(int cpu)
cpu                73 arch/mips/include/asm/smp.h 	mp_ops->send_ipi_single(cpu, SMP_RESCHEDULE_YOURSELF);
cpu                84 arch/mips/include/asm/smp.h static inline void __cpu_die(unsigned int cpu)
cpu                88 arch/mips/include/asm/smp.h 	mp_ops->cpu_die(cpu);
cpu               124 arch/mips/include/asm/smp.h static inline void arch_send_call_function_single_ipi(int cpu)
cpu               128 arch/mips/include/asm/smp.h 	mp_ops->send_ipi_mask(cpumask_of(cpu), SMP_CALL_FUNCTION);
cpu                20 arch/mips/include/asm/sn/arch.h #define cputonasid(cpu)		(sn_cpu_info[(cpu)].p_nasid)
cpu                21 arch/mips/include/asm/sn/arch.h #define cputoslice(cpu)		(sn_cpu_info[(cpu)].p_slice)
cpu                60 arch/mips/include/asm/sn/arch.h #define CPUID_TO_COMPACT_NODEID(cpu)	(cpuid_to_compact_node[(cpu)])
cpu               896 arch/mips/include/asm/sn/klconfig.h extern klcpu_t *sn_get_cpuinfo(cpuid_t cpu);
cpu                85 arch/mips/include/asm/sn/launch.h #define LAUNCH_SLAVE	(*(void (*)(int nasid, int cpu, \
cpu                92 arch/mips/include/asm/sn/launch.h #define LAUNCH_WAIT	(*(void (*)(int nasid, int cpu, int timeout_msec)) \
cpu                95 arch/mips/include/asm/sn/launch.h #define LAUNCH_POLL	(*(launch_state_t (*)(int nasid, int cpu)) \
cpu               130 arch/mips/include/asm/sn/sn0/hubio.h #define IIO_IGFX_INIT(widget, node, cpu, valid)				(\
cpu               133 arch/mips/include/asm/sn/sn0/hubio.h 	(((cpu)	   & IIO_IGFX_P_NUM_MASK) << IIO_IGFX_P_NUM_SHIFT) |	 \
cpu                29 arch/mips/include/asm/thread_info.h 	__u32			cpu;		/* current CPU */
cpu                47 arch/mips/include/asm/thread_info.h 	.cpu		= 0,			\
cpu                15 arch/mips/include/asm/topology.h #define topology_physical_package_id(cpu)	(cpu_data[cpu].package)
cpu                16 arch/mips/include/asm/topology.h #define topology_core_id(cpu)			(cpu_core(&cpu_data[cpu]))
cpu                17 arch/mips/include/asm/topology.h #define topology_core_cpumask(cpu)		(&cpu_core_map[cpu])
cpu                18 arch/mips/include/asm/topology.h #define topology_sibling_cpumask(cpu)		(&cpu_sibling_map[cpu])
cpu               223 arch/mips/include/uapi/asm/kvm.h 	__u32 cpu;
cpu               138 arch/mips/jazz/irq.c 	unsigned int cpu = smp_processor_id();
cpu               142 arch/mips/jazz/irq.c 	cd->cpumask		= cpumask_of(cpu);
cpu                99 arch/mips/kernel/asm-offsets.c 	OFFSET(TI_CPU, thread_info, cpu);
cpu                20 arch/mips/kernel/cacheinfo.c static int __init_cache_level(unsigned int cpu)
cpu                23 arch/mips/kernel/cacheinfo.c 	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
cpu                53 arch/mips/kernel/cacheinfo.c static void fill_cpumask_siblings(int cpu, cpumask_t *cpu_map)
cpu                58 arch/mips/kernel/cacheinfo.c 		if (cpus_are_siblings(cpu, cpu1))
cpu                62 arch/mips/kernel/cacheinfo.c static void fill_cpumask_cluster(int cpu, cpumask_t *cpu_map)
cpu                65 arch/mips/kernel/cacheinfo.c 	int cluster = cpu_cluster(&cpu_data[cpu]);
cpu                72 arch/mips/kernel/cacheinfo.c static int __populate_cache_leaves(unsigned int cpu)
cpu                75 arch/mips/kernel/cacheinfo.c 	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
cpu                80 arch/mips/kernel/cacheinfo.c 		fill_cpumask_siblings(cpu, &this_leaf->shared_cpu_map);
cpu                82 arch/mips/kernel/cacheinfo.c 		fill_cpumask_siblings(cpu, &this_leaf->shared_cpu_map);
cpu                90 arch/mips/kernel/cacheinfo.c 		fill_cpumask_cluster(cpu, &this_leaf->shared_cpu_map);
cpu                33 arch/mips/kernel/cevt-bcm1480.c 	unsigned int cpu = smp_processor_id();
cpu                36 arch/mips/kernel/cevt-bcm1480.c 	cfg = IOADDR(A_SCD_TIMER_REGISTER(cpu, R_SCD_TIMER_CFG));
cpu                37 arch/mips/kernel/cevt-bcm1480.c 	init = IOADDR(A_SCD_TIMER_REGISTER(cpu, R_SCD_TIMER_INIT));
cpu                47 arch/mips/kernel/cevt-bcm1480.c 	unsigned int cpu = smp_processor_id();
cpu                50 arch/mips/kernel/cevt-bcm1480.c 	cfg = IOADDR(A_SCD_TIMER_REGISTER(cpu, R_SCD_TIMER_CFG));
cpu                59 arch/mips/kernel/cevt-bcm1480.c 	unsigned int cpu = smp_processor_id();
cpu                62 arch/mips/kernel/cevt-bcm1480.c 	cfg = IOADDR(A_SCD_TIMER_REGISTER(cpu, R_SCD_TIMER_CFG));
cpu                63 arch/mips/kernel/cevt-bcm1480.c 	init = IOADDR(A_SCD_TIMER_REGISTER(cpu, R_SCD_TIMER_INIT));
cpu                74 arch/mips/kernel/cevt-bcm1480.c 	unsigned int cpu = smp_processor_id();
cpu                85 arch/mips/kernel/cevt-bcm1480.c 	cfg = IOADDR(A_SCD_TIMER_REGISTER(cpu, R_SCD_TIMER_CFG));
cpu                99 arch/mips/kernel/cevt-bcm1480.c 	unsigned int cpu = smp_processor_id();
cpu               100 arch/mips/kernel/cevt-bcm1480.c 	unsigned int irq = K_BCM1480_INT_TIMER_0 + cpu;
cpu               101 arch/mips/kernel/cevt-bcm1480.c 	struct irqaction *action = &per_cpu(sibyte_hpt_irqaction, cpu);
cpu               102 arch/mips/kernel/cevt-bcm1480.c 	struct clock_event_device *cd = &per_cpu(sibyte_hpt_clockevent, cpu);
cpu               103 arch/mips/kernel/cevt-bcm1480.c 	unsigned char *name = per_cpu(sibyte_hpt_name, cpu);
cpu               105 arch/mips/kernel/cevt-bcm1480.c 	BUG_ON(cpu > 3);	/* Only have 4 general purpose timers */
cpu               107 arch/mips/kernel/cevt-bcm1480.c 	sprintf(name, "bcm1480-counter-%d", cpu);
cpu               118 arch/mips/kernel/cevt-bcm1480.c 	cd->cpumask		= cpumask_of(cpu);
cpu               125 arch/mips/kernel/cevt-bcm1480.c 	bcm1480_mask_irq(cpu, irq);
cpu               131 arch/mips/kernel/cevt-bcm1480.c 		     IOADDR(A_BCM1480_IMR_REGISTER(cpu,
cpu               134 arch/mips/kernel/cevt-bcm1480.c 	bcm1480_unmask_irq(cpu, irq);
cpu               141 arch/mips/kernel/cevt-bcm1480.c 	irq_set_affinity(irq, cpumask_of(cpu));
cpu               133 arch/mips/kernel/cevt-r4k.c 	int cpu = smp_processor_id();
cpu               152 arch/mips/kernel/cevt-r4k.c 		cd = &per_cpu(mips_clockevent_device, cpu);
cpu               255 arch/mips/kernel/cevt-r4k.c 	unsigned int cpu = smp_processor_id();
cpu               272 arch/mips/kernel/cevt-r4k.c 	cd = &per_cpu(mips_clockevent_device, cpu);
cpu               283 arch/mips/kernel/cevt-r4k.c 	cd->cpumask		= cpumask_of(cpu);
cpu                43 arch/mips/kernel/cevt-sb1250.c 	unsigned int cpu = smp_processor_id();
cpu                46 arch/mips/kernel/cevt-sb1250.c 	cfg = IOADDR(A_SCD_TIMER_REGISTER(cpu, R_SCD_TIMER_CFG));
cpu                47 arch/mips/kernel/cevt-sb1250.c 	init = IOADDR(A_SCD_TIMER_REGISTER(cpu, R_SCD_TIMER_INIT));
cpu                58 arch/mips/kernel/cevt-sb1250.c 	unsigned int cpu = smp_processor_id();
cpu                61 arch/mips/kernel/cevt-sb1250.c 	cfg = IOADDR(A_SCD_TIMER_REGISTER(cpu, R_SCD_TIMER_CFG));
cpu                62 arch/mips/kernel/cevt-sb1250.c 	init = IOADDR(A_SCD_TIMER_REGISTER(cpu, R_SCD_TIMER_INIT));
cpu                73 arch/mips/kernel/cevt-sb1250.c 	unsigned int cpu = smp_processor_id();
cpu                84 arch/mips/kernel/cevt-sb1250.c 	cfg = IOADDR(A_SCD_TIMER_REGISTER(cpu, R_SCD_TIMER_CFG));
cpu                98 arch/mips/kernel/cevt-sb1250.c 	unsigned int cpu = smp_processor_id();
cpu                99 arch/mips/kernel/cevt-sb1250.c 	unsigned int irq = K_INT_TIMER_0 + cpu;
cpu               100 arch/mips/kernel/cevt-sb1250.c 	struct irqaction *action = &per_cpu(sibyte_hpt_irqaction, cpu);
cpu               101 arch/mips/kernel/cevt-sb1250.c 	struct clock_event_device *cd = &per_cpu(sibyte_hpt_clockevent, cpu);
cpu               102 arch/mips/kernel/cevt-sb1250.c 	unsigned char *name = per_cpu(sibyte_hpt_name, cpu);
cpu               105 arch/mips/kernel/cevt-sb1250.c 	BUG_ON(cpu > 2);
cpu               107 arch/mips/kernel/cevt-sb1250.c 	sprintf(name, "sb1250-counter-%d", cpu);
cpu               118 arch/mips/kernel/cevt-sb1250.c 	cd->cpumask		= cpumask_of(cpu);
cpu               125 arch/mips/kernel/cevt-sb1250.c 	sb1250_mask_irq(cpu, irq);
cpu               131 arch/mips/kernel/cevt-sb1250.c 		     IOADDR(A_IMR_REGISTER(cpu, R_IMR_INTERRUPT_MAP_BASE) +
cpu               134 arch/mips/kernel/cevt-sb1250.c 	sb1250_unmask_irq(cpu, irq);
cpu               141 arch/mips/kernel/cevt-sb1250.c 	irq_set_affinity(irq, cpumask_of(cpu));
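
cevt-bcm1480.c and cevt-sb1250.c above share one bring-up pattern: the calling CPU picks its own timer and IRQ, fills a per-CPU clock_event_device whose cpumask is only itself, and pins the interrupt to that CPU. A hedged sketch of the generic half of that pattern; the device name, rate, and delta limits are placeholders, and programming the real timer hardware is stubbed out:

#include <linux/clockchips.h>
#include <linux/percpu.h>
#include <linux/smp.h>

static DEFINE_PER_CPU(struct clock_event_device, example_clockevent);

static int example_set_next_event(unsigned long delta,
				  struct clock_event_device *cd)
{
	/* Placeholder: program the hardware timer 'delta' ticks ahead. */
	return 0;
}

/* Illustrative sketch: register a one-CPU clock event on the calling CPU. */
static void example_clockevent_init(void)
{
	unsigned int cpu = smp_processor_id();
	struct clock_event_device *cd = &per_cpu(example_clockevent, cpu);

	cd->name	   = "example-counter";
	cd->features	   = CLOCK_EVT_FEAT_ONESHOT;
	cd->set_next_event = example_set_next_event;
	cd->cpumask	   = cpumask_of(cpu);

	/* 1 MHz and the delta bounds are placeholder values. */
	clockevents_config_and_register(cd, 1000000, 2, 0xffffffff);
}
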
cpu               496 arch/mips/kernel/cpu-probe.c static inline void set_elf_platform(int cpu, const char *plat)
cpu               498 arch/mips/kernel/cpu-probe.c 	if (cpu == 0)
cpu              1266 arch/mips/kernel/cpu-probe.c static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
cpu              1271 arch/mips/kernel/cpu-probe.c 		__cpu_name[cpu] = "R2000";
cpu              1283 arch/mips/kernel/cpu-probe.c 				__cpu_name[cpu] = "R3081";
cpu              1286 arch/mips/kernel/cpu-probe.c 				__cpu_name[cpu] = "R3000A";
cpu              1290 arch/mips/kernel/cpu-probe.c 			__cpu_name[cpu] = "R3000";
cpu              1304 arch/mips/kernel/cpu-probe.c 				__cpu_name[cpu] = "R4400PC";
cpu              1307 arch/mips/kernel/cpu-probe.c 				__cpu_name[cpu] = "R4000PC";
cpu              1333 arch/mips/kernel/cpu-probe.c 				__cpu_name[cpu] = mc ? "R4400MC" : "R4400SC";
cpu              1336 arch/mips/kernel/cpu-probe.c 				__cpu_name[cpu] = mc ? "R4000MC" : "R4000SC";
cpu              1355 arch/mips/kernel/cpu-probe.c 			__cpu_name[cpu] = "NEC VR4111";
cpu              1359 arch/mips/kernel/cpu-probe.c 			__cpu_name[cpu] = "NEC VR4121";
cpu              1364 arch/mips/kernel/cpu-probe.c 				__cpu_name[cpu] = "NEC VR4122";
cpu              1367 arch/mips/kernel/cpu-probe.c 				__cpu_name[cpu] = "NEC VR4181A";
cpu              1373 arch/mips/kernel/cpu-probe.c 				__cpu_name[cpu] = "NEC VR4131";
cpu              1377 arch/mips/kernel/cpu-probe.c 				__cpu_name[cpu] = "NEC VR4133";
cpu              1383 arch/mips/kernel/cpu-probe.c 			__cpu_name[cpu] = "NEC Vr41xx";
cpu              1389 arch/mips/kernel/cpu-probe.c 		__cpu_name[cpu] = "R4600";
cpu              1405 arch/mips/kernel/cpu-probe.c 		__cpu_name[cpu] = "R4650";
cpu              1418 arch/mips/kernel/cpu-probe.c 			__cpu_name[cpu] = "TX3927";
cpu              1424 arch/mips/kernel/cpu-probe.c 				__cpu_name[cpu] = "TX3912";
cpu              1429 arch/mips/kernel/cpu-probe.c 				__cpu_name[cpu] = "TX3922";
cpu              1437 arch/mips/kernel/cpu-probe.c 		__cpu_name[cpu] = "R4700";
cpu              1446 arch/mips/kernel/cpu-probe.c 		__cpu_name[cpu] = "R49XX";
cpu              1456 arch/mips/kernel/cpu-probe.c 		__cpu_name[cpu] = "R5000";
cpu              1464 arch/mips/kernel/cpu-probe.c 		__cpu_name[cpu] = "R5500";
cpu              1472 arch/mips/kernel/cpu-probe.c 		__cpu_name[cpu] = "Nevada";
cpu              1480 arch/mips/kernel/cpu-probe.c 		__cpu_name[cpu] = "RM7000";
cpu              1496 arch/mips/kernel/cpu-probe.c 		__cpu_name[cpu] = "R10000";
cpu              1506 arch/mips/kernel/cpu-probe.c 		__cpu_name[cpu] = "R12000";
cpu              1517 arch/mips/kernel/cpu-probe.c 			__cpu_name[cpu] = "R16000";
cpu              1520 arch/mips/kernel/cpu-probe.c 			__cpu_name[cpu] = "R14000";
cpu              1533 arch/mips/kernel/cpu-probe.c 			__cpu_name[cpu] = "ICT Loongson-2";
cpu              1534 arch/mips/kernel/cpu-probe.c 			set_elf_platform(cpu, "loongson2e");
cpu              1540 arch/mips/kernel/cpu-probe.c 			__cpu_name[cpu] = "ICT Loongson-2";
cpu              1541 arch/mips/kernel/cpu-probe.c 			set_elf_platform(cpu, "loongson2f");
cpu              1547 arch/mips/kernel/cpu-probe.c 			__cpu_name[cpu] = "ICT Loongson-3";
cpu              1548 arch/mips/kernel/cpu-probe.c 			set_elf_platform(cpu, "loongson3a");
cpu              1556 arch/mips/kernel/cpu-probe.c 			__cpu_name[cpu] = "ICT Loongson-3";
cpu              1557 arch/mips/kernel/cpu-probe.c 			set_elf_platform(cpu, "loongson3b");
cpu              1577 arch/mips/kernel/cpu-probe.c 			__cpu_name[cpu] = "Loongson 1B";
cpu              1585 arch/mips/kernel/cpu-probe.c static inline void cpu_probe_mips(struct cpuinfo_mips *c, unsigned int cpu)
cpu              1592 arch/mips/kernel/cpu-probe.c 		__cpu_name[cpu] = "MIPS GENERIC QEMU";
cpu              1597 arch/mips/kernel/cpu-probe.c 		__cpu_name[cpu] = "MIPS 4Kc";
cpu              1603 arch/mips/kernel/cpu-probe.c 		__cpu_name[cpu] = "MIPS 4KEc";
cpu              1609 arch/mips/kernel/cpu-probe.c 		__cpu_name[cpu] = "MIPS 4KSc";
cpu              1614 arch/mips/kernel/cpu-probe.c 		__cpu_name[cpu] = "MIPS 5Kc";
cpu              1619 arch/mips/kernel/cpu-probe.c 		__cpu_name[cpu] = "MIPS 5KE";
cpu              1624 arch/mips/kernel/cpu-probe.c 		__cpu_name[cpu] = "MIPS 20Kc";
cpu              1629 arch/mips/kernel/cpu-probe.c 		__cpu_name[cpu] = "MIPS 24Kc";
cpu              1634 arch/mips/kernel/cpu-probe.c 		__cpu_name[cpu] = "MIPS 24KEc";
cpu              1639 arch/mips/kernel/cpu-probe.c 		__cpu_name[cpu] = "MIPS 25Kc";
cpu              1644 arch/mips/kernel/cpu-probe.c 		__cpu_name[cpu] = "MIPS 34Kc";
cpu              1650 arch/mips/kernel/cpu-probe.c 		__cpu_name[cpu] = "MIPS 74Kc";
cpu              1655 arch/mips/kernel/cpu-probe.c 		__cpu_name[cpu] = "MIPS M14Kc";
cpu              1660 arch/mips/kernel/cpu-probe.c 		__cpu_name[cpu] = "MIPS M14KEc";
cpu              1665 arch/mips/kernel/cpu-probe.c 		__cpu_name[cpu] = "MIPS 1004Kc";
cpu              1671 arch/mips/kernel/cpu-probe.c 		__cpu_name[cpu] = "MIPS 1074Kc";
cpu              1675 arch/mips/kernel/cpu-probe.c 		__cpu_name[cpu] = "MIPS interAptiv";
cpu              1680 arch/mips/kernel/cpu-probe.c 		__cpu_name[cpu] = "MIPS interAptiv (multi)";
cpu              1685 arch/mips/kernel/cpu-probe.c 		__cpu_name[cpu] = "MIPS proAptiv";
cpu              1689 arch/mips/kernel/cpu-probe.c 		__cpu_name[cpu] = "MIPS proAptiv (multi)";
cpu              1693 arch/mips/kernel/cpu-probe.c 		__cpu_name[cpu] = "MIPS P5600";
cpu              1697 arch/mips/kernel/cpu-probe.c 		__cpu_name[cpu] = "MIPS P6600";
cpu              1701 arch/mips/kernel/cpu-probe.c 		__cpu_name[cpu] = "MIPS I6400";
cpu              1705 arch/mips/kernel/cpu-probe.c 		__cpu_name[cpu] = "MIPS I6500";
cpu              1709 arch/mips/kernel/cpu-probe.c 		__cpu_name[cpu] = "MIPS M5150";
cpu              1713 arch/mips/kernel/cpu-probe.c 		__cpu_name[cpu] = "MIPS M6250";
cpu              1733 arch/mips/kernel/cpu-probe.c static inline void cpu_probe_alchemy(struct cpuinfo_mips *c, unsigned int cpu)
cpu              1742 arch/mips/kernel/cpu-probe.c 			__cpu_name[cpu] = "Au1000";
cpu              1745 arch/mips/kernel/cpu-probe.c 			__cpu_name[cpu] = "Au1500";
cpu              1748 arch/mips/kernel/cpu-probe.c 			__cpu_name[cpu] = "Au1100";
cpu              1751 arch/mips/kernel/cpu-probe.c 			__cpu_name[cpu] = "Au1550";
cpu              1754 arch/mips/kernel/cpu-probe.c 			__cpu_name[cpu] = "Au1200";
cpu              1756 arch/mips/kernel/cpu-probe.c 				__cpu_name[cpu] = "Au1250";
cpu              1759 arch/mips/kernel/cpu-probe.c 			__cpu_name[cpu] = "Au1210";
cpu              1762 arch/mips/kernel/cpu-probe.c 			__cpu_name[cpu] = "Au1xxx";
cpu              1769 arch/mips/kernel/cpu-probe.c static inline void cpu_probe_sibyte(struct cpuinfo_mips *c, unsigned int cpu)
cpu              1777 arch/mips/kernel/cpu-probe.c 		__cpu_name[cpu] = "SiByte SB1";
cpu              1784 arch/mips/kernel/cpu-probe.c 		__cpu_name[cpu] = "SiByte SB1A";
cpu              1789 arch/mips/kernel/cpu-probe.c static inline void cpu_probe_sandcraft(struct cpuinfo_mips *c, unsigned int cpu)
cpu              1795 arch/mips/kernel/cpu-probe.c 		__cpu_name[cpu] = "Sandcraft SR71000";
cpu              1802 arch/mips/kernel/cpu-probe.c static inline void cpu_probe_nxp(struct cpuinfo_mips *c, unsigned int cpu)
cpu              1808 arch/mips/kernel/cpu-probe.c 		__cpu_name[cpu] = "Philips PR4450";
cpu              1814 arch/mips/kernel/cpu-probe.c static inline void cpu_probe_broadcom(struct cpuinfo_mips *c, unsigned int cpu)
cpu              1821 arch/mips/kernel/cpu-probe.c 		__cpu_name[cpu] = "Broadcom BMIPS32";
cpu              1822 arch/mips/kernel/cpu-probe.c 		set_elf_platform(cpu, "bmips32");
cpu              1828 arch/mips/kernel/cpu-probe.c 		__cpu_name[cpu] = "Broadcom BMIPS3300";
cpu              1829 arch/mips/kernel/cpu-probe.c 		set_elf_platform(cpu, "bmips3300");
cpu              1837 arch/mips/kernel/cpu-probe.c 			__cpu_name[cpu] = "Broadcom BMIPS4380";
cpu              1838 arch/mips/kernel/cpu-probe.c 			set_elf_platform(cpu, "bmips4380");
cpu              1842 arch/mips/kernel/cpu-probe.c 			__cpu_name[cpu] = "Broadcom BMIPS4350";
cpu              1843 arch/mips/kernel/cpu-probe.c 			set_elf_platform(cpu, "bmips4350");
cpu              1851 arch/mips/kernel/cpu-probe.c 			__cpu_name[cpu] = "Broadcom BMIPS5200";
cpu              1853 arch/mips/kernel/cpu-probe.c 			__cpu_name[cpu] = "Broadcom BMIPS5000";
cpu              1854 arch/mips/kernel/cpu-probe.c 		set_elf_platform(cpu, "bmips5000");
cpu              1860 arch/mips/kernel/cpu-probe.c static inline void cpu_probe_cavium(struct cpuinfo_mips *c, unsigned int cpu)
cpu              1868 arch/mips/kernel/cpu-probe.c 		__cpu_name[cpu] = "Cavium Octeon";
cpu              1875 arch/mips/kernel/cpu-probe.c 		__cpu_name[cpu] = "Cavium Octeon+";
cpu              1877 arch/mips/kernel/cpu-probe.c 		set_elf_platform(cpu, "octeon");
cpu              1885 arch/mips/kernel/cpu-probe.c 		__cpu_name[cpu] = "Cavium Octeon II";
cpu              1886 arch/mips/kernel/cpu-probe.c 		set_elf_platform(cpu, "octeon2");
cpu              1893 arch/mips/kernel/cpu-probe.c 		__cpu_name[cpu] = "Cavium Octeon III";
cpu              1894 arch/mips/kernel/cpu-probe.c 		set_elf_platform(cpu, "octeon3");
cpu              1903 arch/mips/kernel/cpu-probe.c static inline void cpu_probe_loongson(struct cpuinfo_mips *c, unsigned int cpu)
cpu              1911 arch/mips/kernel/cpu-probe.c 			__cpu_name[cpu] = "ICT Loongson-3";
cpu              1912 arch/mips/kernel/cpu-probe.c 			set_elf_platform(cpu, "loongson3a");
cpu              1918 arch/mips/kernel/cpu-probe.c 			__cpu_name[cpu] = "ICT Loongson-3";
cpu              1919 arch/mips/kernel/cpu-probe.c 			set_elf_platform(cpu, "loongson3a");
cpu              1936 arch/mips/kernel/cpu-probe.c static inline void cpu_probe_ingenic(struct cpuinfo_mips *c, unsigned int cpu)
cpu              1954 arch/mips/kernel/cpu-probe.c 		__cpu_name[cpu] = "Ingenic JZRISC";
cpu              1977 arch/mips/kernel/cpu-probe.c static inline void cpu_probe_netlogic(struct cpuinfo_mips *c, int cpu)
cpu              1983 arch/mips/kernel/cpu-probe.c 		__cpu_name[cpu] = "Au1300";
cpu              2001 arch/mips/kernel/cpu-probe.c 		__cpu_name[cpu] = "Broadcom XLPII";
cpu              2007 arch/mips/kernel/cpu-probe.c 		__cpu_name[cpu] = "Netlogic XLP";
cpu              2019 arch/mips/kernel/cpu-probe.c 		__cpu_name[cpu] = "Netlogic XLR";
cpu              2036 arch/mips/kernel/cpu-probe.c 		__cpu_name[cpu] = "Netlogic XLS";
cpu              2070 arch/mips/kernel/cpu-probe.c 	unsigned int cpu = smp_processor_id();
cpu              2076 arch/mips/kernel/cpu-probe.c 	set_elf_platform(cpu, "mips");
cpu              2089 arch/mips/kernel/cpu-probe.c 		cpu_probe_legacy(c, cpu);
cpu              2092 arch/mips/kernel/cpu-probe.c 		cpu_probe_mips(c, cpu);
cpu              2095 arch/mips/kernel/cpu-probe.c 		cpu_probe_alchemy(c, cpu);
cpu              2098 arch/mips/kernel/cpu-probe.c 		cpu_probe_sibyte(c, cpu);
cpu              2101 arch/mips/kernel/cpu-probe.c 		cpu_probe_broadcom(c, cpu);
cpu              2104 arch/mips/kernel/cpu-probe.c 		cpu_probe_sandcraft(c, cpu);
cpu              2107 arch/mips/kernel/cpu-probe.c 		cpu_probe_nxp(c, cpu);
cpu              2110 arch/mips/kernel/cpu-probe.c 		cpu_probe_cavium(c, cpu);
cpu              2113 arch/mips/kernel/cpu-probe.c 		cpu_probe_loongson(c, cpu);
cpu              2118 arch/mips/kernel/cpu-probe.c 		cpu_probe_ingenic(c, cpu);
cpu              2121 arch/mips/kernel/cpu-probe.c 		cpu_probe_netlogic(c, cpu);
cpu              2125 arch/mips/kernel/cpu-probe.c 	BUG_ON(!__cpu_name[cpu]);
cpu              2222 arch/mips/kernel/cpu-probe.c 	if (cpu == 0)
cpu                22 arch/mips/kernel/crash.c 	int cpu = smp_processor_id();
cpu                36 arch/mips/kernel/crash.c 	if (!cpu_online(cpu))
cpu                40 arch/mips/kernel/crash.c 	set_cpu_online(cpu, false);
cpu                43 arch/mips/kernel/crash.c 	if (!cpumask_test_cpu(cpu, &cpus_in_crash))
cpu                44 arch/mips/kernel/crash.c 		crash_save_cpu(regs, cpu);
cpu                45 arch/mips/kernel/crash.c 	cpumask_set_cpu(cpu, &cpus_in_crash);
cpu               117 arch/mips/kernel/machine_kexec.c 	int cpu = smp_processor_id();
cpu               119 arch/mips/kernel/machine_kexec.c 	if (!cpu_online(cpu))
cpu               123 arch/mips/kernel/machine_kexec.c 	set_cpu_online(cpu, false);
cpu               204 arch/mips/kernel/mips-cm.c 	unsigned cpu;
cpu               250 arch/mips/kernel/mips-cm.c 	for_each_possible_cpu(cpu)
cpu               251 arch/mips/kernel/mips-cm.c 		spin_lock_init(&per_cpu(cm_core_lock, cpu));
cpu                72 arch/mips/kernel/mips-cpc.c 	unsigned int cpu;
cpu                74 arch/mips/kernel/mips-cpc.c 	for_each_possible_cpu(cpu)
cpu                75 arch/mips/kernel/mips-cpc.c 		spin_lock_init(&per_cpu(cpc_core_lock, cpu));
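
mips-cm.c and mips-cpc.c both prime a per-CPU spinlock over every possible CPU, not just the online ones, so a core brought up later already has an initialised lock to take before its "other core" register window is used. The bare pattern, as a sketch with stand-in names:

#include <linux/cpumask.h>
#include <linux/percpu.h>
#include <linux/spinlock.h>

static DEFINE_PER_CPU(spinlock_t, example_core_lock);

/* Illustrative sketch: one lock per possible CPU, initialised up front. */
static void example_core_lock_init(void)
{
	unsigned int cpu;

	for_each_possible_cpu(cpu)
		spin_lock_init(&per_cpu(example_core_lock, cpu));
}
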
cpu               337 arch/mips/kernel/perf_event_mipsxx.c 		unsigned int cpu, ctrl;
cpu               344 arch/mips/kernel/perf_event_mipsxx.c 		cpu = (event->cpu >= 0) ? event->cpu : smp_processor_id();
cpu               346 arch/mips/kernel/perf_event_mipsxx.c 		ctrl = M_PERFCTL_VPEID(cpu_vpe_id(&cpu_data[cpu]));
cpu               349 arch/mips/kernel/perf_event_mipsxx.c 		pr_debug("Enabling perf counter for CPU%d\n", cpu);
cpu               627 arch/mips/kernel/perf_event_mipsxx.c 	if (event->cpu >= 0 && !cpu_online(event->cpu))
cpu               112 arch/mips/kernel/pm-cps.c 	unsigned cpu = smp_processor_id();
cpu               129 arch/mips/kernel/pm-cps.c 	if (cpu_online(cpu)) {
cpu               131 arch/mips/kernel/pm-cps.c 			    &cpu_sibling_map[cpu]);
cpu               133 arch/mips/kernel/pm-cps.c 		cpumask_clear_cpu(cpu, coupled_mask);
cpu               155 arch/mips/kernel/pm-cps.c 	cpumask_clear_cpu(cpu, &cpu_coherent_mask);
cpu               176 arch/mips/kernel/pm-cps.c 	cpumask_set_cpu(cpu, &cpu_coherent_mask);
cpu               342 arch/mips/kernel/pm-cps.c static void *cps_gen_entry_code(unsigned cpu, enum cps_pm_state state)
cpu               447 arch/mips/kernel/pm-cps.c 				vpe_id = cpu_vpe_id(&cpu_data[cpu]);
cpu               468 arch/mips/kernel/pm-cps.c 	cps_gen_cache_routine(&p, &l, &r, &cpu_data[cpu].icache,
cpu               472 arch/mips/kernel/pm-cps.c 	cps_gen_cache_routine(&p, &l, &r, &cpu_data[cpu].dcache,
cpu               485 arch/mips/kernel/pm-cps.c 		uasm_i_addiu(&p, t0, zero, 1 << cpu_core(&cpu_data[cpu]));
cpu               499 arch/mips/kernel/pm-cps.c 		err = cps_gen_flush_fsb(&p, &l, &r, &cpu_data[cpu],
cpu               636 arch/mips/kernel/pm-cps.c static int cps_pm_online_cpu(unsigned int cpu)
cpu               639 arch/mips/kernel/pm-cps.c 	unsigned core = cpu_core(&cpu_data[cpu]);
cpu               648 arch/mips/kernel/pm-cps.c 		entry_fn = cps_gen_entry_code(cpu, state);
cpu                41 arch/mips/kernel/pm.c 	unsigned int cpu = smp_processor_id();
cpu                45 arch/mips/kernel/pm.c 		write_c0_entryhi(cpu_asid(cpu, current->mm));
cpu               604 arch/mips/kernel/process.c 	int cpu;
cpu               606 arch/mips/kernel/process.c 	for_each_possible_cpu(cpu) {
cpu               607 arch/mips/kernel/process.c 		if (on_irq_stack(cpu, *sp)) {
cpu               608 arch/mips/kernel/process.c 			stack_page = (unsigned long)irq_stack[cpu];
cpu               696 arch/mips/kernel/process.c 	int cpu;
cpu               698 arch/mips/kernel/process.c 	for_each_cpu(cpu, mask) {
cpu               705 arch/mips/kernel/process.c 		if (cpumask_test_and_set_cpu(cpu, &backtrace_csd_busy)) {
cpu               707 arch/mips/kernel/process.c 				cpu);
cpu               711 arch/mips/kernel/process.c 		csd = &per_cpu(backtrace_csd, cpu);
cpu               713 arch/mips/kernel/process.c 		smp_call_function_single_async(cpu, csd);
cpu               751 arch/mips/kernel/process.c 	int cpu;
cpu               826 arch/mips/kernel/process.c 	for_each_cpu_and(cpu, &process_cpus, cpu_online_mask)
cpu               827 arch/mips/kernel/process.c 		work_on_cpu(cpu, prepare_for_fp_mode_switch, NULL);
cpu                54 arch/mips/kernel/smp-bmips.c static void bmips_set_reset_vec(int cpu, u32 val);
cpu                62 arch/mips/kernel/smp-bmips.c static void bmips43xx_send_ipi_single(int cpu, unsigned int action);
cpu                63 arch/mips/kernel/smp-bmips.c static void bmips5000_send_ipi_single(int cpu, unsigned int action);
cpu                71 arch/mips/kernel/smp-bmips.c #define CPUNUM(cpu, shift)		(((cpu) + bmips_cpu_offset) << (shift))
cpu                72 arch/mips/kernel/smp-bmips.c #define ACTION_CLR_IPI(cpu, ipi)	(0x2000 | CPUNUM(cpu, 9) | ((ipi) << 8))
cpu                73 arch/mips/kernel/smp-bmips.c #define ACTION_SET_IPI(cpu, ipi)	(0x3000 | CPUNUM(cpu, 9) | ((ipi) << 8))
cpu                74 arch/mips/kernel/smp-bmips.c #define ACTION_BOOT_THREAD(cpu)		(0x08 | CPUNUM(cpu, 0))
cpu                78 arch/mips/kernel/smp-bmips.c 	int i, cpu = 1, boot_cpu = 0;
cpu               144 arch/mips/kernel/smp-bmips.c 			__cpu_number_map[i] = cpu;
cpu               145 arch/mips/kernel/smp-bmips.c 			__cpu_logical_map[cpu] = i;
cpu               146 arch/mips/kernel/smp-bmips.c 			cpu++;
cpu               183 arch/mips/kernel/smp-bmips.c static int bmips_boot_secondary(int cpu, struct task_struct *idle)
cpu               204 arch/mips/kernel/smp-bmips.c 	pr_info("SMP: Booting CPU%d...\n", cpu);
cpu               206 arch/mips/kernel/smp-bmips.c 	if (cpumask_test_cpu(cpu, &bmips_booted_mask)) {
cpu               208 arch/mips/kernel/smp-bmips.c 		bmips_set_reset_vec(cpu, RESET_FROM_KSEG0);
cpu               213 arch/mips/kernel/smp-bmips.c 			bmips43xx_send_ipi_single(cpu, 0);
cpu               216 arch/mips/kernel/smp-bmips.c 			bmips5000_send_ipi_single(cpu, 0);
cpu               220 arch/mips/kernel/smp-bmips.c 		bmips_set_reset_vec(cpu, RESET_FROM_KSEG1);
cpu               226 arch/mips/kernel/smp-bmips.c 			if (cpu_logical_map(cpu) == 1)
cpu               230 arch/mips/kernel/smp-bmips.c 			write_c0_brcm_action(ACTION_BOOT_THREAD(cpu));
cpu               233 arch/mips/kernel/smp-bmips.c 		cpumask_set_cpu(cpu, &bmips_booted_mask);
cpu               279 arch/mips/kernel/smp-bmips.c static void bmips5000_send_ipi_single(int cpu, unsigned int action)
cpu               281 arch/mips/kernel/smp-bmips.c 	write_c0_brcm_action(ACTION_SET_IPI(cpu, action == SMP_CALL_FUNCTION));
cpu               320 arch/mips/kernel/smp-bmips.c static void bmips43xx_send_ipi_single(int cpu, unsigned int action)
cpu               325 arch/mips/kernel/smp-bmips.c 	set_c0_cause(cpu ? C_SW1 : C_SW0);
cpu               326 arch/mips/kernel/smp-bmips.c 	per_cpu(ipi_action_mask, cpu) |= action;
cpu               334 arch/mips/kernel/smp-bmips.c 	int action, cpu = irq - IPI0_IRQ;
cpu               338 arch/mips/kernel/smp-bmips.c 	per_cpu(ipi_action_mask, cpu) = 0;
cpu               339 arch/mips/kernel/smp-bmips.c 	clear_c0_cause(cpu ? C_SW1 : C_SW0);
cpu               363 arch/mips/kernel/smp-bmips.c 	unsigned int cpu = smp_processor_id();
cpu               365 arch/mips/kernel/smp-bmips.c 	if (cpu == 0)
cpu               368 arch/mips/kernel/smp-bmips.c 	pr_info("SMP: CPU%d is offline\n", cpu);
cpu               370 arch/mips/kernel/smp-bmips.c 	set_cpu_online(cpu, false);
cpu               381 arch/mips/kernel/smp-bmips.c static void bmips_cpu_die(unsigned int cpu)
cpu               474 arch/mips/kernel/smp-bmips.c 	int cpu;
cpu               481 arch/mips/kernel/smp-bmips.c 	int shift = info->cpu & 0x01 ? 16 : 0;
cpu               489 arch/mips/kernel/smp-bmips.c 		if (info->cpu & 0x02) {
cpu               501 arch/mips/kernel/smp-bmips.c static void bmips_set_reset_vec(int cpu, u32 val)
cpu               507 arch/mips/kernel/smp-bmips.c 		info.cpu = cpu;
cpu               513 arch/mips/kernel/smp-bmips.c 		if (cpu == 0)
cpu                69 arch/mips/kernel/smp-cmp.c static int cmp_boot_secondary(int cpu, struct task_struct *idle)
cpu                77 arch/mips/kernel/smp-cmp.c 		__func__, cpu);
cpu                85 arch/mips/kernel/smp-cmp.c 	amon_cpu_start(cpu, pc, sp, (unsigned long)gp, a0);
cpu               294 arch/mips/kernel/smp-cps.c static int cps_boot_secondary(int cpu, struct task_struct *idle)
cpu               296 arch/mips/kernel/smp-cps.c 	unsigned core = cpu_core(&cpu_data[cpu]);
cpu               297 arch/mips/kernel/smp-cps.c 	unsigned vpe_id = cpu_vpe_id(&cpu_data[cpu]);
cpu               305 arch/mips/kernel/smp-cps.c 	if (cpu_cluster(&cpu_data[cpu]) != cpu_cluster(&raw_current_cpu_data))
cpu               312 arch/mips/kernel/smp-cps.c 	atomic_or(1 << cpu_vpe_id(&cpu_data[cpu]), &core_cfg->vpe_mask);
cpu               329 arch/mips/kernel/smp-cps.c 	if (!cpus_are_siblings(cpu, smp_processor_id())) {
cpu               332 arch/mips/kernel/smp-cps.c 			if (!cpus_are_siblings(cpu, remote))
cpu               339 arch/mips/kernel/smp-cps.c 				core, cpu);
cpu               406 arch/mips/kernel/smp-cps.c 	unsigned int cpu, core, vpe_id;
cpu               408 arch/mips/kernel/smp-cps.c 	cpu = smp_processor_id();
cpu               409 arch/mips/kernel/smp-cps.c 	core = cpu_core(&cpu_data[cpu]);
cpu               412 arch/mips/kernel/smp-cps.c 		vpe_id = cpu_vpe_id(&cpu_data[cpu]);
cpu               450 arch/mips/kernel/smp-cps.c 	unsigned cpu = smp_processor_id();
cpu               453 arch/mips/kernel/smp-cps.c 	if (!cpu)
cpu               462 arch/mips/kernel/smp-cps.c 	set_cpu_online(cpu, false);
cpu               473 arch/mips/kernel/smp-cps.c 	unsigned int cpu;
cpu               477 arch/mips/kernel/smp-cps.c 	cpu = smp_processor_id();
cpu               480 arch/mips/kernel/smp-cps.c 	pr_debug("CPU%d going offline\n", cpu);
cpu               485 arch/mips/kernel/smp-cps.c 			if (!cpus_are_siblings(cpu, cpu_death_sibling))
cpu               503 arch/mips/kernel/smp-cps.c 	panic("Failed to offline CPU %u", cpu);
cpu               508 arch/mips/kernel/smp-cps.c 	unsigned cpu = (unsigned long)ptr_cpu;
cpu               509 arch/mips/kernel/smp-cps.c 	unsigned vpe_id = cpu_vpe_id(&cpu_data[cpu]);
cpu               521 arch/mips/kernel/smp-cps.c static void cps_cpu_die(unsigned int cpu)
cpu               523 arch/mips/kernel/smp-cps.c 	unsigned core = cpu_core(&cpu_data[cpu]);
cpu               524 arch/mips/kernel/smp-cps.c 	unsigned int vpe_id = cpu_vpe_id(&cpu_data[cpu]);
cpu               530 arch/mips/kernel/smp-cps.c 	if (!cpu_wait_death(cpu, 5)) {
cpu               531 arch/mips/kernel/smp-cps.c 		pr_err("CPU%u: didn't offline\n", cpu);
cpu               580 arch/mips/kernel/smp-cps.c 				 cpu, stat))
cpu               593 arch/mips/kernel/smp-cps.c 					       (void *)(unsigned long)cpu, 1);
cpu               142 arch/mips/kernel/smp-mt.c static int vsmp_boot_secondary(int cpu, struct task_struct *idle)
cpu               148 arch/mips/kernel/smp-mt.c 	settc(cpu);
cpu                16 arch/mips/kernel/smp-up.c static void up_send_ipi_single(int cpu, unsigned int action)
cpu                42 arch/mips/kernel/smp-up.c static int up_boot_secondary(int cpu, struct task_struct *idle)
cpu                61 arch/mips/kernel/smp-up.c static void up_cpu_die(unsigned int cpu)
cpu                81 arch/mips/kernel/smp.c static inline void set_cpu_sibling_map(int cpu)
cpu                85 arch/mips/kernel/smp.c 	cpumask_set_cpu(cpu, &cpu_sibling_setup_map);
cpu                89 arch/mips/kernel/smp.c 			if (cpus_are_siblings(cpu, i)) {
cpu                90 arch/mips/kernel/smp.c 				cpumask_set_cpu(i, &cpu_sibling_map[cpu]);
cpu                91 arch/mips/kernel/smp.c 				cpumask_set_cpu(cpu, &cpu_sibling_map[i]);
cpu                95 arch/mips/kernel/smp.c 		cpumask_set_cpu(cpu, &cpu_sibling_map[cpu]);
cpu                98 arch/mips/kernel/smp.c static inline void set_cpu_core_map(int cpu)
cpu               102 arch/mips/kernel/smp.c 	cpumask_set_cpu(cpu, &cpu_core_setup_map);
cpu               105 arch/mips/kernel/smp.c 		if (cpu_data[cpu].package == cpu_data[i].package) {
cpu               106 arch/mips/kernel/smp.c 			cpumask_set_cpu(i, &cpu_core_map[cpu]);
cpu               107 arch/mips/kernel/smp.c 			cpumask_set_cpu(cpu, &cpu_core_map[i]);
cpu               149 arch/mips/kernel/smp.c void mips_smp_send_ipi_single(int cpu, unsigned int action)
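
set_cpu_sibling_map() and set_cpu_core_map() above fill symmetric cpumasks: the newly started CPU joins a setup mask, then is paired both ways with every already-set-up CPU that shares its core or package. A compact sketch of that symmetric-fill idiom; example_setup_map, group_map, and same_group() are stand-ins:

#include <linux/cpumask.h>

static cpumask_t example_setup_map;

/* Illustrative sketch: pair 'cpu' both ways with every already-set-up
 * CPU for which same_group() holds, mirroring the sibling/core map fill.
 */
static void example_fill_group_map(int cpu, cpumask_t group_map[],
				   bool (*same_group)(int a, int b))
{
	int i;

	cpumask_set_cpu(cpu, &example_setup_map);

	for_each_cpu(i, &example_setup_map) {
		if (same_group(cpu, i)) {
			cpumask_set_cpu(i, &group_map[cpu]);
			cpumask_set_cpu(cpu, &group_map[i]);
		}
	}
}
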
cpu               151 arch/mips/kernel/smp.c 	mips_smp_send_ipi_mask(cpumask_of(cpu), action);
cpu               158 arch/mips/kernel/smp.c 	int cpu;
cpu               176 arch/mips/kernel/smp.c 		for_each_cpu(cpu, mask) {
cpu               177 arch/mips/kernel/smp.c 			if (cpus_are_siblings(cpu, smp_processor_id()))
cpu               180 arch/mips/kernel/smp.c 			core = cpu_core(&cpu_data[cpu]);
cpu               182 arch/mips/kernel/smp.c 			while (!cpumask_test_cpu(cpu, &cpu_coherent_mask)) {
cpu               183 arch/mips/kernel/smp.c 				mips_cm_lock_other_cpu(cpu, CM_GCR_Cx_OTHER_BLOCK_LOCAL);
cpu               278 arch/mips/kernel/smp.c 		int cpu;
cpu               280 arch/mips/kernel/smp.c 		for_each_cpu(cpu, mask) {
cpu               281 arch/mips/kernel/smp.c 			smp_ipi_init_one(call_virq + cpu, &irq_call);
cpu               282 arch/mips/kernel/smp.c 			smp_ipi_init_one(sched_virq + cpu, &irq_resched);
cpu               311 arch/mips/kernel/smp.c 		int cpu;
cpu               313 arch/mips/kernel/smp.c 		for_each_cpu(cpu, mask) {
cpu               314 arch/mips/kernel/smp.c 			remove_irq(call_virq + cpu, &irq_call);
cpu               315 arch/mips/kernel/smp.c 			remove_irq(sched_virq + cpu, &irq_resched);
cpu               345 arch/mips/kernel/smp.c 	unsigned int cpu;
cpu               361 arch/mips/kernel/smp.c 	cpu = smp_processor_id();
cpu               362 arch/mips/kernel/smp.c 	cpu_data[cpu].udelay_val = loops_per_jiffy;
cpu               364 arch/mips/kernel/smp.c 	cpumask_set_cpu(cpu, &cpu_coherent_mask);
cpu               365 arch/mips/kernel/smp.c 	notify_cpu_starting(cpu);
cpu               370 arch/mips/kernel/smp.c 	synchronise_count_slave(cpu);
cpu               373 arch/mips/kernel/smp.c 	set_cpu_online(cpu, true);
cpu               375 arch/mips/kernel/smp.c 	set_cpu_sibling_map(cpu);
cpu               376 arch/mips/kernel/smp.c 	set_cpu_core_map(cpu);
cpu               421 arch/mips/kernel/smp.c 	current_thread_info()->cpu = 0;
cpu               441 arch/mips/kernel/smp.c int __cpu_up(unsigned int cpu, struct task_struct *tidle)
cpu               445 arch/mips/kernel/smp.c 	err = mp_ops->boot_secondary(cpu, tidle);
cpu               452 arch/mips/kernel/smp.c 		pr_crit("CPU%u: failed to start\n", cpu);
cpu               456 arch/mips/kernel/smp.c 	synchronise_count_master(cpu);
cpu               541 arch/mips/kernel/smp.c 		unsigned int cpu;
cpu               543 arch/mips/kernel/smp.c 		for_each_online_cpu(cpu) {
cpu               544 arch/mips/kernel/smp.c 			if (cpu != smp_processor_id() && cpu_context(cpu, mm))
cpu               545 arch/mips/kernel/smp.c 				set_cpu_context(cpu, mm, 0);
cpu               598 arch/mips/kernel/smp.c 		unsigned int cpu;
cpu               601 arch/mips/kernel/smp.c 		for_each_online_cpu(cpu) {
cpu               608 arch/mips/kernel/smp.c 			if (cpu != smp_processor_id() && cpu_context(cpu, mm))
cpu               609 arch/mips/kernel/smp.c 				set_cpu_context(cpu, mm, !exec);
cpu               665 arch/mips/kernel/smp.c 		unsigned int cpu;
cpu               667 arch/mips/kernel/smp.c 		for_each_online_cpu(cpu) {
cpu               674 arch/mips/kernel/smp.c 			if (cpu != smp_processor_id() && cpu_context(cpu, vma->vm_mm))
cpu               675 arch/mips/kernel/smp.c 				set_cpu_context(cpu, vma->vm_mm, 1);
cpu               706 arch/mips/kernel/smp.c 	int cpu;
cpu               708 arch/mips/kernel/smp.c 	for_each_cpu(cpu, mask) {
cpu               709 arch/mips/kernel/smp.c 		count = &per_cpu(tick_broadcast_count, cpu);
cpu               710 arch/mips/kernel/smp.c 		csd = &per_cpu(tick_broadcast_csd, cpu);
cpu               713 arch/mips/kernel/smp.c 			smp_call_function_single_async(cpu, csd);
cpu               719 arch/mips/kernel/smp.c 	int cpu = smp_processor_id();
cpu               721 arch/mips/kernel/smp.c 	atomic_set(&per_cpu(tick_broadcast_count, cpu), 0);
cpu               727 arch/mips/kernel/smp.c 	int cpu;
cpu               729 arch/mips/kernel/smp.c 	for (cpu = 0; cpu < NR_CPUS; cpu++) {
cpu               730 arch/mips/kernel/smp.c 		csd = &per_cpu(tick_broadcast_csd, cpu);
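
The tick broadcast fragments pair a per-CPU call_single_data with an atomic counter: the sender only queues the async IPI when the target's counter was previously zero, so overlapping broadcasts cost at most one IPI per CPU. A hedged sketch of that send side; initialising each csd with its handler is assumed to happen once at boot, as in the NR_CPUS loop above, and the example_* names are stand-ins:

#include <linux/atomic.h>
#include <linux/percpu.h>
#include <linux/smp.h>

static DEFINE_PER_CPU(atomic_t, example_broadcast_count);
static DEFINE_PER_CPU(call_single_data_t, example_broadcast_csd);

/* Illustrative sketch: kick each CPU in 'mask' at most once; the target's
 * IPI handler zeroes its counter before handling the broadcast tick.
 */
static void example_tick_broadcast(const struct cpumask *mask)
{
	int cpu;

	for_each_cpu(cpu, mask) {
		atomic_t *count = &per_cpu(example_broadcast_count, cpu);

		if (atomic_inc_return(count) == 1)
			smp_call_function_single_async(cpu,
				&per_cpu(example_broadcast_csd, cpu));
	}
}
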
cpu                28 arch/mips/kernel/sync-r4k.c void synchronise_count_master(int cpu)
cpu                33 arch/mips/kernel/sync-r4k.c 	pr_info("Synchronize counters for CPU %u: ", cpu);
cpu                90 arch/mips/kernel/sync-r4k.c void synchronise_count_slave(int cpu)
cpu                 9 arch/mips/kernel/topology.c static DEFINE_PER_CPU(struct cpu, cpu_devices);
cpu                21 arch/mips/kernel/topology.c 		struct cpu *c = &per_cpu(cpu_devices, i);
cpu              2183 arch/mips/kernel/traps.c 	unsigned int cpu = smp_processor_id();
cpu              2213 arch/mips/kernel/traps.c 		cpu_data[cpu].asid_cache = 0;
cpu              2214 arch/mips/kernel/traps.c 	else if (!cpu_data[cpu].asid_cache)
cpu              2215 arch/mips/kernel/traps.c 		cpu_data[cpu].asid_cache = asid_first_version(cpu);
cpu               999 arch/mips/kvm/emulate.c 	int cpu, i;
cpu              1019 arch/mips/kvm/emulate.c 		cpu = smp_processor_id();
cpu              1022 arch/mips/kvm/emulate.c 			if (i != cpu)
cpu              1065 arch/mips/kvm/emulate.c 	int cpu, i;
cpu              1089 arch/mips/kvm/emulate.c 	cpu = smp_processor_id();
cpu              1091 arch/mips/kvm/emulate.c 		if (i == cpu)
cpu               385 arch/mips/kvm/entry.c 	uasm_i_lw(&p, T2, offsetof(struct thread_info, cpu), GP);
cpu               486 arch/mips/kvm/mips.c 		kvm_debug("%s: CPU: %d, INTR: %d\n", __func__, irq->cpu,
cpu               489 arch/mips/kvm/mips.c 	if (irq->cpu == -1)
cpu               492 arch/mips/kvm/mips.c 		dvcpu = vcpu->kvm->vcpus[irq->cpu];
cpu               501 arch/mips/kvm/mips.c 			irq->cpu, irq->irq);
cpu              1136 arch/mips/kvm/mmu.c void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
cpu              1140 arch/mips/kvm/mmu.c 	kvm_debug("%s: vcpu %p, cpu: %d\n", __func__, vcpu, cpu);
cpu              1144 arch/mips/kvm/mmu.c 	vcpu->cpu = cpu;
cpu              1145 arch/mips/kvm/mmu.c 	if (vcpu->arch.last_sched_cpu != cpu) {
cpu              1147 arch/mips/kvm/mmu.c 			  vcpu->arch.last_sched_cpu, cpu, vcpu->vcpu_id);
cpu              1157 arch/mips/kvm/mmu.c 	kvm_mips_callbacks->vcpu_load(vcpu, cpu);
cpu              1166 arch/mips/kvm/mmu.c 	int cpu;
cpu              1170 arch/mips/kvm/mmu.c 	cpu = smp_processor_id();
cpu              1171 arch/mips/kvm/mmu.c 	vcpu->arch.last_sched_cpu = cpu;
cpu              1172 arch/mips/kvm/mmu.c 	vcpu->cpu = -1;
cpu              1175 arch/mips/kvm/mmu.c 	kvm_mips_callbacks->vcpu_put(vcpu, cpu);
cpu                58 arch/mips/kvm/tlb.c 	int cpu = smp_processor_id();
cpu                60 arch/mips/kvm/tlb.c 	return cpu_asid(cpu, kern_mm);
cpu                66 arch/mips/kvm/tlb.c 	int cpu = smp_processor_id();
cpu                68 arch/mips/kvm/tlb.c 	return cpu_asid(cpu, user_mm);
cpu               641 arch/mips/kvm/tlb.c void kvm_mips_suspend_mm(int cpu)
cpu               643 arch/mips/kvm/tlb.c 	cpumask_clear_cpu(cpu, mm_cpumask(current->active_mm));
cpu               655 arch/mips/kvm/tlb.c void kvm_mips_resume_mm(int cpu)
cpu               657 arch/mips/kvm/tlb.c 	cpumask_set_cpu(cpu, mm_cpumask(current->mm));
cpu              1047 arch/mips/kvm/trap_emul.c static int kvm_trap_emul_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
cpu              1060 arch/mips/kvm/trap_emul.c 		kvm_mips_suspend_mm(cpu);
cpu              1067 arch/mips/kvm/trap_emul.c static int kvm_trap_emul_vcpu_put(struct kvm_vcpu *vcpu, int cpu)
cpu              1074 arch/mips/kvm/trap_emul.c 		kvm_mips_resume_mm(cpu);
cpu              1081 arch/mips/kvm/trap_emul.c static void kvm_trap_emul_check_requests(struct kvm_vcpu *vcpu, int cpu,
cpu              1110 arch/mips/kvm/trap_emul.c 			write_c0_entryhi(cpu_asid(cpu, mm));
cpu              1189 arch/mips/kvm/trap_emul.c 	int i, cpu = smp_processor_id();
cpu              1197 arch/mips/kvm/trap_emul.c 	kvm_trap_emul_check_requests(vcpu, cpu, false);
cpu              1228 arch/mips/kvm/trap_emul.c 	int cpu = smp_processor_id();
cpu              1251 arch/mips/kvm/trap_emul.c 	kvm_mips_suspend_mm(cpu);
cpu              1256 arch/mips/kvm/trap_emul.c 	cpu = smp_processor_id();
cpu              1260 arch/mips/kvm/trap_emul.c 	kvm_mips_resume_mm(cpu);
cpu              2312 arch/mips/kvm/vz.c #define guestid_cache(cpu)	(cpu_data[cpu].guestid_cache)
cpu              2313 arch/mips/kvm/vz.c static void kvm_vz_get_new_guestid(unsigned long cpu, struct kvm_vcpu *vcpu)
cpu              2315 arch/mips/kvm/vz.c 	unsigned long guestid = guestid_cache(cpu);
cpu              2331 arch/mips/kvm/vz.c 	guestid_cache(cpu) = guestid;
cpu              2335 arch/mips/kvm/vz.c static int kvm_vz_check_requests(struct kvm_vcpu *vcpu, int cpu)
cpu              2403 arch/mips/kvm/vz.c static void kvm_vz_vcpu_load_tlb(struct kvm_vcpu *vcpu, int cpu)
cpu              2413 arch/mips/kvm/vz.c 	migrated = (vcpu->arch.last_exec_cpu != cpu);
cpu              2414 arch/mips/kvm/vz.c 	vcpu->arch.last_exec_cpu = cpu;
cpu              2431 arch/mips/kvm/vz.c 		    (vcpu->arch.vzguestid[cpu] ^ guestid_cache(cpu)) &
cpu              2433 arch/mips/kvm/vz.c 			kvm_vz_get_new_guestid(cpu, vcpu);
cpu              2434 arch/mips/kvm/vz.c 			vcpu->arch.vzguestid[cpu] = guestid_cache(cpu);
cpu              2436 arch/mips/kvm/vz.c 						 vcpu->arch.vzguestid[cpu]);
cpu              2440 arch/mips/kvm/vz.c 		change_c0_guestctl1(GUESTID_MASK, vcpu->arch.vzguestid[cpu]);
cpu              2449 arch/mips/kvm/vz.c 		if (migrated || last_exec_vcpu[cpu] != vcpu)
cpu              2451 arch/mips/kvm/vz.c 		last_exec_vcpu[cpu] = vcpu;
cpu              2457 arch/mips/kvm/vz.c 		if (cpumask_test_and_clear_cpu(cpu, &kvm->arch.asid_flush_mask))
cpu              2464 arch/mips/kvm/vz.c static int kvm_vz_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
cpu              2473 arch/mips/kvm/vz.c 	migrated = (vcpu->arch.last_sched_cpu != cpu);
cpu              2479 arch/mips/kvm/vz.c 	all = migrated || (last_vcpu[cpu] != vcpu);
cpu              2480 arch/mips/kvm/vz.c 	last_vcpu[cpu] = vcpu;
cpu              2489 arch/mips/kvm/vz.c 		kvm_vz_vcpu_load_tlb(vcpu, cpu);
cpu              2606 arch/mips/kvm/vz.c static int kvm_vz_vcpu_put(struct kvm_vcpu *vcpu, int cpu)
cpu              2950 arch/mips/kvm/vz.c 	int cpu;
cpu              2957 arch/mips/kvm/vz.c 	for_each_possible_cpu(cpu) {
cpu              2958 arch/mips/kvm/vz.c 		if (last_vcpu[cpu] == vcpu)
cpu              2959 arch/mips/kvm/vz.c 			last_vcpu[cpu] = NULL;
cpu              2960 arch/mips/kvm/vz.c 		if (last_exec_vcpu[cpu] == vcpu)
cpu              2961 arch/mips/kvm/vz.c 			last_exec_vcpu[cpu] = NULL;
cpu              3134 arch/mips/kvm/vz.c 	int cpu = smp_processor_id();
cpu              3137 arch/mips/kvm/vz.c 	preserve_guest_tlb = kvm_vz_check_requests(vcpu, cpu);
cpu              3142 arch/mips/kvm/vz.c 	kvm_vz_vcpu_load_tlb(vcpu, cpu);
cpu              3150 arch/mips/kvm/vz.c 	int cpu = smp_processor_id();
cpu              3157 arch/mips/kvm/vz.c 	kvm_vz_check_requests(vcpu, cpu);
cpu              3158 arch/mips/kvm/vz.c 	kvm_vz_vcpu_load_tlb(vcpu, cpu);
cpu                29 arch/mips/lantiq/clk.c void clkdev_add_static(unsigned long cpu, unsigned long fpi,
cpu                32 arch/mips/lantiq/clk.c 	cpu_clk_generic[0].rate = cpu;
cpu                72 arch/mips/lantiq/clk.h extern void clkdev_add_static(unsigned long cpu, unsigned long fpi,
cpu               117 arch/mips/loongson64/common/cs5536/cs5536_mfgpt.c 	unsigned int cpu = smp_processor_id();
cpu               119 arch/mips/loongson64/common/cs5536/cs5536_mfgpt.c 	cd->cpumask = cpumask_of(cpu);
cpu               177 arch/mips/loongson64/loongson-3/hpet.c 	unsigned int cpu = smp_processor_id();
cpu               183 arch/mips/loongson64/loongson-3/hpet.c 		cd = &per_cpu(hpet_clockevent_device, cpu);
cpu               227 arch/mips/loongson64/loongson-3/hpet.c 	unsigned int cpu = smp_processor_id();
cpu               232 arch/mips/loongson64/loongson-3/hpet.c 	cd = &per_cpu(hpet_clockevent_device, cpu);
cpu               242 arch/mips/loongson64/loongson-3/hpet.c 	cd->cpumask = cpumask_of(cpu);
cpu                13 arch/mips/loongson64/loongson-3/irq.c extern void loongson3_send_irq_by_ipi(int cpu, int irqs);
cpu                22 arch/mips/loongson64/loongson-3/irq.c 	unsigned int cpu;
cpu                27 arch/mips/loongson64/loongson-3/irq.c 	for_each_cpu(cpu, affinity)
cpu                28 arch/mips/loongson64/loongson-3/irq.c 		if (cpu_data[cpu].package > 0)
cpu                29 arch/mips/loongson64/loongson-3/irq.c 			cpumask_clear_cpu(cpu, &new_affinity);
cpu               208 arch/mips/loongson64/loongson-3/numa.c 	unsigned int node, cpu, active_cpu = 0;
cpu               223 arch/mips/loongson64/loongson-3/numa.c 	for (cpu = 0; cpu < loongson_sysconf.nr_cpus; cpu++) {
cpu               224 arch/mips/loongson64/loongson-3/numa.c 		node = cpu / loongson_sysconf.cores_per_node;
cpu               228 arch/mips/loongson64/loongson-3/numa.c 		if (loongson_sysconf.reserved_cpus_mask & (1<<cpu))
cpu               234 arch/mips/loongson64/loongson-3/smp.c static void loongson3_send_ipi_single(int cpu, unsigned int action)
cpu               236 arch/mips/loongson64/loongson-3/smp.c 	loongson3_ipi_write32((u32)action, ipi_set0_regs[cpu_logical_map(cpu)]);
cpu               250 arch/mips/loongson64/loongson-3/smp.c void loongson3_send_irq_by_ipi(int cpu, int irqs)
cpu               252 arch/mips/loongson64/loongson-3/smp.c 	loongson3_ipi_write32(irqs << IPI_IRQ_OFFSET, ipi_set0_regs[cpu_logical_map(cpu)]);
cpu               257 arch/mips/loongson64/loongson-3/smp.c 	int i, cpu = smp_processor_id();
cpu               261 arch/mips/loongson64/loongson-3/smp.c 	action = loongson3_ipi_read32(ipi_status0_regs[cpu_logical_map(cpu)]);
cpu               265 arch/mips/loongson64/loongson-3/smp.c 	loongson3_ipi_write32((u32)action, ipi_clear0_regs[cpu_logical_map(cpu)]);
cpu               277 arch/mips/loongson64/loongson-3/smp.c 		BUG_ON(cpu != 0);
cpu               302 arch/mips/loongson64/loongson-3/smp.c 	unsigned int cpu = smp_processor_id();
cpu               312 arch/mips/loongson64/loongson-3/smp.c 	per_cpu(cpu_state, cpu) = CPU_ONLINE;
cpu               313 arch/mips/loongson64/loongson-3/smp.c 	cpu_set_core(&cpu_data[cpu],
cpu               314 arch/mips/loongson64/loongson-3/smp.c 		     cpu_logical_map(cpu) % loongson_sysconf.cores_per_package);
cpu               315 arch/mips/loongson64/loongson-3/smp.c 	cpu_data[cpu].package =
cpu               316 arch/mips/loongson64/loongson-3/smp.c 		cpu_logical_map(cpu) / loongson_sysconf.cores_per_package;
cpu               319 arch/mips/loongson64/loongson-3/smp.c 	core0_c0count[cpu] = 0;
cpu               321 arch/mips/loongson64/loongson-3/smp.c 	while (!core0_c0count[cpu]) {
cpu               328 arch/mips/loongson64/loongson-3/smp.c 	if (cpu_data[cpu].package)
cpu               329 arch/mips/loongson64/loongson-3/smp.c 		initcount = core0_c0count[cpu] + i;
cpu               331 arch/mips/loongson64/loongson-3/smp.c 		initcount = core0_c0count[cpu] + i/2;
cpu               338 arch/mips/loongson64/loongson-3/smp.c 	int cpu = smp_processor_id();
cpu               343 arch/mips/loongson64/loongson-3/smp.c 			ipi_mailbox_buf[cpu_logical_map(cpu)] + 0x0);
cpu               394 arch/mips/loongson64/loongson-3/smp.c static int loongson3_boot_secondary(int cpu, struct task_struct *idle)
cpu               398 arch/mips/loongson64/loongson-3/smp.c 	pr_info("Booting CPU#%d...\n", cpu);
cpu               407 arch/mips/loongson64/loongson-3/smp.c 			cpu, startargs[0], startargs[1], startargs[2]);
cpu               410 arch/mips/loongson64/loongson-3/smp.c 			ipi_mailbox_buf[cpu_logical_map(cpu)] + 0x18);
cpu               412 arch/mips/loongson64/loongson-3/smp.c 			ipi_mailbox_buf[cpu_logical_map(cpu)] + 0x10);
cpu               414 arch/mips/loongson64/loongson-3/smp.c 			ipi_mailbox_buf[cpu_logical_map(cpu)] + 0x8);
cpu               416 arch/mips/loongson64/loongson-3/smp.c 			ipi_mailbox_buf[cpu_logical_map(cpu)] + 0x0);
cpu               425 arch/mips/loongson64/loongson-3/smp.c 	unsigned int cpu = smp_processor_id();
cpu               427 arch/mips/loongson64/loongson-3/smp.c 	if (cpu == 0)
cpu               430 arch/mips/loongson64/loongson-3/smp.c 	set_cpu_online(cpu, false);
cpu               441 arch/mips/loongson64/loongson-3/smp.c static void loongson3_cpu_die(unsigned int cpu)
cpu               443 arch/mips/loongson64/loongson-3/smp.c 	while (per_cpu(cpu_state, cpu) != CPU_DEAD)
cpu               665 arch/mips/loongson64/loongson-3/smp.c 	unsigned int cpu = smp_processor_id();
cpu               688 arch/mips/loongson64/loongson-3/smp.c 	state_addr = &per_cpu(cpu_state, cpu);
cpu               693 arch/mips/loongson64/loongson-3/smp.c static int loongson3_disable_clock(unsigned int cpu)
cpu               695 arch/mips/loongson64/loongson-3/smp.c 	uint64_t core_id = cpu_core(&cpu_data[cpu]);
cpu               696 arch/mips/loongson64/loongson-3/smp.c 	uint64_t package_id = cpu_data[cpu].package;
cpu               707 arch/mips/loongson64/loongson-3/smp.c static int loongson3_enable_clock(unsigned int cpu)
cpu               709 arch/mips/loongson64/loongson-3/smp.c 	uint64_t core_id = cpu_core(&cpu_data[cpu]);
cpu               710 arch/mips/loongson64/loongson-3/smp.c 	uint64_t package_id = cpu_data[cpu].package;
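The loongson64/loongson-3/smp.c entries above show the mailbox IPI scheme: the sender writes an action bitmask into the target core's ipi_set0 register, the handler reads ipi_status0, writes the same bits back to ipi_clear0 to acknowledge, and then dispatches each pending action. Below is a minimal user-space sketch of that flow; plain arrays stand in for the memory-mapped registers and the action values are made up for illustration, so this is a model of the idea, not the driver code.

#include <stdint.h>
#include <stdio.h>

#define NR_CPUS          4
#define SMP_RESCHEDULE   0x1   /* illustrative action bits, not the real values */
#define SMP_CALL_FUNC    0x2

static uint32_t ipi_mailbox[NR_CPUS];   /* stands in for the set/status/clear regs */

/* "Set" register: raise the requested action bits for the target CPU. */
static void send_ipi_single(int cpu, uint32_t action)
{
	ipi_mailbox[cpu] |= action;
}

/* IPI handler: read the status register, acknowledge by clearing the same
 * bits, then act on each pending action. */
static void ipi_interrupt(int cpu)
{
	uint32_t action = ipi_mailbox[cpu];

	ipi_mailbox[cpu] &= ~action;

	if (action & SMP_RESCHEDULE)
		printf("cpu%d: reschedule\n", cpu);
	if (action & SMP_CALL_FUNC)
		printf("cpu%d: run queued function\n", cpu);
}

int main(void)
{
	send_ipi_single(2, SMP_RESCHEDULE | SMP_CALL_FUNC);
	ipi_interrupt(2);
	return 0;
}

The same set/read/clear sequence is what makes the operation safe against a second IPI arriving between the read and the acknowledge: only the bits that were actually observed get cleared.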
cpu                16 arch/mips/math-emu/me-debugfs.c 	int cpu;
cpu                19 arch/mips/math-emu/me-debugfs.c 	for_each_online_cpu(cpu) {
cpu                23 arch/mips/math-emu/me-debugfs.c 		ps = &per_cpu(fpuemustats, cpu);
cpu                67 arch/mips/mm/c-octeon.c 	extern void octeon_send_ipi_single(int cpu, unsigned int action);
cpu                69 arch/mips/mm/c-octeon.c 	int cpu;
cpu                77 arch/mips/mm/c-octeon.c 	cpu = smp_processor_id();
cpu                87 arch/mips/mm/c-octeon.c 	cpumask_clear_cpu(cpu, &mask);
cpu                88 arch/mips/mm/c-octeon.c 	for_each_cpu(cpu, &mask)
cpu                89 arch/mips/mm/c-octeon.c 		octeon_send_ipi_single(cpu, SMP_ICACHE_FLUSH);
cpu                16 arch/mips/mm/context.c static bool asid_versions_eq(int cpu, u64 a, u64 b)
cpu                18 arch/mips/mm/context.c 	return ((a ^ b) & asid_version_mask(cpu)) == 0;
cpu                23 arch/mips/mm/context.c 	unsigned int cpu;
cpu                33 arch/mips/mm/context.c 	cpu = smp_processor_id();
cpu                34 arch/mips/mm/context.c 	asid = asid_cache(cpu);
cpu                36 arch/mips/mm/context.c 	if (!((asid += cpu_asid_inc()) & cpu_asid_mask(&cpu_data[cpu]))) {
cpu                42 arch/mips/mm/context.c 	set_cpu_context(cpu, mm, asid);
cpu                43 arch/mips/mm/context.c 	asid_cache(cpu) = asid;
cpu                49 arch/mips/mm/context.c 	unsigned int cpu = smp_processor_id();
cpu                59 arch/mips/mm/context.c 	if (!asid_versions_eq(cpu, cpu_context(cpu, mm), asid_cache(cpu)))
cpu                67 arch/mips/mm/context.c 	int cpu;
cpu                75 arch/mips/mm/context.c 	for_each_possible_cpu(cpu) {
cpu                76 arch/mips/mm/context.c 		mmid = xchg_relaxed(&cpu_data[cpu].asid_cache, 0);
cpu                86 arch/mips/mm/context.c 			mmid = per_cpu(reserved_mmids, cpu);
cpu                88 arch/mips/mm/context.c 		__set_bit(mmid & cpu_asid_mask(&cpu_data[cpu]), mmid_map);
cpu                89 arch/mips/mm/context.c 		per_cpu(reserved_mmids, cpu) = mmid;
cpu               102 arch/mips/mm/context.c 	int cpu;
cpu               114 arch/mips/mm/context.c 	for_each_possible_cpu(cpu) {
cpu               115 arch/mips/mm/context.c 		if (per_cpu(reserved_mmids, cpu) == mmid) {
cpu               117 arch/mips/mm/context.c 			per_cpu(reserved_mmids, cpu) = newmmid;
cpu               181 arch/mips/mm/context.c 	unsigned int cpu = smp_processor_id();
cpu               187 arch/mips/mm/context.c 		write_c0_entryhi(cpu_asid(cpu, mm));
cpu               209 arch/mips/mm/context.c 	ctx = cpu_context(cpu, mm);
cpu               210 arch/mips/mm/context.c 	old_active_mmid = READ_ONCE(cpu_data[cpu].asid_cache);
cpu               212 arch/mips/mm/context.c 	    !asid_versions_eq(cpu, ctx, atomic64_read(&mmid_version)) ||
cpu               213 arch/mips/mm/context.c 	    !cmpxchg_relaxed(&cpu_data[cpu].asid_cache, old_active_mmid, ctx)) {
cpu               216 arch/mips/mm/context.c 		ctx = cpu_context(cpu, mm);
cpu               217 arch/mips/mm/context.c 		if (!asid_versions_eq(cpu, ctx, atomic64_read(&mmid_version)))
cpu               220 arch/mips/mm/context.c 		WRITE_ONCE(cpu_data[cpu].asid_cache, ctx);
cpu               229 arch/mips/mm/context.c 	if (cpumask_test_cpu(cpu, &tlb_flush_pending)) {
cpu               233 arch/mips/mm/context.c 		cpumask_clear_cpu(cpu, &tlb_flush_pending);
cpu               249 arch/mips/mm/context.c 	    cpumask_intersects(&tlb_flush_pending, &cpu_sibling_map[cpu])) {
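The arch/mips/mm/context.c entries above belong to the generation-based ASID/MMID allocator: asid_cache(cpu) is a counter whose bits above the hardware ASID field act as a version, and an mm's cached context is reused only while its version still matches the current one. A minimal single-CPU, user-space sketch of that idea follows; the 8-bit ASID width is an example value and the printf stands in for the TLB flush, so this is an illustration rather than the kernel code.

#include <stdint.h>
#include <stdio.h>

#define ASID_BITS    8
#define ASID_MASK    ((1u << ASID_BITS) - 1)
#define ASID_VERSION (~(uint64_t)ASID_MASK)    /* upper bits act as a generation */

struct mm { uint64_t context; };

static uint64_t asid_cache = 1u << ASID_BITS;  /* start in generation 1 */

/* Hand out the next ASID; when the low bits wrap, a new generation starts
 * and this is where the real allocator flushes the TLB. */
static uint64_t get_new_asid(struct mm *mm)
{
	uint64_t asid = asid_cache;

	if (!(++asid & ASID_MASK))
		printf("rollover: new generation %#llx, flush TLB\n",
		       (unsigned long long)(asid & ASID_VERSION));

	mm->context = asid;
	asid_cache = asid;
	return asid;
}

/* Reuse a cached ASID only while its generation matches asid_cache. */
static uint64_t check_context(struct mm *mm)
{
	if ((mm->context ^ asid_cache) & ASID_VERSION)
		return get_new_asid(mm);
	return mm->context;
}

int main(void)
{
	struct mm a = { 0 }, b = { 0 };

	printf("a -> %#llx\n", (unsigned long long)check_context(&a));
	printf("b -> %#llx\n", (unsigned long long)check_context(&b));
	printf("a -> %#llx (reused)\n", (unsigned long long)check_context(&a));
	return 0;
}

The same versioning trick appears in the kvm/vz.c guestid_cache entries further up: comparing the stored value against the cache under a version mask cheaply answers "was this ID handed out in the current generation, or does it predate the last flush?"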
cpu               629 arch/mips/mm/page.c 	unsigned int cpu = smp_processor_id();
cpu               635 arch/mips/mm/page.c 	page_descr[cpu].dscr_a = to_phys | M_DM_DSCRA_ZERO_MEM |
cpu               637 arch/mips/mm/page.c 	page_descr[cpu].dscr_b = V_DM_DSCRB_SRC_LENGTH(PAGE_SIZE);
cpu               638 arch/mips/mm/page.c 	__raw_writeq(1, IOADDR(A_DM_REGISTER(cpu, R_DM_DSCR_COUNT)));
cpu               644 arch/mips/mm/page.c 	while (!(__raw_readq(IOADDR(A_DM_REGISTER(cpu, R_DM_DSCR_BASE_DEBUG)))
cpu               647 arch/mips/mm/page.c 	__raw_readq(IOADDR(A_DM_REGISTER(cpu, R_DM_DSCR_BASE)));
cpu               655 arch/mips/mm/page.c 	unsigned int cpu = smp_processor_id();
cpu               662 arch/mips/mm/page.c 	page_descr[cpu].dscr_a = to_phys | M_DM_DSCRA_L2C_DEST |
cpu               664 arch/mips/mm/page.c 	page_descr[cpu].dscr_b = from_phys | V_DM_DSCRB_SRC_LENGTH(PAGE_SIZE);
cpu               665 arch/mips/mm/page.c 	__raw_writeq(1, IOADDR(A_DM_REGISTER(cpu, R_DM_DSCR_COUNT)));
cpu               671 arch/mips/mm/page.c 	while (!(__raw_readq(IOADDR(A_DM_REGISTER(cpu, R_DM_DSCR_BASE_DEBUG)))
cpu               674 arch/mips/mm/page.c 	__raw_readq(IOADDR(A_DM_REGISTER(cpu, R_DM_DSCR_BASE)));
cpu                75 arch/mips/mm/tlb-r3k.c 	int cpu = smp_processor_id();
cpu                77 arch/mips/mm/tlb-r3k.c 	if (cpu_context(cpu, mm) != 0) {
cpu                82 arch/mips/mm/tlb-r3k.c 			cpu_context(cpu, mm) & asid_mask, start, end);
cpu                88 arch/mips/mm/tlb-r3k.c 			int newpid = cpu_context(cpu, mm) & asid_mask;
cpu               153 arch/mips/mm/tlb-r3k.c 	int cpu = smp_processor_id();
cpu               155 arch/mips/mm/tlb-r3k.c 	if (cpu_context(cpu, vma->vm_mm) != 0) {
cpu               160 arch/mips/mm/tlb-r3k.c 		printk("[tlbpage<%lu,0x%08lx>]", cpu_context(cpu, vma->vm_mm), page);
cpu               162 arch/mips/mm/tlb-r3k.c 		newpid = cpu_context(cpu, vma->vm_mm) & asid_mask;
cpu               197 arch/mips/mm/tlb-r3k.c 	if ((pid != (cpu_context(cpu, vma->vm_mm) & asid_mask)) || (cpu_context(cpu, vma->vm_mm) == 0)) {
cpu               199 arch/mips/mm/tlb-r3k.c 		       (cpu_context(cpu, vma->vm_mm)), pid);
cpu               111 arch/mips/mm/tlb-r4k.c 	int cpu = smp_processor_id();
cpu               113 arch/mips/mm/tlb-r4k.c 	if (cpu_context(cpu, mm) != 0) {
cpu               124 arch/mips/mm/tlb-r4k.c 			int newpid = cpu_asid(cpu, mm);
cpu               214 arch/mips/mm/tlb-r4k.c 	int cpu = smp_processor_id();
cpu               216 arch/mips/mm/tlb-r4k.c 	if (cpu_context(cpu, vma->vm_mm) != 0) {
cpu               228 arch/mips/mm/tlb-r4k.c 			write_c0_memorymapid(cpu_asid(cpu, vma->vm_mm));
cpu               230 arch/mips/mm/tlb-r4k.c 			write_c0_entryhi(page | cpu_asid(cpu, vma->vm_mm));
cpu                19 arch/mips/mti-malta/malta-amon.c int amon_cpu_avail(int cpu)
cpu                23 arch/mips/mti-malta/malta-amon.c 	if (cpu < 0 || cpu >= NCPULAUNCH) {
cpu                24 arch/mips/mti-malta/malta-amon.c 		pr_debug("avail: cpu%d is out of range\n", cpu);
cpu                28 arch/mips/mti-malta/malta-amon.c 	launch += cpu;
cpu                30 arch/mips/mti-malta/malta-amon.c 		pr_debug("avail: cpu%d is not ready\n", cpu);
cpu                34 arch/mips/mti-malta/malta-amon.c 		pr_debug("avail: too late.. cpu%d is already gone\n", cpu);
cpu                41 arch/mips/mti-malta/malta-amon.c int amon_cpu_start(int cpu,
cpu                48 arch/mips/mti-malta/malta-amon.c 	if (!amon_cpu_avail(cpu))
cpu                50 arch/mips/mti-malta/malta-amon.c 	if (cpu == smp_processor_id()) {
cpu                51 arch/mips/mti-malta/malta-amon.c 		pr_debug("launch: I am cpu%d!\n", cpu);
cpu                54 arch/mips/mti-malta/malta-amon.c 	launch += cpu;
cpu                56 arch/mips/mti-malta/malta-amon.c 	pr_debug("launch: starting cpu%d\n", cpu);
cpu                70 arch/mips/mti-malta/malta-amon.c 	pr_debug("launch: cpu%d gone!\n", cpu);
cpu               235 arch/mips/netlogic/common/irq.c 	int cpu, node;
cpu               237 arch/mips/netlogic/common/irq.c 	cpu = hwtid % nlm_threads_per_node();
cpu               240 arch/mips/netlogic/common/irq.c 	if (cpu == 0 && node != 0)
cpu                78 arch/mips/netlogic/common/smp.c 	int cpu;
cpu                80 arch/mips/netlogic/common/smp.c 	for_each_cpu(cpu, mask) {
cpu                81 arch/mips/netlogic/common/smp.c 		nlm_send_ipi_single(cpu, action);
cpu               108 arch/mips/netlogic/common/smp.c void nlm_early_init_secondary(int cpu)
cpu                93 arch/mips/netlogic/xlp/wakeup.c static int wait_for_cpus(int cpu, int bootcpu)
cpu               102 arch/mips/netlogic/xlp/wakeup.c 			if (cpu_ready[cpu + i] || (cpu + i) == bootcpu)
cpu               114 arch/mips/netlogic/xlp/wakeup.c 	int core, n, cpu, ncores;
cpu               182 arch/mips/netlogic/xlp/wakeup.c 			cpu = (n * ncores + core) * NLM_THREADS_PER_CORE;
cpu               183 arch/mips/netlogic/xlp/wakeup.c 			if (!cpumask_test_cpu(cpu, wakeup_mask))
cpu               194 arch/mips/netlogic/xlp/wakeup.c 			if (!wait_for_cpus(cpu, 0))
cpu                90 arch/mips/netlogic/xlr/fmn-config.c 			total_credits += cfg->cpu[n].credit_config[bkt];
cpu               123 arch/mips/netlogic/xlr/fmn-config.c 	struct xlr_fmn_info *cpu = xlr_board_fmn_config.cpu;
cpu               140 arch/mips/netlogic/xlr/fmn-config.c 			cpu[j].credit_config[i] = credits_per_cpu;
cpu               147 arch/mips/netlogic/xlr/fmn-config.c 			cpu[j].credit_config[i] += 4;
cpu               163 arch/mips/netlogic/xlr/fmn-config.c static void setup_cpu_fmninfo(struct xlr_fmn_info *cpu, int num_core)
cpu               168 arch/mips/netlogic/xlr/fmn-config.c 		cpu[i].start_stn_id	= (8 * i);
cpu               169 arch/mips/netlogic/xlr/fmn-config.c 		cpu[i].end_stn_id	= (8 * i + 8);
cpu               171 arch/mips/netlogic/xlr/fmn-config.c 		for (j = cpu[i].start_stn_id; j < cpu[i].end_stn_id; j++)
cpu               182 arch/mips/netlogic/xlr/fmn-config.c 	struct xlr_fmn_info *cpu = xlr_board_fmn_config.cpu;
cpu               193 arch/mips/netlogic/xlr/fmn-config.c 	setup_cpu_fmninfo(cpu, num_core);
cpu               290 arch/mips/netlogic/xlr/fmn-config.c 	print_credit_config(&cpu[0]);
cpu               130 arch/mips/netlogic/xlr/fmn.c 	cpu_fmn_info = &xlr_board_fmn_config.cpu[id];
cpu               171 arch/mips/oprofile/op_model_loongson3.c static int loongson3_starting_cpu(unsigned int cpu)
cpu               178 arch/mips/oprofile/op_model_loongson3.c static int loongson3_dying_cpu(unsigned int cpu)
cpu                86 arch/mips/oprofile/op_model_mipsxx.c 	unsigned int cpu = vpe_id();					\
cpu                88 arch/mips/oprofile/op_model_mipsxx.c 	switch (cpu) {							\
cpu               101 arch/mips/oprofile/op_model_mipsxx.c 	unsigned int cpu = vpe_id();					\
cpu               103 arch/mips/oprofile/op_model_mipsxx.c 	switch (cpu) {							\
cpu                18 arch/mips/paravirt/paravirt-irq.c static int cpunum_for_cpu(int cpu)
cpu                21 arch/mips/paravirt/paravirt-irq.c 	return cpu_logical_map(cpu);
cpu               216 arch/mips/paravirt/paravirt-irq.c 	int cpu;
cpu               222 arch/mips/paravirt/paravirt-irq.c 	for_each_online_cpu(cpu) {
cpu               223 arch/mips/paravirt/paravirt-irq.c 		unsigned int cpuid = cpunum_for_cpu(cpu);
cpu               250 arch/mips/paravirt/paravirt-irq.c void irq_mbox_ipi(int cpu, unsigned int actions)
cpu               252 arch/mips/paravirt/paravirt-irq.c 	unsigned int cpuid = cpunum_for_cpu(cpu);
cpu                69 arch/mips/paravirt/paravirt-smp.c void irq_mbox_ipi(int cpu, unsigned int actions);
cpu                70 arch/mips/paravirt/paravirt-smp.c static void paravirt_send_ipi_single(int cpu, unsigned int action)
cpu                72 arch/mips/paravirt/paravirt-smp.c 	irq_mbox_ipi(cpu, action);
cpu                77 arch/mips/paravirt/paravirt-smp.c 	unsigned int cpu;
cpu                79 arch/mips/paravirt/paravirt-smp.c 	for_each_cpu(cpu, mask)
cpu                80 arch/mips/paravirt/paravirt-smp.c 		paravirt_send_ipi_single(cpu, action);
cpu               103 arch/mips/paravirt/paravirt-smp.c static int paravirt_boot_secondary(int cpu, struct task_struct *idle)
cpu               105 arch/mips/paravirt/paravirt-smp.c 	paravirt_smp_gp[cpu] = (unsigned long)task_thread_info(idle);
cpu               107 arch/mips/paravirt/paravirt-smp.c 	paravirt_smp_sp[cpu] = __KSTK_TOS(idle);
cpu               282 arch/mips/pci/pci-xtalk-bridge.c 	int ret, cpu;
cpu               286 arch/mips/pci/pci-xtalk-bridge.c 		cpu = cpumask_first_and(mask, cpu_online_mask);
cpu               287 arch/mips/pci/pci-xtalk-bridge.c 		data->nnasid = COMPACT_TO_NASID_NODEID(cpu_to_node(cpu));
cpu               128 arch/mips/pmcs-msp71xx/msp_irq_cic.c 	int cpu;
cpu               139 arch/mips/pmcs-msp71xx/msp_irq_cic.c 	for_each_online_cpu(cpu) {
cpu               140 arch/mips/pmcs-msp71xx/msp_irq_cic.c 		if (cpumask_test_cpu(cpu, cpumask))
cpu               141 arch/mips/pmcs-msp71xx/msp_irq_cic.c 			cic_mask[cpu] |= imask;
cpu               143 arch/mips/pmcs-msp71xx/msp_irq_cic.c 			cic_mask[cpu] &= ~imask;
cpu                62 arch/mips/sgi-ip27/ip27-berr.c 	int cpu = LOCAL_HUB_L(PI_CPU_NUM);
cpu                67 arch/mips/sgi-ip27/ip27-berr.c 	printk("Slice %c got %cbe at 0x%lx\n", 'A' + cpu, data ? 'd' : 'i',
cpu                71 arch/mips/sgi-ip27/ip27-berr.c 	errst0 = LOCAL_HUB_L(cpu ? PI_ERR_STATUS0_B : PI_ERR_STATUS0_A);
cpu                72 arch/mips/sgi-ip27/ip27-berr.c 	errst1 = LOCAL_HUB_L(cpu ? PI_ERR_STATUS1_B : PI_ERR_STATUS1_A);
cpu                83 arch/mips/sgi-ip27/ip27-berr.c 	int cpu = LOCAL_HUB_L(PI_CPU_NUM);
cpu                84 arch/mips/sgi-ip27/ip27-berr.c 	int cpuoff = cpu << 8;
cpu                89 arch/mips/sgi-ip27/ip27-berr.c 		    cpu ? PI_ERR_CLEAR_ALL_B : PI_ERR_CLEAR_ALL_A);
cpu                84 arch/mips/sgi-ip27/ip27-init.c 	int cpu = smp_processor_id();
cpu               100 arch/mips/sgi-ip27/ip27-init.c 	install_cpu_nmi_handler(cputoslice(cpu));
cpu                28 arch/mips/sgi-ip27/ip27-irq.c 	cpuid_t	cpu;
cpu                53 arch/mips/sgi-ip27/ip27-irq.c 	unsigned long *mask = per_cpu(irq_enable_mask, hd->cpu);
cpu                63 arch/mips/sgi-ip27/ip27-irq.c 	unsigned long *mask = per_cpu(irq_enable_mask, hd->cpu);
cpu                73 arch/mips/sgi-ip27/ip27-irq.c 	int cpu;
cpu                75 arch/mips/sgi-ip27/ip27-irq.c 	cpu = cpumask_first_and(mask, cpu_online_mask);
cpu                76 arch/mips/sgi-ip27/ip27-irq.c 	if (cpu >= nr_cpu_ids)
cpu                77 arch/mips/sgi-ip27/ip27-irq.c 		cpu = cpumask_any(cpu_online_mask);
cpu                79 arch/mips/sgi-ip27/ip27-irq.c 	nasid = COMPACT_TO_NASID_NODEID(cpu_to_node(cpu));
cpu                80 arch/mips/sgi-ip27/ip27-irq.c 	hd->cpu = cpu;
cpu                81 arch/mips/sgi-ip27/ip27-irq.c 	if (!cputoslice(cpu)) {
cpu               106 arch/mips/sgi-ip27/ip27-irq.c 	irq_data_update_effective_affinity(d, cpumask_of(hd->cpu));
cpu               145 arch/mips/sgi-ip27/ip27-irq.c 	info->nasid = cpu_to_node(hd->cpu);
cpu               189 arch/mips/sgi-ip27/ip27-irq.c 	cpuid_t cpu = smp_processor_id();
cpu               190 arch/mips/sgi-ip27/ip27-irq.c 	unsigned long *mask = per_cpu(irq_enable_mask, cpu);
cpu               231 arch/mips/sgi-ip27/ip27-irq.c 	cpuid_t cpu = smp_processor_id();
cpu               232 arch/mips/sgi-ip27/ip27-irq.c 	unsigned long *mask = per_cpu(irq_enable_mask, cpu);
cpu               256 arch/mips/sgi-ip27/ip27-irq.c 	int cpu = smp_processor_id();
cpu               257 arch/mips/sgi-ip27/ip27-irq.c 	unsigned long *mask = per_cpu(irq_enable_mask, cpu);
cpu                76 arch/mips/sgi-ip27/ip27-klconfig.c cnodeid_t get_cpu_cnode(cpuid_t cpu)
cpu                78 arch/mips/sgi-ip27/ip27-klconfig.c 	return CPUID_TO_COMPACT_NODEID(cpu);
cpu               100 arch/mips/sgi-ip27/ip27-klconfig.c klcpu_t *sn_get_cpuinfo(cpuid_t cpu)
cpu               108 arch/mips/sgi-ip27/ip27-klconfig.c 	if (!(cpu < MAXCPUS)) {
cpu               109 arch/mips/sgi-ip27/ip27-klconfig.c 		printk("sn_get_cpuinfo: illegal cpuid 0x%lx\n", cpu);
cpu               113 arch/mips/sgi-ip27/ip27-klconfig.c 	cnode = get_cpu_cnode(cpu);
cpu               122 arch/mips/sgi-ip27/ip27-klconfig.c 		if (acpu && acpu->cpu_info.virtid == cpu)
cpu               128 arch/mips/sgi-ip27/ip27-klconfig.c int get_cpu_slice(cpuid_t cpu)
cpu               132 arch/mips/sgi-ip27/ip27-klconfig.c 	if ((acpu = sn_get_cpuinfo(cpu)) == NULL)
cpu               222 arch/mips/sgi-ip27/ip27-nmi.c 					cpu = cpumask_first(cpumask_of_node(node));
cpu               223 arch/mips/sgi-ip27/ip27-nmi.c 					for (n=0; n < CNODE_NUM_CPUS(node); cpu++, n++) {
cpu               224 arch/mips/sgi-ip27/ip27-nmi.c 						CPUMASK_SETB(nmied_cpus, cpu);
cpu               229 arch/mips/sgi-ip27/ip27-nmi.c 						SEND_NMI((cputonasid(cpu)), (cputoslice(cpu)));
cpu                34 arch/mips/sgi-ip27/ip27-smp.c static void alloc_cpupda(cpuid_t cpu, int cpunum)
cpu                36 arch/mips/sgi-ip27/ip27-smp.c 	cnodeid_t node = get_cpu_cnode(cpu);
cpu                41 arch/mips/sgi-ip27/ip27-smp.c 	cputoslice(cpunum) = get_cpu_slice(cpu);
cpu               198 arch/mips/sgi-ip27/ip27-smp.c static int ip27_boot_secondary(int cpu, struct task_struct *idle)
cpu               203 arch/mips/sgi-ip27/ip27-smp.c 	LAUNCH_SLAVE(cputonasid(cpu), cputoslice(cpu),
cpu                43 arch/mips/sgi-ip27/ip27-timer.c 	unsigned int cpu = smp_processor_id();
cpu                44 arch/mips/sgi-ip27/ip27-timer.c 	int slice = cputoslice(cpu);
cpu                59 arch/mips/sgi-ip27/ip27-timer.c 	unsigned int cpu = smp_processor_id();
cpu                60 arch/mips/sgi-ip27/ip27-timer.c 	struct clock_event_device *cd = &per_cpu(hub_rt_clockevent, cpu);
cpu                61 arch/mips/sgi-ip27/ip27-timer.c 	int slice = cputoslice(cpu);
cpu                92 arch/mips/sgi-ip27/ip27-timer.c 	unsigned int cpu = smp_processor_id();
cpu                93 arch/mips/sgi-ip27/ip27-timer.c 	struct clock_event_device *cd = &per_cpu(hub_rt_clockevent, cpu);
cpu                94 arch/mips/sgi-ip27/ip27-timer.c 	unsigned char *name = per_cpu(hub_rt_name, cpu);
cpu                96 arch/mips/sgi-ip27/ip27-timer.c 	sprintf(name, "hub-rt %d", cpu);
cpu               106 arch/mips/sgi-ip27/ip27-timer.c 	cd->cpumask		= cpumask_of(cpu);
cpu               157 arch/mips/sgi-ip27/ip27-timer.c 	klcpu_t *cpu;
cpu               166 arch/mips/sgi-ip27/ip27-timer.c 	cpu = (klcpu_t *) KLCF_COMP(board, cpuid);
cpu               167 arch/mips/sgi-ip27/ip27-timer.c 	if (!cpu)
cpu               170 arch/mips/sgi-ip27/ip27-timer.c 	printk("CPU %d clock is %dMHz.\n", smp_processor_id(), cpu->cpu_speed);
cpu                42 arch/mips/sibyte/bcm1480/irq.c void bcm1480_mask_irq(int cpu, int irq)
cpu                53 arch/mips/sibyte/bcm1480/irq.c 	cur_ints = ____raw_readq(IOADDR(A_BCM1480_IMR_MAPPER(cpu) + R_BCM1480_IMR_INTERRUPT_MASK_H + hl_spacing));
cpu                55 arch/mips/sibyte/bcm1480/irq.c 	____raw_writeq(cur_ints, IOADDR(A_BCM1480_IMR_MAPPER(cpu) + R_BCM1480_IMR_INTERRUPT_MASK_H + hl_spacing));
cpu                59 arch/mips/sibyte/bcm1480/irq.c void bcm1480_unmask_irq(int cpu, int irq)
cpu                70 arch/mips/sibyte/bcm1480/irq.c 	cur_ints = ____raw_readq(IOADDR(A_BCM1480_IMR_MAPPER(cpu) + R_BCM1480_IMR_INTERRUPT_MASK_H + hl_spacing));
cpu                72 arch/mips/sibyte/bcm1480/irq.c 	____raw_writeq(cur_ints, IOADDR(A_BCM1480_IMR_MAPPER(cpu) + R_BCM1480_IMR_INTERRUPT_MASK_H + hl_spacing));
cpu                81 arch/mips/sibyte/bcm1480/irq.c 	int i = 0, old_cpu, cpu, int_on, k;
cpu                88 arch/mips/sibyte/bcm1480/irq.c 	cpu = cpu_logical_map(i);
cpu               108 arch/mips/sibyte/bcm1480/irq.c 		bcm1480_irq_owner[irq] = cpu;
cpu               111 arch/mips/sibyte/bcm1480/irq.c 			cur_ints = ____raw_readq(IOADDR(A_BCM1480_IMR_MAPPER(cpu) + R_BCM1480_IMR_INTERRUPT_MASK_H + (k*BCM1480_IMR_HL_SPACING)));
cpu               113 arch/mips/sibyte/bcm1480/irq.c 			____raw_writeq(cur_ints, IOADDR(A_BCM1480_IMR_MAPPER(cpu) + R_BCM1480_IMR_INTERRUPT_MASK_H + (k*BCM1480_IMR_HL_SPACING)));
cpu               239 arch/mips/sibyte/bcm1480/irq.c 	unsigned int i, cpu;
cpu               247 arch/mips/sibyte/bcm1480/irq.c 		for (cpu = 0; cpu < 4; cpu++) {
cpu               249 arch/mips/sibyte/bcm1480/irq.c 				     IOADDR(A_BCM1480_IMR_REGISTER(cpu,
cpu               256 arch/mips/sibyte/bcm1480/irq.c 		for (cpu = 0; cpu < 4; cpu++) {
cpu               258 arch/mips/sibyte/bcm1480/irq.c 				     IOADDR(A_BCM1480_IMR_REGISTER(cpu,
cpu               270 arch/mips/sibyte/bcm1480/irq.c 	for (cpu = 0; cpu < 4; cpu++) {
cpu               271 arch/mips/sibyte/bcm1480/irq.c 		__raw_writeq(IMR_IP3_VAL, IOADDR(A_BCM1480_IMR_REGISTER(cpu, R_BCM1480_IMR_INTERRUPT_MAP_BASE_H) +
cpu               277 arch/mips/sibyte/bcm1480/irq.c 	for (cpu = 0; cpu < 4; cpu++) {
cpu               279 arch/mips/sibyte/bcm1480/irq.c 			     IOADDR(A_BCM1480_IMR_REGISTER(cpu, R_BCM1480_IMR_MAILBOX_0_CLR_CPU)));
cpu               281 arch/mips/sibyte/bcm1480/irq.c 			     IOADDR(A_BCM1480_IMR_REGISTER(cpu, R_BCM1480_IMR_MAILBOX_1_CLR_CPU)));
cpu               287 arch/mips/sibyte/bcm1480/irq.c 	for (cpu = 0; cpu < 4; cpu++) {
cpu               288 arch/mips/sibyte/bcm1480/irq.c 		__raw_writeq(tmp, IOADDR(A_BCM1480_IMR_REGISTER(cpu, R_BCM1480_IMR_INTERRUPT_MASK_H)));
cpu               291 arch/mips/sibyte/bcm1480/irq.c 	for (cpu = 0; cpu < 4; cpu++) {
cpu               292 arch/mips/sibyte/bcm1480/irq.c 		__raw_writeq(tmp, IOADDR(A_BCM1480_IMR_REGISTER(cpu, R_BCM1480_IMR_INTERRUPT_MASK_L)));
cpu               310 arch/mips/sibyte/bcm1480/irq.c 	unsigned int cpu = smp_processor_id();
cpu               318 arch/mips/sibyte/bcm1480/irq.c 	base = A_BCM1480_IMR_MAPPER(cpu);
cpu               334 arch/mips/sibyte/bcm1480/irq.c 	unsigned int cpu = smp_processor_id();
cpu               340 arch/mips/sibyte/bcm1480/irq.c 		do_IRQ(K_BCM1480_INT_TIMER_0 + cpu);
cpu                67 arch/mips/sibyte/bcm1480/smp.c static void bcm1480_send_ipi_single(int cpu, unsigned int action)
cpu                69 arch/mips/sibyte/bcm1480/smp.c 	__raw_writeq((((u64)action)<< 48), mailbox_0_set_regs[cpu]);
cpu               107 arch/mips/sibyte/bcm1480/smp.c static int bcm1480_boot_secondary(int cpu, struct task_struct *idle)
cpu               111 arch/mips/sibyte/bcm1480/smp.c 	retval = cfe_cpu_start(cpu_logical_map(cpu), &smp_bootstrap,
cpu               115 arch/mips/sibyte/bcm1480/smp.c 		printk("cfe_start_cpu(%i) returned %i\n" , cpu, retval);
cpu               160 arch/mips/sibyte/bcm1480/smp.c 	int cpu = smp_processor_id();
cpu               166 arch/mips/sibyte/bcm1480/smp.c 	action = (__raw_readq(mailbox_0_regs[cpu]) >> 48) & 0xffff;
cpu               169 arch/mips/sibyte/bcm1480/smp.c 	__raw_writeq(((u64)action)<<48, mailbox_0_clear_regs[cpu]);
cpu                41 arch/mips/sibyte/sb1250/irq.c void sb1250_mask_irq(int cpu, int irq)
cpu                47 arch/mips/sibyte/sb1250/irq.c 	cur_ints = ____raw_readq(IOADDR(A_IMR_MAPPER(cpu) +
cpu                50 arch/mips/sibyte/sb1250/irq.c 	____raw_writeq(cur_ints, IOADDR(A_IMR_MAPPER(cpu) +
cpu                55 arch/mips/sibyte/sb1250/irq.c void sb1250_unmask_irq(int cpu, int irq)
cpu                61 arch/mips/sibyte/sb1250/irq.c 	cur_ints = ____raw_readq(IOADDR(A_IMR_MAPPER(cpu) +
cpu                64 arch/mips/sibyte/sb1250/irq.c 	____raw_writeq(cur_ints, IOADDR(A_IMR_MAPPER(cpu) +
cpu                73 arch/mips/sibyte/sb1250/irq.c 	int i = 0, old_cpu, cpu, int_on;
cpu                81 arch/mips/sibyte/sb1250/irq.c 	cpu = cpu_logical_map(i);
cpu                97 arch/mips/sibyte/sb1250/irq.c 	sb1250_irq_owner[irq] = cpu;
cpu               100 arch/mips/sibyte/sb1250/irq.c 		cur_ints = ____raw_readq(IOADDR(A_IMR_MAPPER(cpu) +
cpu               103 arch/mips/sibyte/sb1250/irq.c 		____raw_writeq(cur_ints, IOADDR(A_IMR_MAPPER(cpu) +
cpu               145 arch/mips/sibyte/sb1250/irq.c 			int cpu;
cpu               147 arch/mips/sibyte/sb1250/irq.c 			cpu = cpu_logical_map(i);
cpu               149 arch/mips/sibyte/sb1250/irq.c 			cpu = i;
cpu               156 arch/mips/sibyte/sb1250/irq.c 				     IOADDR(A_IMR_REGISTER(cpu,
cpu               279 arch/mips/sibyte/sb1250/irq.c 	unsigned int cpu = smp_processor_id();
cpu               287 arch/mips/sibyte/sb1250/irq.c 	mask = __raw_readq(IOADDR(A_IMR_REGISTER(cpu,
cpu               295 arch/mips/sibyte/sb1250/irq.c 	unsigned int cpu = smp_processor_id();
cpu               313 arch/mips/sibyte/sb1250/irq.c 		do_IRQ(K_INT_TIMER_0 + cpu);	/* sb1250_timer_interrupt() */
cpu                56 arch/mips/sibyte/sb1250/smp.c static void sb1250_send_ipi_single(int cpu, unsigned int action)
cpu                58 arch/mips/sibyte/sb1250/smp.c 	__raw_writeq((((u64)action) << 48), mailbox_set_regs[cpu]);
cpu                96 arch/mips/sibyte/sb1250/smp.c static int sb1250_boot_secondary(int cpu, struct task_struct *idle)
cpu               100 arch/mips/sibyte/sb1250/smp.c 	retval = cfe_cpu_start(cpu_logical_map(cpu), &smp_bootstrap,
cpu               104 arch/mips/sibyte/sb1250/smp.c 		printk("cfe_start_cpu(%i) returned %i\n" , cpu, retval);
cpu               149 arch/mips/sibyte/sb1250/smp.c 	int cpu = smp_processor_id();
cpu               155 arch/mips/sibyte/sb1250/smp.c 	action = (____raw_readq(mailbox_regs[cpu]) >> 48) & 0xffff;
cpu               158 arch/mips/sibyte/sb1250/smp.c 	____raw_writeq(((u64)action) << 48, mailbox_clear_regs[cpu]);
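The sibyte bcm1480/sb1250 smp.c entries pack the IPI action into the top 16 bits of a 64-bit mailbox register: the sender writes action << 48 to the set register, the receiver reads the register, shifts the value back down, and writes the same bits to the clear register to acknowledge. A small model of that packing, with an ordinary uint64_t in place of the memory-mapped register:

#include <stdint.h>
#include <stdio.h>

#define NR_CPUS 4

static uint64_t mailbox[NR_CPUS];   /* models the 64-bit per-CPU mailbox register */

/* Sender: the 16-bit action mask lives in bits 63..48 of the mailbox. */
static void send_ipi_single(int cpu, unsigned int action)
{
	mailbox[cpu] |= (uint64_t)action << 48;
}

/* Receiver: pull the action bits back out, then clear exactly those bits. */
static unsigned int mailbox_interrupt(int cpu)
{
	unsigned int action = (mailbox[cpu] >> 48) & 0xffff;

	mailbox[cpu] &= ~((uint64_t)action << 48);
	return action;
}

int main(void)
{
	send_ipi_single(1, 0x5);
	printf("cpu1 action mask: %#x\n", mailbox_interrupt(1));
	printf("cpu1 after ack:   %#llx\n", (unsigned long long)mailbox[1]);
	return 0;
}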
cpu                72 arch/mips/sni/time.c 	unsigned int cpu = smp_processor_id();
cpu                74 arch/mips/sni/time.c 	cd->cpumask		= cpumask_of(cpu);
cpu               116 arch/nds32/include/asm/l2_cache.h static inline unsigned long GET_L2CC_CTRL_CPU(unsigned long cpu)
cpu               118 arch/nds32/include/asm/l2_cache.h 	if (cpu == smp_processor_id())
cpu               120 arch/nds32/include/asm/l2_cache.h 	return L2C_R_REG(L2CC_CTRL_OFF + (cpu << 8));
cpu               123 arch/nds32/include/asm/l2_cache.h static inline void SET_L2CC_CTRL_CPU(unsigned long cpu, unsigned long val)
cpu               125 arch/nds32/include/asm/l2_cache.h 	if (cpu == smp_processor_id())
cpu               128 arch/nds32/include/asm/l2_cache.h 		L2C_W_REG(L2CC_CTRL_OFF + (cpu << 8), val);
cpu               131 arch/nds32/include/asm/l2_cache.h static inline unsigned long GET_L2CC_STATUS_CPU(unsigned long cpu)
cpu               133 arch/nds32/include/asm/l2_cache.h 	if (cpu == smp_processor_id())
cpu               135 arch/nds32/include/asm/l2_cache.h 	return L2C_R_REG(L2_CCTL_STATUS_OFF + (cpu << 8));
cpu                57 arch/nds32/include/asm/mmu_context.h 	unsigned int cpu = smp_processor_id();
cpu                59 arch/nds32/include/asm/mmu_context.h 	if (!cpumask_test_and_set_cpu(cpu, mm_cpumask(next)) || prev != next) {
cpu                27 arch/nds32/kernel/cacheinfo.c int init_cache_level(unsigned int cpu)
cpu                29 arch/nds32/kernel/cacheinfo.c 	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
cpu                37 arch/nds32/kernel/cacheinfo.c int populate_cache_leaves(unsigned int cpu)
cpu                40 arch/nds32/kernel/cacheinfo.c 	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
cpu              1108 arch/nds32/kernel/perf_event_cpu.c 	int cpu;
cpu              1109 arch/nds32/kernel/perf_event_cpu.c 	struct pmu_hw_events *events = &per_cpu(cpu_hw_events, cpu);
cpu                90 arch/nds32/kernel/setup.c static void __init dump_cpu_info(int cpu)
cpu               102 arch/nds32/kernel/setup.c 	pr_info("CPU%d Features: %s\n", cpu, str);
cpu                43 arch/nios2/include/asm/thread_info.h 	__u32			cpu;		/* current CPU */
cpu                61 arch/nios2/include/asm/thread_info.h 	.cpu		= 0,			\
cpu                22 arch/nios2/kernel/cpuinfo.c static inline u32 fcpu(struct device_node *cpu, const char *n)
cpu                26 arch/nios2/kernel/cpuinfo.c 	of_property_read_u32(cpu, n, &val);
cpu                33 arch/nios2/kernel/cpuinfo.c 	struct device_node *cpu;
cpu                37 arch/nios2/kernel/cpuinfo.c 	cpu = of_get_cpu_node(0, NULL);
cpu                38 arch/nios2/kernel/cpuinfo.c 	if (!cpu)
cpu                41 arch/nios2/kernel/cpuinfo.c 	if (!of_property_read_bool(cpu, "altr,has-initda"))
cpu                46 arch/nios2/kernel/cpuinfo.c 	cpuinfo.cpu_clock_freq = fcpu(cpu, "clock-frequency");
cpu                48 arch/nios2/kernel/cpuinfo.c 	str = of_get_property(cpu, "altr,implementation", &len);
cpu                54 arch/nios2/kernel/cpuinfo.c 	cpuinfo.has_div = of_property_read_bool(cpu, "altr,has-div");
cpu                55 arch/nios2/kernel/cpuinfo.c 	cpuinfo.has_mul = of_property_read_bool(cpu, "altr,has-mul");
cpu                56 arch/nios2/kernel/cpuinfo.c 	cpuinfo.has_mulx = of_property_read_bool(cpu, "altr,has-mulx");
cpu                57 arch/nios2/kernel/cpuinfo.c 	cpuinfo.has_bmx = of_property_read_bool(cpu, "altr,has-bmx");
cpu                58 arch/nios2/kernel/cpuinfo.c 	cpuinfo.has_cdx = of_property_read_bool(cpu, "altr,has-cdx");
cpu                59 arch/nios2/kernel/cpuinfo.c 	cpuinfo.mmu = of_property_read_bool(cpu, "altr,has-mmu");
cpu                76 arch/nios2/kernel/cpuinfo.c 	cpuinfo.tlb_num_ways = fcpu(cpu, "altr,tlb-num-ways");
cpu                80 arch/nios2/kernel/cpuinfo.c 	cpuinfo.icache_line_size = fcpu(cpu, "icache-line-size");
cpu                81 arch/nios2/kernel/cpuinfo.c 	cpuinfo.icache_size = fcpu(cpu, "icache-size");
cpu                88 arch/nios2/kernel/cpuinfo.c 	cpuinfo.dcache_line_size = fcpu(cpu, "dcache-line-size");
cpu                94 arch/nios2/kernel/cpuinfo.c 	cpuinfo.dcache_size = fcpu(cpu, "dcache-size");
cpu               101 arch/nios2/kernel/cpuinfo.c 	cpuinfo.tlb_pid_num_bits = fcpu(cpu, "altr,pid-num-bits");
cpu               103 arch/nios2/kernel/cpuinfo.c 	cpuinfo.tlb_num_entries = fcpu(cpu, "altr,tlb-num-entries");
cpu               105 arch/nios2/kernel/cpuinfo.c 	cpuinfo.tlb_ptr_sz = fcpu(cpu, "altr,tlb-ptr-sz");
cpu               107 arch/nios2/kernel/cpuinfo.c 	cpuinfo.reset_addr = fcpu(cpu, "altr,reset-addr");
cpu               108 arch/nios2/kernel/cpuinfo.c 	cpuinfo.exception_addr = fcpu(cpu, "altr,exception-addr");
cpu               109 arch/nios2/kernel/cpuinfo.c 	cpuinfo.fast_tlb_miss_exc_addr = fcpu(cpu, "altr,fast-tlb-miss-addr");
cpu               111 arch/nios2/kernel/cpuinfo.c 	of_node_put(cpu);
cpu                15 arch/openrisc/include/asm/smp.h #define raw_smp_processor_id()	(current_thread_info()->cpu)
cpu                20 arch/openrisc/include/asm/smp.h extern void arch_send_call_function_single_ipi(int cpu);
cpu                48 arch/openrisc/include/asm/thread_info.h 	__u32			cpu;		/* current CPU */
cpu                72 arch/openrisc/include/asm/thread_info.h 	.cpu		= 0,				\
cpu                19 arch/openrisc/include/asm/time.h extern void synchronise_count_master(int cpu);
cpu                20 arch/openrisc/include/asm/time.h extern void synchronise_count_slave(int cpu);
cpu               151 arch/openrisc/kernel/setup.c static struct device_node *setup_find_cpu_node(int cpu)
cpu               159 arch/openrisc/kernel/setup.c 		if (hwid == cpu)
cpu               168 arch/openrisc/kernel/setup.c 	struct device_node *cpu;
cpu               174 arch/openrisc/kernel/setup.c 	cpu = setup_find_cpu_node(cpu_id);
cpu               175 arch/openrisc/kernel/setup.c 	if (!cpu)
cpu               192 arch/openrisc/kernel/setup.c 	if (of_property_read_u32(cpu, "clock-frequency",
cpu               202 arch/openrisc/kernel/setup.c 	of_node_put(cpu);
cpu               268 arch/openrisc/kernel/setup.c 	struct device_node *cpu = setup_find_cpu_node(smp_processor_id());
cpu               270 arch/openrisc/kernel/setup.c 	val = of_get_property(cpu, "clock-frequency", NULL);
cpu                38 arch/openrisc/kernel/smp.c static void boot_secondary(unsigned int cpu, struct task_struct *idle)
cpu                46 arch/openrisc/kernel/smp.c 	secondary_release = cpu;
cpu                47 arch/openrisc/kernel/smp.c 	smp_cross_call(cpumask_of(cpu), IPI_WAKEUP);
cpu                86 arch/openrisc/kernel/smp.c int __cpu_up(unsigned int cpu, struct task_struct *idle)
cpu                90 arch/openrisc/kernel/smp.c 			cpu);
cpu                95 arch/openrisc/kernel/smp.c 	current_pgd[cpu] = init_mm.pgd;
cpu                97 arch/openrisc/kernel/smp.c 	boot_secondary(cpu, idle);
cpu               100 arch/openrisc/kernel/smp.c 		pr_crit("CPU%u: failed to start\n", cpu);
cpu               103 arch/openrisc/kernel/smp.c 	synchronise_count_master(cpu);
cpu               111 arch/openrisc/kernel/smp.c 	unsigned int cpu = smp_processor_id();
cpu               118 arch/openrisc/kernel/smp.c 	cpumask_set_cpu(cpu, mm_cpumask(mm));
cpu               120 arch/openrisc/kernel/smp.c 	pr_info("CPU%u: Booted secondary processor\n", cpu);
cpu               125 arch/openrisc/kernel/smp.c 	notify_cpu_starting(cpu);
cpu               132 arch/openrisc/kernel/smp.c 	synchronise_count_slave(cpu);
cpu               133 arch/openrisc/kernel/smp.c 	set_cpu_online(cpu, true);
cpu               146 arch/openrisc/kernel/smp.c 	unsigned int cpu = smp_processor_id();
cpu               165 arch/openrisc/kernel/smp.c 		WARN(1, "CPU%u: Unknown IPI message 0x%x\n", cpu, ipi_msg);
cpu               170 arch/openrisc/kernel/smp.c void smp_send_reschedule(int cpu)
cpu               172 arch/openrisc/kernel/smp.c 	smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE);
cpu               205 arch/openrisc/kernel/smp.c void arch_send_call_function_single_ipi(int cpu)
cpu               207 arch/openrisc/kernel/smp.c 	smp_cross_call(cpumask_of(cpu), IPI_CALL_FUNC_SINGLE);
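The openrisc smp.c entries trace the usual secondary-CPU bring-up handshake: __cpu_up() publishes the CPU number through boot_secondary() (which also kicks the target with an IPI_WAKEUP cross call), the secondary spins until it sees itself released, announces itself, and sets itself online, at which point the boot CPU stops waiting. The user-space model below uses pthreads in place of CPUs and a plain flag array in place of the cpu_online mask; the real code waits with a timeout and reports a failure instead of spinning forever, so treat this as a sketch of the handshake only.

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int secondary_release = -1;
static atomic_bool cpu_online_flag[2];

/* Secondary side: spin until released, announce, mark ourselves online. */
static void *secondary_start_kernel(void *arg)
{
	int cpu = *(int *)arg;

	while (atomic_load(&secondary_release) != cpu)
		;	/* boot_secondary() has not released us yet */

	printf("CPU%d: Booted secondary processor\n", cpu);
	atomic_store(&cpu_online_flag[cpu], true);
	return NULL;
}

/* Boot side: start the secondary, publish its number, wait for it to
 * report itself online. */
static int cpu_up(int cpu, pthread_t *thread)
{
	static int arg;

	arg = cpu;
	if (pthread_create(thread, NULL, secondary_start_kernel, &arg))
		return -1;

	atomic_store(&secondary_release, cpu);

	while (!atomic_load(&cpu_online_flag[cpu]))
		;	/* the real code spins with a timeout here */
	return 0;
}

int main(void)
{
	pthread_t t;

	if (cpu_up(1, &t) == 0)
		pthread_join(t, NULL);
	return 0;
}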
cpu                31 arch/openrisc/kernel/sync-timer.c void synchronise_count_master(int cpu)
cpu                36 arch/openrisc/kernel/sync-timer.c 	pr_info("Synchronize counters for CPU %u: ", cpu);
cpu                93 arch/openrisc/kernel/sync-timer.c void synchronise_count_slave(int cpu)
cpu                67 arch/openrisc/kernel/time.c 	unsigned int cpu = smp_processor_id();
cpu                69 arch/openrisc/kernel/time.c 		&per_cpu(clockevent_openrisc_timer, cpu);
cpu                70 arch/openrisc/kernel/time.c 	struct cpuinfo_or1k *cpuinfo = &cpuinfo_or1k[cpu];
cpu                82 arch/openrisc/kernel/time.c 	evt->cpumask = cpumask_of(cpu);
cpu               112 arch/openrisc/kernel/time.c 	unsigned int cpu = smp_processor_id();
cpu               114 arch/openrisc/kernel/time.c 		&per_cpu(clockevent_openrisc_timer, cpu);
cpu                35 arch/parisc/include/asm/hardirq.h #define __IRQ_STAT(cpu, member) (irq_stat[cpu].member)
cpu                45 arch/parisc/include/asm/irq.h extern unsigned long txn_affinity_addr(unsigned int irq, int cpu);
cpu                27 arch/parisc/include/asm/smp.h #define cpu_number_map(cpu)	(cpu)
cpu                28 arch/parisc/include/asm/smp.h #define cpu_logical_map(cpu)	(cpu)
cpu                32 arch/parisc/include/asm/smp.h extern void arch_send_call_function_single_ipi(int cpu);
cpu                37 arch/parisc/include/asm/smp.h #define raw_smp_processor_id()	(current_thread_info()->cpu)
cpu                50 arch/parisc/include/asm/smp.h static inline void __cpu_die (unsigned int cpu) {
cpu                15 arch/parisc/include/asm/thread_info.h 	__u32 cpu;			/* current CPU */
cpu                23 arch/parisc/include/asm/thread_info.h 	.cpu		= 0,			\
cpu                18 arch/parisc/include/asm/topology.h #define topology_physical_package_id(cpu)	(cpu_topology[cpu].socket_id)
cpu                19 arch/parisc/include/asm/topology.h #define topology_core_id(cpu)		(cpu_topology[cpu].core_id)
cpu                20 arch/parisc/include/asm/topology.h #define topology_core_cpumask(cpu)	(&cpu_topology[cpu].core_sibling)
cpu                21 arch/parisc/include/asm/topology.h #define topology_sibling_cpumask(cpu)	(&cpu_topology[cpu].thread_sibling)
cpu                25 arch/parisc/include/asm/topology.h const struct cpumask *cpu_coregroup_mask(int cpu);
cpu               233 arch/parisc/kernel/asm-offsets.c 	DEFINE(TI_CPU, offsetof(struct thread_info, cpu));
cpu              1226 arch/parisc/kernel/hardware.c 	enum cpu_type cpu;
cpu              1370 arch/parisc/kernel/hardware.c 			return ptr->cpu;
cpu                73 arch/parisc/kernel/irq.c 	int cpu = smp_processor_id();
cpu                76 arch/parisc/kernel/irq.c 	per_cpu(local_ack_eiem, cpu) &= ~mask;
cpu                79 arch/parisc/kernel/irq.c 	set_eiem(cpu_eiem & per_cpu(local_ack_eiem, cpu));
cpu                88 arch/parisc/kernel/irq.c 	int cpu = smp_processor_id();
cpu                91 arch/parisc/kernel/irq.c 	per_cpu(local_ack_eiem, cpu) |= mask;
cpu                94 arch/parisc/kernel/irq.c 	set_eiem(cpu_eiem & per_cpu(local_ack_eiem, cpu));
cpu               334 arch/parisc/kernel/irq.c unsigned long txn_affinity_addr(unsigned int irq, int cpu)
cpu               338 arch/parisc/kernel/irq.c 	cpumask_copy(irq_data_get_affinity_mask(d), cpumask_of(cpu));
cpu               341 arch/parisc/kernel/irq.c 	return per_cpu(cpu_data, cpu).txn_addr;
cpu               405 arch/parisc/kernel/irq.c 	int cpu = smp_processor_id();
cpu               423 arch/parisc/kernel/irq.c 	stack_start = (unsigned long) &per_cpu(irq_stack_union, cpu).stack;
cpu               426 arch/parisc/kernel/irq.c 	last_usage = &per_cpu(irq_stat.irq_stack_usage, cpu);
cpu               442 arch/parisc/kernel/irq.c 	last_usage = &per_cpu(irq_stat.kernel_stack_usage, cpu);
cpu               511 arch/parisc/kernel/irq.c 	int irq, cpu = smp_processor_id();
cpu               521 arch/parisc/kernel/irq.c 	eirr_val = mfctl(23) & cpu_eiem & per_cpu(local_ack_eiem, cpu);
cpu               536 arch/parisc/kernel/irq.c 		int cpu = cpumask_first(&dest);
cpu               539 arch/parisc/kernel/irq.c 		       irq, smp_processor_id(), cpu);
cpu               541 arch/parisc/kernel/irq.c 			   per_cpu(cpu_data, cpu).hpa);
cpu               559 arch/parisc/kernel/irq.c 	set_eiem(cpu_eiem & per_cpu(local_ack_eiem, cpu));
cpu               375 arch/parisc/kernel/processor.c 	unsigned long cpu;
cpu               377 arch/parisc/kernel/processor.c 	for_each_online_cpu(cpu) {
cpu               378 arch/parisc/kernel/processor.c 		const struct cpuinfo_parisc *cpuinfo = &per_cpu(cpu_data, cpu);
cpu               385 arch/parisc/kernel/processor.c 				 cpu, boot_cpu_data.family_name);
cpu               396 arch/parisc/kernel/processor.c 				topology_physical_package_id(cpu));
cpu               398 arch/parisc/kernel/processor.c 				cpumask_weight(topology_core_cpumask(cpu)));
cpu               399 arch/parisc/kernel/processor.c 		seq_printf(m, "core id\t\t: %d\n", topology_core_id(cpu));
cpu               188 arch/parisc/kernel/smp.c ipi_send(int cpu, enum ipi_message_type op)
cpu               190 arch/parisc/kernel/smp.c 	struct cpuinfo_parisc *p = &per_cpu(cpu_data, cpu);
cpu               191 arch/parisc/kernel/smp.c 	spinlock_t *lock = &per_cpu(ipi_lock, cpu);
cpu               203 arch/parisc/kernel/smp.c 	int cpu;
cpu               205 arch/parisc/kernel/smp.c 	for_each_cpu(cpu, mask)
cpu               206 arch/parisc/kernel/smp.c 		ipi_send(cpu, op);
cpu               233 arch/parisc/kernel/smp.c smp_send_reschedule(int cpu) { send_IPI_single(cpu, IPI_RESCHEDULE); }
cpu               246 arch/parisc/kernel/smp.c void arch_send_call_function_single_ipi(int cpu)
cpu               248 arch/parisc/kernel/smp.c 	send_IPI_single(cpu, IPI_CALL_FUNC);
cpu               325 arch/parisc/kernel/smp.c 	task_thread_info(idle)->cpu = cpuid;
cpu               397 arch/parisc/kernel/smp.c 	int cpu;
cpu               399 arch/parisc/kernel/smp.c 	for_each_possible_cpu(cpu)
cpu               400 arch/parisc/kernel/smp.c 		spin_lock_init(&per_cpu(ipi_lock, cpu));
cpu               416 arch/parisc/kernel/smp.c int __cpu_up(unsigned int cpu, struct task_struct *tidle)
cpu               418 arch/parisc/kernel/smp.c 	if (cpu != 0 && cpu < parisc_max_cpus && smp_boot_one_cpu(cpu, tidle))
cpu               421 arch/parisc/kernel/smp.c 	return cpu_online(cpu) ? 0 : -ENOSYS;
cpu                67 arch/parisc/kernel/time.c 	unsigned int cpu = smp_processor_id();
cpu                68 arch/parisc/kernel/time.c 	struct cpuinfo_parisc *cpuinfo = &per_cpu(cpu_data, cpu);
cpu                89 arch/parisc/kernel/time.c 	if (cpu == 0)
cpu               158 arch/parisc/kernel/time.c 	unsigned int cpu = smp_processor_id();
cpu               163 arch/parisc/kernel/time.c 	per_cpu(cpu_data, cpu).it_value = next_tick;
cpu               253 arch/parisc/kernel/time.c 		int cpu;
cpu               257 arch/parisc/kernel/time.c 		for_each_online_cpu(cpu) {
cpu               258 arch/parisc/kernel/time.c 			if (cpu == 0)
cpu               261 arch/parisc/kernel/time.c 			    (cpu0_loc == per_cpu(cpu_data, cpu).cpu_loc))
cpu                25 arch/parisc/kernel/topology.c const struct cpumask *cpu_coregroup_mask(int cpu)
cpu                27 arch/parisc/kernel/topology.c 	return &cpu_topology[cpu].core_sibling;
cpu                33 arch/parisc/kernel/topology.c 	int cpu;
cpu                36 arch/parisc/kernel/topology.c 	for_each_possible_cpu(cpu) {
cpu                37 arch/parisc/kernel/topology.c 		cpu_topo = &cpu_topology[cpu];
cpu                43 arch/parisc/kernel/topology.c 		if (cpu != cpuid)
cpu                44 arch/parisc/kernel/topology.c 			cpumask_set_cpu(cpu, &cpuid_topo->core_sibling);
cpu                50 arch/parisc/kernel/topology.c 		if (cpu != cpuid)
cpu                51 arch/parisc/kernel/topology.c 			cpumask_set_cpu(cpu, &cpuid_topo->thread_sibling);
cpu                68 arch/parisc/kernel/topology.c 	unsigned long cpu;
cpu                79 arch/parisc/kernel/topology.c 	for_each_online_cpu(cpu) {
cpu                80 arch/parisc/kernel/topology.c 		const struct cpuinfo_parisc *cpuinfo = &per_cpu(cpu_data, cpu);
cpu                82 arch/parisc/kernel/topology.c 		if (cpu == cpuid) /* ignore current cpu */
cpu                86 arch/parisc/kernel/topology.c 			cpuid_topo->core_id = cpu_topology[cpu].core_id;
cpu                89 arch/parisc/kernel/topology.c 				cpuid_topo->socket_id = cpu_topology[cpu].socket_id;
cpu                96 arch/parisc/kernel/topology.c 			max_socket = max(max_socket, cpu_topology[cpu].socket_id);
cpu               125 arch/parisc/kernel/topology.c 	unsigned int cpu;
cpu               128 arch/parisc/kernel/topology.c 	for_each_possible_cpu(cpu) {
cpu               129 arch/parisc/kernel/topology.c 		struct cputopo_parisc *cpu_topo = &(cpu_topology[cpu]);
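The parisc kernel/topology.c entries build the per-CPU sibling cpumasks by comparing socket and core ids across all possible CPUs. The sketch below compresses that into user-space C with one bit mask per CPU; it only fills in the mask of the CPU being updated and includes that CPU in its own mask, whereas the real code also updates the other CPUs' masks and carries the cpu != cpuid distinction, so this is a simplified model of the idea.

#include <stdio.h>

#define NR_CPUS 8

struct cputopo {
	int socket_id;
	int core_id;
	unsigned int core_sibling;    /* bit mask of CPUs in the same socket */
	unsigned int thread_sibling;  /* bit mask of CPUs on the same core */
};

static struct cputopo cpu_topology[NR_CPUS];

/* Rebuild the sibling masks of @cpuid by scanning every CPU and matching
 * first on socket_id, then on core_id. */
static void update_siblings_masks(int cpuid)
{
	struct cputopo *topo = &cpu_topology[cpuid];
	int cpu;

	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		if (cpu_topology[cpu].socket_id != topo->socket_id)
			continue;
		topo->core_sibling |= 1u << cpu;
		if (cpu_topology[cpu].core_id == topo->core_id)
			topo->thread_sibling |= 1u << cpu;
	}
}

int main(void)
{
	int cpu;

	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		cpu_topology[cpu].socket_id = cpu / 4;   /* 4 CPUs per socket */
		cpu_topology[cpu].core_id = cpu / 2;     /* 2 threads per core */
	}
	update_siblings_masks(5);
	printf("cpu5 core_sibling=%#x thread_sibling=%#x\n",
	       cpu_topology[5].core_sibling, cpu_topology[5].thread_sibling);
	return 0;
}

With that layout CPU 5 shares a socket with CPUs 4..7 and a core with CPU 4, so the program prints core_sibling=0xf0 and thread_sibling=0x30.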
cpu               146 arch/parisc/kernel/traps.c 	       level, current_thread_info()->cpu, cr30, cr31);
cpu                33 arch/parisc/lib/delay.c 	int cpu;
cpu                36 arch/parisc/lib/delay.c 	cpu = smp_processor_id();
cpu                58 arch/parisc/lib/delay.c 		if (unlikely(cpu != smp_processor_id())) {
cpu                60 arch/parisc/lib/delay.c 			cpu = smp_processor_id();
cpu               335 arch/powerpc/boot/4xx.c 	u32 cpu, plb, opb, ebc, tb, uart0, uart1, m;
cpu               341 arch/powerpc/boot/4xx.c 		cpu = plb = sys_clk;
cpu               349 arch/powerpc/boot/4xx.c 		cpu = sys_clk * m / CPC0_SYS0_FWDVA(sys0);
cpu               362 arch/powerpc/boot/4xx.c 		tb = cpu;
cpu               381 arch/powerpc/boot/4xx.c 	dt_fixup_cpu_clocks(cpu, tb, 0);
cpu               419 arch/powerpc/boot/4xx.c 	u32 cpu, plb, opb, ebc, vco;
cpu               456 arch/powerpc/boot/4xx.c 	cpu = clk_a / pradv0;
cpu               470 arch/powerpc/boot/4xx.c 		tb = cpu;
cpu               472 arch/powerpc/boot/4xx.c 	dt_fixup_cpu_clocks(cpu, tb, 0);
cpu               554 arch/powerpc/boot/4xx.c 	u32 cpu, plb, opb, ebc, tb, uart0, uart1, m;
cpu               585 arch/powerpc/boot/4xx.c 		cpu = sys_clk * m / fwdv;
cpu               589 arch/powerpc/boot/4xx.c 		cpu = sys_clk * m / fwdv;
cpu               590 arch/powerpc/boot/4xx.c 		plb = cpu / cbdv;
cpu               599 arch/powerpc/boot/4xx.c 		uart0 = cpu / udiv;
cpu               605 arch/powerpc/boot/4xx.c 		uart1 = cpu / udiv;
cpu               610 arch/powerpc/boot/4xx.c 	tb = cpu;
cpu               612 arch/powerpc/boot/4xx.c 	dt_fixup_cpu_clocks(cpu, tb, 0);
cpu               626 arch/powerpc/boot/4xx.c 	u32 cpu, plb, opb, ebc, uart0, uart1;
cpu               644 arch/powerpc/boot/4xx.c 		cpu = sys_clk * m / (fwdva * pllmr0_ccdv);
cpu               646 arch/powerpc/boot/4xx.c 		cpu = sys_clk / pllmr0_ccdv;
cpu               648 arch/powerpc/boot/4xx.c 	plb = cpu / cbdv;
cpu               651 arch/powerpc/boot/4xx.c 	tb = cpu;
cpu               652 arch/powerpc/boot/4xx.c 	uart0 = cpu / (cpc0_ucr & 0x0000007f);
cpu               653 arch/powerpc/boot/4xx.c 	uart1 = cpu / ((cpc0_ucr & 0x00007f00) >> 8);
cpu               655 arch/powerpc/boot/4xx.c 	dt_fixup_cpu_clocks(cpu, tb, 0);
cpu               749 arch/powerpc/boot/4xx.c 	u32 cpu, plb, opb, ebc, vco, tb, uart0, uart1;
cpu               782 arch/powerpc/boot/4xx.c 	cpu = vco / (fwdva * cpudv0);
cpu               790 arch/powerpc/boot/4xx.c 	tb = cpu;
cpu               793 arch/powerpc/boot/4xx.c 	dt_fixup_cpu_clocks(cpu, tb, 0);
cpu                58 arch/powerpc/boot/devtree.c void dt_fixup_cpu_clocks(u32 cpu, u32 tb, u32 bus)
cpu                62 arch/powerpc/boot/devtree.c 	printf("CPU clock-frequency <- 0x%x (%dMHz)\n\r", cpu, MHZ(cpu));
cpu                68 arch/powerpc/boot/devtree.c 		setprop_val(devp, "clock-frequency", cpu);
cpu                66 arch/powerpc/include/asm/cell-pmu.h extern u32  cbe_read_phys_ctr(u32 cpu, u32 phys_ctr);
cpu                67 arch/powerpc/include/asm/cell-pmu.h extern void cbe_write_phys_ctr(u32 cpu, u32 phys_ctr, u32 val);
cpu                68 arch/powerpc/include/asm/cell-pmu.h extern u32  cbe_read_ctr(u32 cpu, u32 ctr);
cpu                69 arch/powerpc/include/asm/cell-pmu.h extern void cbe_write_ctr(u32 cpu, u32 ctr, u32 val);
cpu                71 arch/powerpc/include/asm/cell-pmu.h extern u32  cbe_read_pm07_control(u32 cpu, u32 ctr);
cpu                72 arch/powerpc/include/asm/cell-pmu.h extern void cbe_write_pm07_control(u32 cpu, u32 ctr, u32 val);
cpu                73 arch/powerpc/include/asm/cell-pmu.h extern u32  cbe_read_pm(u32 cpu, enum pm_reg_name reg);
cpu                74 arch/powerpc/include/asm/cell-pmu.h extern void cbe_write_pm(u32 cpu, enum pm_reg_name reg, u32 val);
cpu                76 arch/powerpc/include/asm/cell-pmu.h extern u32  cbe_get_ctr_size(u32 cpu, u32 phys_ctr);
cpu                77 arch/powerpc/include/asm/cell-pmu.h extern void cbe_set_ctr_size(u32 cpu, u32 phys_ctr, u32 ctr_size);
cpu                79 arch/powerpc/include/asm/cell-pmu.h extern void cbe_enable_pm(u32 cpu);
cpu                80 arch/powerpc/include/asm/cell-pmu.h extern void cbe_disable_pm(u32 cpu);
cpu                82 arch/powerpc/include/asm/cell-pmu.h extern void cbe_read_trace_buffer(u32 cpu, u64 *buf);
cpu                84 arch/powerpc/include/asm/cell-pmu.h extern void cbe_enable_pm_interrupts(u32 cpu, u32 thread, u32 mask);
cpu                85 arch/powerpc/include/asm/cell-pmu.h extern void cbe_disable_pm_interrupts(u32 cpu);
cpu                86 arch/powerpc/include/asm/cell-pmu.h extern u32  cbe_get_and_clear_pm_interrupts(u32 cpu);
cpu               128 arch/powerpc/include/asm/cell-regs.h extern struct cbe_pmd_regs __iomem *cbe_get_cpu_pmd_regs(int cpu);
cpu               160 arch/powerpc/include/asm/cell-regs.h extern struct cbe_pmd_shadow_regs *cbe_get_cpu_pmd_shadow_regs(int cpu);
cpu               218 arch/powerpc/include/asm/cell-regs.h extern struct cbe_iic_regs __iomem *cbe_get_cpu_iic_regs(int cpu);
cpu               305 arch/powerpc/include/asm/cell-regs.h extern struct cbe_mic_tm_regs __iomem *cbe_get_cpu_mic_tm_regs(int cpu);
cpu               319 arch/powerpc/include/asm/cell-regs.h extern u32 cbe_get_hw_thread_id(int cpu);
cpu               320 arch/powerpc/include/asm/cell-regs.h extern u32 cbe_cpu_to_node(int cpu);
cpu                87 arch/powerpc/include/asm/cpuidle.h unsigned long pnv_cpu_offline(unsigned int cpu);
cpu                50 arch/powerpc/include/asm/cputhreads.h 	int		i, cpu;
cpu                56 arch/powerpc/include/asm/cputhreads.h 			cpu = cpumask_next_and(-1, &tmp, cpu_online_mask);
cpu                57 arch/powerpc/include/asm/cputhreads.h 			if (cpu < nr_cpu_ids)
cpu                58 arch/powerpc/include/asm/cputhreads.h 				cpumask_set_cpu(cpu, &res);
cpu                75 arch/powerpc/include/asm/cputhreads.h int cpu_core_index_of_thread(int cpu);
cpu                78 arch/powerpc/include/asm/cputhreads.h static inline int cpu_core_index_of_thread(int cpu) { return cpu; }
cpu                82 arch/powerpc/include/asm/cputhreads.h static inline int cpu_thread_in_core(int cpu)
cpu                84 arch/powerpc/include/asm/cputhreads.h 	return cpu & (threads_per_core - 1);
cpu                87 arch/powerpc/include/asm/cputhreads.h static inline int cpu_thread_in_subcore(int cpu)
cpu                89 arch/powerpc/include/asm/cputhreads.h 	return cpu & (threads_per_subcore - 1);
cpu                92 arch/powerpc/include/asm/cputhreads.h static inline int cpu_first_thread_sibling(int cpu)
cpu                94 arch/powerpc/include/asm/cputhreads.h 	return cpu & ~(threads_per_core - 1);
cpu                97 arch/powerpc/include/asm/cputhreads.h static inline int cpu_last_thread_sibling(int cpu)
cpu                99 arch/powerpc/include/asm/cputhreads.h 	return cpu | (threads_per_core - 1);
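The cputhreads.h helpers above map a logical CPU number onto its position within an SMT core using plain mask arithmetic, which works because threads_per_core is a power of two. A standalone worked example with SMT8; the value 8 and the sample CPU number are illustrative, the expressions mirror the entries above.

#include <stdio.h>

/* threads_per_core must be a power of two for this mask arithmetic;
 * 8 (SMT8) is just an example value. */
static int threads_per_core = 8;

static int cpu_thread_in_core(int cpu)       { return cpu & (threads_per_core - 1); }
static int cpu_first_thread_sibling(int cpu) { return cpu & ~(threads_per_core - 1); }
static int cpu_last_thread_sibling(int cpu)  { return cpu | (threads_per_core - 1); }

int main(void)
{
	int cpu = 13;

	/* With SMT8: cpu 13 is thread 5 of the core spanning cpus 8..15. */
	printf("cpu %d: thread %d, siblings %d..%d\n", cpu,
	       cpu_thread_in_core(cpu),
	       cpu_first_thread_sibling(cpu),
	       cpu_last_thread_sibling(cpu));
	return 0;
}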
cpu                90 arch/powerpc/include/asm/dbell.h extern void doorbell_global_ipi(int cpu);
cpu                91 arch/powerpc/include/asm/dbell.h extern void doorbell_core_ipi(int cpu);
cpu                92 arch/powerpc/include/asm/dbell.h extern int doorbell_try_core_ipi(int cpu);
cpu                23 arch/powerpc/include/asm/fsl_pamu_stash.h 	u32	cpu;	/* cpu number */
cpu                25 arch/powerpc/include/asm/fsl_pm.h 	void (*irq_mask)(int cpu);
cpu                28 arch/powerpc/include/asm/fsl_pm.h 	void (*irq_unmask)(int cpu);
cpu                29 arch/powerpc/include/asm/fsl_pm.h 	void (*cpu_enter_state)(int cpu, int state);
cpu                30 arch/powerpc/include/asm/fsl_pm.h 	void (*cpu_exit_state)(int cpu, int state);
cpu                31 arch/powerpc/include/asm/fsl_pm.h 	void (*cpu_up_prepare)(int cpu);
cpu                32 arch/powerpc/include/asm/fsl_pm.h 	void (*cpu_die)(int cpu);
cpu                36 arch/powerpc/include/asm/hardirq.h extern u64 arch_irq_stat_cpu(unsigned int cpu);
cpu               544 arch/powerpc/include/asm/kvm_book3s_64.h extern void kvmhv_rm_send_ipi(int cpu);
cpu               860 arch/powerpc/include/asm/kvm_host.h static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
cpu               130 arch/powerpc/include/asm/kvm_ppc.h extern void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
cpu               272 arch/powerpc/include/asm/kvm_ppc.h 	void (*vcpu_load)(struct kvm_vcpu *vcpu, int cpu);
cpu               433 arch/powerpc/include/asm/kvm_ppc.h static inline void kvmppc_set_xics_phys(int cpu, unsigned long addr)
cpu               435 arch/powerpc/include/asm/kvm_ppc.h 	paca_ptrs[cpu]->kvm_hstate.xics_phys = (void __iomem *)addr;
cpu               438 arch/powerpc/include/asm/kvm_ppc.h static inline void kvmppc_set_xive_tima(int cpu,
cpu               442 arch/powerpc/include/asm/kvm_ppc.h 	paca_ptrs[cpu]->kvm_hstate.xive_tima_phys = (void __iomem *)phys_addr;
cpu               443 arch/powerpc/include/asm/kvm_ppc.h 	paca_ptrs[cpu]->kvm_hstate.xive_tima_virt = virt_addr;
cpu               529 arch/powerpc/include/asm/kvm_ppc.h static inline void kvmppc_set_host_ipi(int cpu)
cpu               537 arch/powerpc/include/asm/kvm_ppc.h 	paca_ptrs[cpu]->kvm_hstate.host_ipi = 1;
cpu               540 arch/powerpc/include/asm/kvm_ppc.h static inline void kvmppc_clear_host_ipi(int cpu)
cpu               542 arch/powerpc/include/asm/kvm_ppc.h 	paca_ptrs[cpu]->kvm_hstate.host_ipi = 0;
cpu               567 arch/powerpc/include/asm/kvm_ppc.h static inline void kvmppc_set_xics_phys(int cpu, unsigned long addr)
cpu               570 arch/powerpc/include/asm/kvm_ppc.h static inline void kvmppc_set_xive_tima(int cpu,
cpu               580 arch/powerpc/include/asm/kvm_ppc.h static inline void kvmppc_set_host_ipi(int cpu)
cpu               583 arch/powerpc/include/asm/kvm_ppc.h static inline void kvmppc_clear_host_ipi(int cpu)
cpu               618 arch/powerpc/include/asm/kvm_ppc.h 			struct kvm_vcpu *vcpu, u32 cpu);
cpu               668 arch/powerpc/include/asm/kvm_ppc.h 				    struct kvm_vcpu *vcpu, u32 cpu);
cpu               687 arch/powerpc/include/asm/kvm_ppc.h 					   struct kvm_vcpu *vcpu, u32 cpu);
cpu               708 arch/powerpc/include/asm/kvm_ppc.h 					   struct kvm_vcpu *vcpu, u32 cpu) { return -EBUSY; }
cpu               724 arch/powerpc/include/asm/kvm_ppc.h 			  struct kvm_vcpu *vcpu, u32 cpu) { return -EBUSY; }
cpu               847 arch/powerpc/include/asm/kvm_ppc.h 			     u32 cpu);
cpu               857 arch/powerpc/include/asm/kvm_ppc.h 		struct kvm_vcpu *vcpu, u32 cpu)
cpu              1047 arch/powerpc/include/asm/kvm_ppc.h extern void xics_wake_cpu(int cpu);
cpu               118 arch/powerpc/include/asm/lppaca.h #define lppaca_of(cpu)	(*paca_ptrs[cpu]->lppaca_ptr)
cpu               190 arch/powerpc/include/asm/lppaca.h extern void register_dtl_buffer(int cpu);
cpu               192 arch/powerpc/include/asm/lppaca.h extern long hcall_vphn(unsigned long cpu, u64 flags, __be32 *associativity);
cpu                48 arch/powerpc/include/asm/machdep.h 	unsigned long  	(*get_proc_freq)(unsigned int cpu);
cpu               122 arch/powerpc/include/asm/mce.h 	u16			cpu;
cpu               239 arch/powerpc/include/asm/opal.h int64_t opal_int_set_mfrr(uint32_t cpu, uint8_t mfrr);
cpu               305 arch/powerpc/include/asm/opal.h s64 opal_signal_system_reset(s32 cpu);
cpu               306 arch/powerpc/include/asm/opal.h s64 opal_quiesce(u64 shutdown_type, s32 cpu);
cpu               277 arch/powerpc/include/asm/paca.h extern void initialise_paca(struct paca_struct *new_paca, int cpu);
cpu               280 arch/powerpc/include/asm/paca.h extern void allocate_paca(int cpu);
cpu               286 arch/powerpc/include/asm/paca.h static inline void allocate_paca(int cpu) { };
cpu                53 arch/powerpc/include/asm/plpar_wrappers.h static inline long vpa_call(unsigned long flags, unsigned long cpu,
cpu                58 arch/powerpc/include/asm/plpar_wrappers.h 	return plpar_hcall_norets(H_REGISTER_VPA, flags, cpu, vpa);
cpu                61 arch/powerpc/include/asm/plpar_wrappers.h static inline long unregister_vpa(unsigned long cpu)
cpu                63 arch/powerpc/include/asm/plpar_wrappers.h 	return vpa_call(H_VPA_DEREG_VPA, cpu, 0);
cpu                66 arch/powerpc/include/asm/plpar_wrappers.h static inline long register_vpa(unsigned long cpu, unsigned long vpa)
cpu                68 arch/powerpc/include/asm/plpar_wrappers.h 	return vpa_call(H_VPA_REG_VPA, cpu, vpa);
cpu                71 arch/powerpc/include/asm/plpar_wrappers.h static inline long unregister_slb_shadow(unsigned long cpu)
cpu                73 arch/powerpc/include/asm/plpar_wrappers.h 	return vpa_call(H_VPA_DEREG_SLB, cpu, 0);
cpu                76 arch/powerpc/include/asm/plpar_wrappers.h static inline long register_slb_shadow(unsigned long cpu, unsigned long vpa)
cpu                78 arch/powerpc/include/asm/plpar_wrappers.h 	return vpa_call(H_VPA_REG_SLB, cpu, vpa);
cpu                81 arch/powerpc/include/asm/plpar_wrappers.h static inline long unregister_dtl(unsigned long cpu)
cpu                83 arch/powerpc/include/asm/plpar_wrappers.h 	return vpa_call(H_VPA_DEREG_DTL, cpu, 0);
cpu                86 arch/powerpc/include/asm/plpar_wrappers.h static inline long register_dtl(unsigned long cpu, unsigned long vpa)
cpu                88 arch/powerpc/include/asm/plpar_wrappers.h 	return vpa_call(H_VPA_REG_DTL, cpu, vpa);
cpu                91 arch/powerpc/include/asm/plpar_wrappers.h extern void vpa_init(int cpu);
cpu               318 arch/powerpc/include/asm/plpar_wrappers.h static inline long plpar_signal_sys_reset(long cpu)
cpu               320 arch/powerpc/include/asm/plpar_wrappers.h 	return plpar_hcall_norets(H_SIGNAL_SYS_RESET, cpu);
cpu                12 arch/powerpc/include/asm/powernv.h void pnv_program_cpu_hotplug_lpcr(unsigned int cpu, u64 lpcr_val);
cpu               178 arch/powerpc/include/asm/ps3.h int ps3_irq_plug_setup(enum ps3_cpu_binding cpu, unsigned long outlet,
cpu               181 arch/powerpc/include/asm/ps3.h int ps3_event_receive_port_setup(enum ps3_cpu_binding cpu, unsigned int *virq);
cpu               185 arch/powerpc/include/asm/ps3.h int ps3_io_irq_setup(enum ps3_cpu_binding cpu, unsigned int interrupt_id,
cpu               188 arch/powerpc/include/asm/ps3.h int ps3_vuart_irq_setup(enum ps3_cpu_binding cpu, void* virt_addr_bmp,
cpu               191 arch/powerpc/include/asm/ps3.h int ps3_spe_irq_setup(enum ps3_cpu_binding cpu, unsigned long spe_id,
cpu               196 arch/powerpc/include/asm/ps3.h 	enum ps3_cpu_binding cpu, unsigned int *virq);
cpu               494 arch/powerpc/include/asm/ps3.h u32 ps3_read_phys_ctr(u32 cpu, u32 phys_ctr);
cpu               495 arch/powerpc/include/asm/ps3.h void ps3_write_phys_ctr(u32 cpu, u32 phys_ctr, u32 val);
cpu               496 arch/powerpc/include/asm/ps3.h u32 ps3_read_ctr(u32 cpu, u32 ctr);
cpu               497 arch/powerpc/include/asm/ps3.h void ps3_write_ctr(u32 cpu, u32 ctr, u32 val);
cpu               499 arch/powerpc/include/asm/ps3.h u32 ps3_read_pm07_control(u32 cpu, u32 ctr);
cpu               500 arch/powerpc/include/asm/ps3.h void ps3_write_pm07_control(u32 cpu, u32 ctr, u32 val);
cpu               501 arch/powerpc/include/asm/ps3.h u32 ps3_read_pm(u32 cpu, enum pm_reg_name reg);
cpu               502 arch/powerpc/include/asm/ps3.h void ps3_write_pm(u32 cpu, enum pm_reg_name reg, u32 val);
cpu               504 arch/powerpc/include/asm/ps3.h u32 ps3_get_ctr_size(u32 cpu, u32 phys_ctr);
cpu               505 arch/powerpc/include/asm/ps3.h void ps3_set_ctr_size(u32 cpu, u32 phys_ctr, u32 ctr_size);
cpu               507 arch/powerpc/include/asm/ps3.h void ps3_enable_pm(u32 cpu);
cpu               508 arch/powerpc/include/asm/ps3.h void ps3_disable_pm(u32 cpu);
cpu               509 arch/powerpc/include/asm/ps3.h void ps3_enable_pm_interrupts(u32 cpu, u32 thread, u32 mask);
cpu               510 arch/powerpc/include/asm/ps3.h void ps3_disable_pm_interrupts(u32 cpu);
cpu               512 arch/powerpc/include/asm/ps3.h u32 ps3_get_and_clear_pm_interrupts(u32 cpu);
cpu               514 arch/powerpc/include/asm/ps3.h u32 ps3_get_hw_thread_id(int cpu);
cpu                33 arch/powerpc/include/asm/smp.h extern int cpu_to_chip_id(int cpu);
cpu                38 arch/powerpc/include/asm/smp.h 	void  (*message_pass)(int cpu, int msg);
cpu                40 arch/powerpc/include/asm/smp.h 	void  (*cause_ipi)(int cpu);
cpu                42 arch/powerpc/include/asm/smp.h 	int   (*cause_nmi_ipi)(int cpu);
cpu                55 arch/powerpc/include/asm/smp.h extern int smp_send_nmi_ipi(int cpu, void (*fn)(struct pt_regs *), u64 delay_us);
cpu                56 arch/powerpc/include/asm/smp.h extern int smp_send_safe_nmi_ipi(int cpu, void (*fn)(struct pt_regs *), u64 delay_us);
cpu                66 arch/powerpc/include/asm/smp.h void generic_cpu_die(unsigned int cpu);
cpu                67 arch/powerpc/include/asm/smp.h void generic_set_cpu_dead(unsigned int cpu);
cpu                68 arch/powerpc/include/asm/smp.h void generic_set_cpu_up(unsigned int cpu);
cpu                69 arch/powerpc/include/asm/smp.h int generic_check_cpu_restart(unsigned int cpu);
cpu                70 arch/powerpc/include/asm/smp.h int is_cpu_dead(unsigned int cpu);
cpu               100 arch/powerpc/include/asm/smp.h static inline int get_hard_smp_processor_id(int cpu)
cpu               102 arch/powerpc/include/asm/smp.h 	return smp_hw_index[cpu];
cpu               105 arch/powerpc/include/asm/smp.h static inline void set_hard_smp_processor_id(int cpu, int phys)
cpu               107 arch/powerpc/include/asm/smp.h 	smp_hw_index[cpu] = phys;
cpu               116 arch/powerpc/include/asm/smp.h static inline struct cpumask *cpu_sibling_mask(int cpu)
cpu               118 arch/powerpc/include/asm/smp.h 	return per_cpu(cpu_sibling_map, cpu);
cpu               121 arch/powerpc/include/asm/smp.h static inline struct cpumask *cpu_core_mask(int cpu)
cpu               123 arch/powerpc/include/asm/smp.h 	return per_cpu(cpu_core_map, cpu);
cpu               126 arch/powerpc/include/asm/smp.h static inline struct cpumask *cpu_l2_cache_mask(int cpu)
cpu               128 arch/powerpc/include/asm/smp.h 	return per_cpu(cpu_l2_cache_map, cpu);
cpu               131 arch/powerpc/include/asm/smp.h static inline struct cpumask *cpu_smallcore_mask(int cpu)
cpu               133 arch/powerpc/include/asm/smp.h 	return per_cpu(cpu_smallcore_map, cpu);
cpu               136 arch/powerpc/include/asm/smp.h extern int cpu_to_core_id(int cpu);
cpu               163 arch/powerpc/include/asm/smp.h extern void smp_muxed_ipi_message_pass(int cpu, int msg);
cpu               164 arch/powerpc/include/asm/smp.h extern void smp_muxed_ipi_set_message(int cpu, int msg);
cpu               173 arch/powerpc/include/asm/smp.h extern void __cpu_die(unsigned int cpu);
cpu               181 arch/powerpc/include/asm/smp.h static inline const struct cpumask *cpu_sibling_mask(int cpu)
cpu               183 arch/powerpc/include/asm/smp.h 	return cpumask_of(cpu);
cpu               186 arch/powerpc/include/asm/smp.h static inline const struct cpumask *cpu_smallcore_mask(int cpu)
cpu               188 arch/powerpc/include/asm/smp.h 	return cpumask_of(cpu);
cpu               194 arch/powerpc/include/asm/smp.h static inline int get_hard_smp_processor_id(int cpu)
cpu               196 arch/powerpc/include/asm/smp.h 	return paca_ptrs[cpu]->hw_cpu_id;
cpu               199 arch/powerpc/include/asm/smp.h static inline void set_hard_smp_processor_id(int cpu, int phys)
cpu               201 arch/powerpc/include/asm/smp.h 	paca_ptrs[cpu]->hw_cpu_id = phys;
cpu               207 arch/powerpc/include/asm/smp.h static inline int get_hard_smp_processor_id(int cpu)
cpu               212 arch/powerpc/include/asm/smp.h static inline void set_hard_smp_processor_id(int cpu, int phys)
cpu               228 arch/powerpc/include/asm/smp.h extern void smp_mpic_setup_cpu(int cpu);
cpu               238 arch/powerpc/include/asm/smp.h extern void arch_send_call_function_single_ipi(int cpu);
cpu                43 arch/powerpc/include/asm/spinlock.h static inline bool vcpu_is_preempted(int cpu)
cpu                47 arch/powerpc/include/asm/spinlock.h 	return !!(be32_to_cpu(lppaca_of(cpu).yield_count) & 1);
cpu               191 arch/powerpc/include/asm/spu.h void spu_irq_setaffinity(struct spu *spu, int cpu);
cpu                26 arch/powerpc/include/asm/spu_priv1.h 	void (*cpu_affinity_set) (struct spu *spu, int cpu);
cpu                81 arch/powerpc/include/asm/spu_priv1.h spu_cpu_affinity_set (struct spu *spu, int cpu)
cpu                83 arch/powerpc/include/asm/spu_priv1.h 	spu_priv1_ops->cpu_affinity_set(spu, cpu);
cpu                48 arch/powerpc/include/asm/topology.h static inline void update_numa_cpu_lookup_table(unsigned int cpu, int node)
cpu                50 arch/powerpc/include/asm/topology.h 	numa_cpu_lookup_table[cpu] = node;
cpu                53 arch/powerpc/include/asm/topology.h static inline int early_cpu_to_node(int cpu)
cpu                57 arch/powerpc/include/asm/topology.h 	nid = numa_cpu_lookup_table[cpu];
cpu                67 arch/powerpc/include/asm/topology.h static inline int early_cpu_to_node(int cpu) { return 0; }
cpu                86 arch/powerpc/include/asm/topology.h static inline void update_numa_cpu_lookup_table(unsigned int cpu, int node) {}
cpu                99 arch/powerpc/include/asm/topology.h extern int find_and_online_cpu_nid(int cpu);
cpu               115 arch/powerpc/include/asm/topology.h static inline int find_and_online_cpu_nid(int cpu)
cpu               137 arch/powerpc/include/asm/topology.h #define topology_physical_package_id(cpu)	(cpu_to_chip_id(cpu))
cpu               138 arch/powerpc/include/asm/topology.h #define topology_sibling_cpumask(cpu)	(per_cpu(cpu_sibling_map, cpu))
cpu               139 arch/powerpc/include/asm/topology.h #define topology_core_cpumask(cpu)	(per_cpu(cpu_core_map, cpu))
cpu               140 arch/powerpc/include/asm/topology.h #define topology_core_id(cpu)		(cpu_to_core_id(cpu))
cpu               142 arch/powerpc/include/asm/topology.h int dlpar_cpu_readd(int cpu);
cpu                34 arch/powerpc/include/asm/xics.h extern void icp_native_cause_ipi_rm(int cpu);
cpu                61 arch/powerpc/include/asm/xics.h 	void (*cause_ipi)(int cpu);
cpu               157 arch/powerpc/include/asm/xics.h extern irqreturn_t xics_ipi_dispatch(int cpu);
cpu                93 arch/powerpc/include/asm/xive.h extern int  xive_smp_prepare_cpu(unsigned int cpu);
cpu               101 arch/powerpc/include/asm/xive.h extern void xmon_xive_do_dump(int cpu);
cpu               148 arch/powerpc/include/asm/xive.h static inline int  xive_smp_prepare_cpu(unsigned int cpu) { return -EINVAL; }
cpu                98 arch/powerpc/kernel/asm-offsets.c 	OFFSET(TASK_CPU, task_struct, cpu);
cpu               516 arch/powerpc/kernel/asm-offsets.c 	OFFSET(VCPU_CPU, kvm_vcpu, cpu);
cpu               190 arch/powerpc/kernel/cacheinfo.c static void cache_cpu_set(struct cache *cache, int cpu)
cpu               195 arch/powerpc/kernel/cacheinfo.c 		WARN_ONCE(cpumask_test_cpu(cpu, &next->shared_cpu_map),
cpu               197 arch/powerpc/kernel/cacheinfo.c 			  cpu, next->ofnode,
cpu               199 arch/powerpc/kernel/cacheinfo.c 		cpumask_set_cpu(cpu, &next->shared_cpu_map);
cpu               642 arch/powerpc/kernel/cacheinfo.c static const struct cpumask *get_big_core_shared_cpu_map(int cpu, struct cache *cache)
cpu               645 arch/powerpc/kernel/cacheinfo.c 		return cpu_smallcore_mask(cpu);
cpu               655 arch/powerpc/kernel/cacheinfo.c 	int ret, cpu;
cpu               661 arch/powerpc/kernel/cacheinfo.c 		cpu = index_dir_to_cpu(index);
cpu               662 arch/powerpc/kernel/cacheinfo.c 		mask = get_big_core_shared_cpu_map(cpu, cache);
cpu               852 arch/powerpc/kernel/cacheinfo.c static void cache_cpu_clear(struct cache *cache, int cpu)
cpu               857 arch/powerpc/kernel/cacheinfo.c 		WARN_ONCE(!cpumask_test_cpu(cpu, &cache->shared_cpu_map),
cpu               859 arch/powerpc/kernel/cacheinfo.c 			  cpu, cache->ofnode,
cpu               862 arch/powerpc/kernel/cacheinfo.c 		cpumask_clear_cpu(cpu, &cache->shared_cpu_map);
cpu               897 arch/powerpc/kernel/cacheinfo.c 	unsigned int cpu;
cpu               901 arch/powerpc/kernel/cacheinfo.c 	for_each_online_cpu(cpu)
cpu               902 arch/powerpc/kernel/cacheinfo.c 		cacheinfo_cpu_offline(cpu);
cpu               907 arch/powerpc/kernel/cacheinfo.c 	unsigned int cpu;
cpu               911 arch/powerpc/kernel/cacheinfo.c 	for_each_online_cpu(cpu)
cpu               912 arch/powerpc/kernel/cacheinfo.c 		cacheinfo_cpu_online(cpu);
cpu                74 arch/powerpc/kernel/crash.c 	int cpu = smp_processor_id();
cpu                77 arch/powerpc/kernel/crash.c 	if (!cpumask_test_cpu(cpu, &cpus_state_saved)) {
cpu                78 arch/powerpc/kernel/crash.c 		crash_save_cpu(regs, cpu);
cpu                79 arch/powerpc/kernel/crash.c 		cpumask_set_cpu(cpu, &cpus_state_saved);
cpu               104 arch/powerpc/kernel/crash.c static void crash_kexec_prepare_cpus(int cpu)
cpu               205 arch/powerpc/kernel/crash.c static void crash_kexec_prepare_cpus(int cpu)
cpu               227 arch/powerpc/kernel/crash.c static void __maybe_unused crash_kexec_wait_realmode(int cpu)
cpu               234 arch/powerpc/kernel/crash.c 		if (i == cpu)
cpu               248 arch/powerpc/kernel/crash.c static inline void crash_kexec_wait_realmode(int cpu) {}
cpu                32 arch/powerpc/kernel/dbell.c void doorbell_global_ipi(int cpu)
cpu                34 arch/powerpc/kernel/dbell.c 	u32 tag = get_hard_smp_processor_id(cpu);
cpu                36 arch/powerpc/kernel/dbell.c 	kvmppc_set_host_ipi(cpu);
cpu                47 arch/powerpc/kernel/dbell.c void doorbell_core_ipi(int cpu)
cpu                49 arch/powerpc/kernel/dbell.c 	u32 tag = cpu_thread_in_core(cpu);
cpu                51 arch/powerpc/kernel/dbell.c 	kvmppc_set_host_ipi(cpu);
cpu                61 arch/powerpc/kernel/dbell.c int doorbell_try_core_ipi(int cpu)
cpu                66 arch/powerpc/kernel/dbell.c 	if (cpumask_test_cpu(cpu, cpu_sibling_mask(this_cpu))) {
cpu                67 arch/powerpc/kernel/dbell.c 		doorbell_core_ipi(cpu);
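doorbell_try_core_ipi() above only takes the doorbell path when the target shares a core with the sending CPU, and doorbell_core_ipi() tags the message with the target's thread number within that core; the int return presumably lets callers fall back to an ordinary IPI, though the fallback itself is not shown in the listing. A hedged standalone sketch of that shape, reusing the assumed THREADS_PER_CORE from the earlier sketch:

#include <stdio.h>

#define THREADS_PER_CORE 8	/* assumed, as in the earlier sketch */

/* Two CPUs share a core if they have the same first thread sibling. */
static int same_core(int a, int b)
{
	return (a & ~(THREADS_PER_CORE - 1)) == (b & ~(THREADS_PER_CORE - 1));
}

/* Sketch of the try_core_ipi shape: take the core-doorbell fast path only
 * when the target shares a core with the sender, otherwise report 0 so the
 * caller can use a regular IPI instead. */
static int try_core_ipi(int this_cpu, int target)
{
	if (!same_core(this_cpu, target))
		return 0;
	printf("doorbell to thread %d of this core\n",
	       target & (THREADS_PER_CORE - 1));
	return 1;
}

int main(void)
{
	printf("%d\n", try_core_ipi(9, 12));	/* same core (CPUs 8..15): 1 */
	printf("%d\n", try_core_ipi(9, 17));	/* different core: 0 */
	return 0;
}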
cpu                85 arch/powerpc/kernel/irq.c u32 tau_interrupts(unsigned long cpu);
cpu               578 arch/powerpc/kernel/irq.c u64 arch_irq_stat_cpu(unsigned int cpu)
cpu               580 arch/powerpc/kernel/irq.c 	u64 sum = per_cpu(irq_stat, cpu).timer_irqs_event;
cpu               582 arch/powerpc/kernel/irq.c 	sum += per_cpu(irq_stat, cpu).broadcast_irqs_event;
cpu               583 arch/powerpc/kernel/irq.c 	sum += per_cpu(irq_stat, cpu).pmu_irqs;
cpu               584 arch/powerpc/kernel/irq.c 	sum += per_cpu(irq_stat, cpu).mce_exceptions;
cpu               585 arch/powerpc/kernel/irq.c 	sum += per_cpu(irq_stat, cpu).spurious_irqs;
cpu               586 arch/powerpc/kernel/irq.c 	sum += per_cpu(irq_stat, cpu).timer_irqs_others;
cpu               587 arch/powerpc/kernel/irq.c 	sum += per_cpu(irq_stat, cpu).hmi_exceptions;
cpu               588 arch/powerpc/kernel/irq.c 	sum += per_cpu(irq_stat, cpu).sreset_irqs;
cpu               590 arch/powerpc/kernel/irq.c 	sum += per_cpu(irq_stat, cpu).soft_nmi_irqs;
cpu               593 arch/powerpc/kernel/irq.c 	sum += per_cpu(irq_stat, cpu).doorbell_irqs;
cpu               209 arch/powerpc/kernel/machine_kexec_64.c 	int cpu = 0;
cpu               211 arch/powerpc/kernel/machine_kexec_64.c 	for_each_present_cpu(cpu) {
cpu               212 arch/powerpc/kernel/machine_kexec_64.c 		if (!cpu_online(cpu)) {
cpu               214 arch/powerpc/kernel/machine_kexec_64.c 			       cpu);
cpu               215 arch/powerpc/kernel/machine_kexec_64.c 			WARN_ON(cpu_up(cpu));
cpu               107 arch/powerpc/kernel/mce.c 	mce->cpu = get_paca()->paca_index;
cpu               527 arch/powerpc/kernel/mce.c 		level, evt->cpu, sevstr, in_guest ? "Guest" : "Host",
cpu               534 arch/powerpc/kernel/mce.c 			level, evt->cpu, current->pid, current->comm,
cpu               538 arch/powerpc/kernel/mce.c 			level, evt->cpu, evt->srr0, (void *)evt->srr0, pa_str);
cpu               541 arch/powerpc/kernel/mce.c 	printk("%sMCE: CPU%d: Initiator %s\n", level, evt->cpu, initiator);
cpu               545 arch/powerpc/kernel/mce.c 	printk("%sMCE: CPU%d: %s\n", level, evt->cpu, subtype);
cpu                27 arch/powerpc/kernel/paca.c 				unsigned long limit, int cpu)
cpu                37 arch/powerpc/kernel/paca.c 	if (cpu == boot_cpuid) {
cpu                41 arch/powerpc/kernel/paca.c 		nid = early_cpu_to_node(cpu);
cpu                49 arch/powerpc/kernel/paca.c 	if (cpu == boot_cpuid)
cpu                60 arch/powerpc/kernel/paca.c 					unsigned long limit, int cpu)
cpu               114 arch/powerpc/kernel/paca.c static struct lppaca * __init new_lppaca(int cpu, unsigned long limit)
cpu               124 arch/powerpc/kernel/paca.c 		lp = alloc_shared_lppaca(LPPACA_SIZE, 0x400, limit, cpu);
cpu               126 arch/powerpc/kernel/paca.c 		lp = alloc_paca_data(LPPACA_SIZE, 0x400, limit, cpu);
cpu               143 arch/powerpc/kernel/paca.c static struct slb_shadow * __init new_slb_shadow(int cpu, unsigned long limit)
cpu               147 arch/powerpc/kernel/paca.c 	if (cpu != boot_cpuid) {
cpu               157 arch/powerpc/kernel/paca.c 	s = alloc_paca_data(sizeof(*s), L1_CACHE_BYTES, limit, cpu);
cpu               179 arch/powerpc/kernel/paca.c void __init __nostackprotector initialise_paca(struct paca_struct *new_paca, int cpu)
cpu               188 arch/powerpc/kernel/paca.c 	new_paca->paca_index = cpu;
cpu               249 arch/powerpc/kernel/paca.c void __init allocate_paca(int cpu)
cpu               254 arch/powerpc/kernel/paca.c 	BUG_ON(cpu >= paca_nr_cpu_ids);
cpu               267 arch/powerpc/kernel/paca.c 				limit, cpu);
cpu               268 arch/powerpc/kernel/paca.c 	paca_ptrs[cpu] = paca;
cpu               270 arch/powerpc/kernel/paca.c 	initialise_paca(paca, cpu);
cpu               272 arch/powerpc/kernel/paca.c 	paca->lppaca_ptr = new_lppaca(cpu, limit);
cpu               275 arch/powerpc/kernel/paca.c 	paca->slb_shadow_ptr = new_slb_shadow(cpu, limit);
cpu              1960 arch/powerpc/kernel/process.c 	unsigned long cpu = task_cpu(p);
cpu              1962 arch/powerpc/kernel/process.c 	stack_page = (unsigned long)hardirq_ctx[cpu];
cpu              1966 arch/powerpc/kernel/process.c 	stack_page = (unsigned long)softirq_ctx[cpu];
cpu               876 arch/powerpc/kernel/prom.c int cpu_to_chip_id(int cpu)
cpu               880 arch/powerpc/kernel/prom.c 	np = of_get_cpu_node(cpu, NULL);
cpu               889 arch/powerpc/kernel/prom.c bool arch_match_cpu_phys_id(int cpu, u64 phys_id)
cpu               898 arch/powerpc/kernel/prom.c 		return (int)phys_id == cpu_to_phys_id[cpu];
cpu               901 arch/powerpc/kernel/prom.c 	return (int)phys_id == get_hard_smp_processor_id(cpu);
cpu               120 arch/powerpc/kernel/prom_init.c 	int cpu;
cpu              2131 arch/powerpc/kernel/prom_init.c 		if (cpu_no != prom.cpu) {
cpu              2688 arch/powerpc/kernel/prom_init.c 	hdr->boot_cpuid_phys = cpu_to_be32(prom.cpu);
cpu              3149 arch/powerpc/kernel/prom_init.c 	prom.cpu = be32_to_cpu(rval);
cpu              3151 arch/powerpc/kernel/prom_init.c 	prom_debug("Booting CPU hw index = %d\n", prom.cpu);
cpu               749 arch/powerpc/kernel/rtas.c 	int cpu;
cpu               772 arch/powerpc/kernel/rtas.c 		for_each_online_cpu(cpu)
cpu               773 arch/powerpc/kernel/rtas.c 			plpar_hcall_norets(H_PROD, get_hard_smp_processor_id(cpu));
cpu               792 arch/powerpc/kernel/rtas.c 	int cpu;
cpu               826 arch/powerpc/kernel/rtas.c 		for_each_online_cpu(cpu)
cpu               827 arch/powerpc/kernel/rtas.c 			plpar_hcall_norets(H_PROD, get_hard_smp_processor_id(cpu));
cpu               867 arch/powerpc/kernel/rtas.c 	int cpu;
cpu               874 arch/powerpc/kernel/rtas.c 	for_each_cpu(cpu, cpus) {
cpu               875 arch/powerpc/kernel/rtas.c 		struct device *dev = get_cpu_device(cpu);
cpu               889 arch/powerpc/kernel/rtas.c 					cpu, cpuret);
cpu               894 arch/powerpc/kernel/rtas.c 				cpumask_shift_right(cpus, cpus, cpu);
cpu               895 arch/powerpc/kernel/rtas.c 				cpumask_shift_left(cpus, cpus, cpu);
cpu               899 arch/powerpc/kernel/rtas.c 				cpumask_clear_cpu(cpu, cpus);
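The cpumask_shift_right()/cpumask_shift_left() pair in rtas.c above clears every bit below the CPU that hit an error, leaving that CPU and all later ones in the mask. On a single word the same idea is just two shifts; a minimal sketch, assuming the mask fits in one unsigned long:

#include <stdio.h>

int main(void)
{
	unsigned long mask = 0xffUL;	/* CPUs 0..7 requested */
	int cpu = 5;			/* error hit while processing CPU 5 */

	/* Shift right then left by 'cpu': drops bits 0..cpu-1, keeps cpu and above. */
	mask = (mask >> cpu) << cpu;

	printf("remaining mask: 0x%lx\n", mask);	/* 0xe0: CPUs 5..7 remain */
	return 0;
}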
cpu               450 arch/powerpc/kernel/rtasd.c 	unsigned int cpu;
cpu               457 arch/powerpc/kernel/rtasd.c 	cpu = cpumask_next(raw_smp_processor_id(), cpu_online_mask);
cpu               458 arch/powerpc/kernel/rtasd.c         if (cpu >= nr_cpu_ids) {
cpu               459 arch/powerpc/kernel/rtasd.c 		cpu = cpumask_first(cpu_online_mask);
cpu               473 arch/powerpc/kernel/rtasd.c 	schedule_delayed_work_on(cpu, &event_scan_work,
cpu               474 arch/powerpc/kernel/rtasd.c 		__round_jiffies_relative(event_scan_delay, cpu));
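rtasd rotates the event scan across online CPUs: take the next online CPU after the current one, and wrap to the first online CPU once the end of the mask is reached. A standalone sketch of the same wrap-around selection over a plain array (illustration only, not the kernel cpumask API):

#include <stdio.h>

#define NR_CPUS 8

/* Illustration: 1 = online. CPUs 2 and 5 are offline in this example. */
static const int online[NR_CPUS] = { 1, 1, 0, 1, 1, 0, 1, 1 };

/* Next online CPU strictly after 'cur', wrapping to the first online CPU. */
static int next_online_cpu(int cur)
{
	for (int cpu = cur + 1; cpu < NR_CPUS; cpu++)
		if (online[cpu])
			return cpu;
	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		if (online[cpu])
			return cpu;
	return -1;	/* no CPU online */
}

int main(void)
{
	int cpu = 6;

	for (int i = 0; i < 5; i++) {
		cpu = next_online_cpu(cpu);
		printf("scan scheduled on CPU %d\n", cpu);
	}
	return 0;	/* prints CPUs 7, 0, 1, 3, 4 in turn */
}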
cpu               449 arch/powerpc/kernel/setup-common.c 	int cpu = 0;
cpu               476 arch/powerpc/kernel/setup-common.c 				cpu_be = cpu_to_be32(cpu);
cpu               485 arch/powerpc/kernel/setup-common.c 		for (j = 0; j < nthreads && cpu < nr_cpu_ids; j++) {
cpu               489 arch/powerpc/kernel/setup-common.c 			    j, cpu, be32_to_cpu(intserv[j]));
cpu               496 arch/powerpc/kernel/setup-common.c 			set_cpu_present(cpu, avail);
cpu               497 arch/powerpc/kernel/setup-common.c 			set_cpu_possible(cpu, true);
cpu               498 arch/powerpc/kernel/setup-common.c 			cpu_to_phys_id[cpu] = be32_to_cpu(intserv[j]);
cpu               499 arch/powerpc/kernel/setup-common.c 			cpu++;
cpu               502 arch/powerpc/kernel/setup-common.c 		if (cpu >= nr_cpu_ids) {
cpu               548 arch/powerpc/kernel/setup-common.c 		for (cpu = 0; cpu < maxcpus; cpu++)
cpu               549 arch/powerpc/kernel/setup-common.c 			set_cpu_possible(cpu, true);
cpu               822 arch/powerpc/kernel/setup-common.c 	int cpu;
cpu               824 arch/powerpc/kernel/setup-common.c 	for_each_possible_cpu(cpu) {
cpu               825 arch/powerpc/kernel/setup-common.c 		if (cpu == smp_processor_id())
cpu               827 arch/powerpc/kernel/setup-common.c 		allocate_paca(cpu);
cpu               828 arch/powerpc/kernel/setup-common.c 		set_hard_smp_processor_id(cpu, cpu_to_phys_id[cpu]);
cpu                68 arch/powerpc/kernel/setup.h u32 cpu_temp(unsigned long cpu);
cpu                69 arch/powerpc/kernel/setup.h u32 cpu_temp_both(unsigned long cpu);
cpu                70 arch/powerpc/kernel/setup.h u32 tau_interrupts(unsigned long cpu);
cpu                95 arch/powerpc/kernel/setup_64.c 	int cpu;
cpu                99 arch/powerpc/kernel/setup_64.c 	for_each_possible_cpu(cpu) {
cpu               100 arch/powerpc/kernel/setup_64.c 		int first = cpu_first_thread_sibling(cpu);
cpu               110 arch/powerpc/kernel/setup_64.c 		paca_ptrs[cpu]->tcd_ptr = &paca_ptrs[first]->tcd;
cpu               571 arch/powerpc/kernel/setup_64.c 	struct device_node *cpu = NULL, *l2, *l3 = NULL;
cpu               591 arch/powerpc/kernel/setup_64.c 		cpu = of_find_node_by_type(NULL, "cpu");
cpu               597 arch/powerpc/kernel/setup_64.c 	if (cpu) {
cpu               598 arch/powerpc/kernel/setup_64.c 		if (!parse_cache_info(cpu, false, &ppc64_caches.l1d))
cpu               601 arch/powerpc/kernel/setup_64.c 		if (!parse_cache_info(cpu, true, &ppc64_caches.l1i))
cpu               608 arch/powerpc/kernel/setup_64.c 		l2 = of_find_next_cache_node(cpu);
cpu               609 arch/powerpc/kernel/setup_64.c 		of_node_put(cpu);
cpu               661 arch/powerpc/kernel/setup_64.c static void *__init alloc_stack(unsigned long limit, int cpu)
cpu               669 arch/powerpc/kernel/setup_64.c 				     early_cpu_to_node(cpu));
cpu               760 arch/powerpc/kernel/setup_64.c static void * __init pcpu_fc_alloc(unsigned int cpu, size_t size, size_t align)
cpu               764 arch/powerpc/kernel/setup_64.c 				      early_cpu_to_node(cpu));
cpu               789 arch/powerpc/kernel/setup_64.c 	unsigned int cpu;
cpu               808 arch/powerpc/kernel/setup_64.c 	for_each_possible_cpu(cpu) {
cpu               809 arch/powerpc/kernel/setup_64.c                 __per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu];
cpu               810 arch/powerpc/kernel/setup_64.c 		paca_ptrs[cpu]->data_offset = __per_cpu_offset[cpu];
cpu               906 arch/powerpc/kernel/setup_64.c 	int cpu;
cpu               939 arch/powerpc/kernel/setup_64.c 	for_each_possible_cpu(cpu) {
cpu               940 arch/powerpc/kernel/setup_64.c 		struct paca_struct *paca = paca_ptrs[cpu];
cpu               246 arch/powerpc/kernel/smp.c void smp_muxed_ipi_set_message(int cpu, int msg)
cpu               248 arch/powerpc/kernel/smp.c 	struct cpu_messages *info = &per_cpu(ipi_message, cpu);
cpu               258 arch/powerpc/kernel/smp.c void smp_muxed_ipi_message_pass(int cpu, int msg)
cpu               260 arch/powerpc/kernel/smp.c 	smp_muxed_ipi_set_message(cpu, msg);
cpu               266 arch/powerpc/kernel/smp.c 	smp_ops->cause_ipi(cpu);
cpu               320 arch/powerpc/kernel/smp.c static inline void do_message_pass(int cpu, int msg)
cpu               323 arch/powerpc/kernel/smp.c 		smp_ops->message_pass(cpu, msg);
cpu               326 arch/powerpc/kernel/smp.c 		smp_muxed_ipi_message_pass(cpu, msg);
cpu               330 arch/powerpc/kernel/smp.c void smp_send_reschedule(int cpu)
cpu               333 arch/powerpc/kernel/smp.c 		do_message_pass(cpu, PPC_MSG_RESCHEDULE);
cpu               337 arch/powerpc/kernel/smp.c void arch_send_call_function_single_ipi(int cpu)
cpu               339 arch/powerpc/kernel/smp.c 	do_message_pass(cpu, PPC_MSG_CALL_FUNCTION);
cpu               344 arch/powerpc/kernel/smp.c 	unsigned int cpu;
cpu               346 arch/powerpc/kernel/smp.c 	for_each_cpu(cpu, mask)
cpu               347 arch/powerpc/kernel/smp.c 		do_message_pass(cpu, PPC_MSG_CALL_FUNCTION);
cpu               438 arch/powerpc/kernel/smp.c static void do_smp_send_nmi_ipi(int cpu, bool safe)
cpu               440 arch/powerpc/kernel/smp.c 	if (!safe && smp_ops->cause_nmi_ipi && smp_ops->cause_nmi_ipi(cpu))
cpu               443 arch/powerpc/kernel/smp.c 	if (cpu >= 0) {
cpu               444 arch/powerpc/kernel/smp.c 		do_message_pass(cpu, PPC_MSG_NMI_IPI);
cpu               462 arch/powerpc/kernel/smp.c static int __smp_send_nmi_ipi(int cpu, void (*fn)(struct pt_regs *),
cpu               469 arch/powerpc/kernel/smp.c 	BUG_ON(cpu == me);
cpu               470 arch/powerpc/kernel/smp.c 	BUG_ON(cpu < 0 && cpu != NMI_IPI_ALL_OTHERS);
cpu               486 arch/powerpc/kernel/smp.c 	if (cpu < 0) {
cpu               491 arch/powerpc/kernel/smp.c 		cpumask_set_cpu(cpu, &nmi_ipi_pending_mask);
cpu               498 arch/powerpc/kernel/smp.c 	do_smp_send_nmi_ipi(cpu, safe);
cpu               527 arch/powerpc/kernel/smp.c int smp_send_nmi_ipi(int cpu, void (*fn)(struct pt_regs *), u64 delay_us)
cpu               529 arch/powerpc/kernel/smp.c 	return __smp_send_nmi_ipi(cpu, fn, delay_us, false);
cpu               532 arch/powerpc/kernel/smp.c int smp_send_safe_nmi_ipi(int cpu, void (*fn)(struct pt_regs *), u64 delay_us)
cpu               534 arch/powerpc/kernel/smp.c 	return __smp_send_nmi_ipi(cpu, fn, delay_us, true);
cpu               541 arch/powerpc/kernel/smp.c 	unsigned int cpu;
cpu               543 arch/powerpc/kernel/smp.c 	for_each_cpu(cpu, mask)
cpu               544 arch/powerpc/kernel/smp.c 		do_message_pass(cpu, PPC_MSG_TICK_BROADCAST);
cpu               563 arch/powerpc/kernel/smp.c 	int cpu;
cpu               567 arch/powerpc/kernel/smp.c 		for_each_present_cpu(cpu) {
cpu               568 arch/powerpc/kernel/smp.c 			if (cpu_online(cpu))
cpu               579 arch/powerpc/kernel/smp.c 			do_smp_send_nmi_ipi(cpu, false);
cpu               755 arch/powerpc/kernel/smp.c static int get_cpu_thread_group_start(int cpu, struct thread_groups *tg)
cpu               757 arch/powerpc/kernel/smp.c 	int hw_cpu_id = get_hard_smp_processor_id(cpu);
cpu               774 arch/powerpc/kernel/smp.c static int init_cpu_l1_cache_map(int cpu)
cpu               777 arch/powerpc/kernel/smp.c 	struct device_node *dn = of_get_cpu_node(cpu, NULL);
cpu               781 arch/powerpc/kernel/smp.c 	int first_thread = cpu_first_thread_sibling(cpu);
cpu               791 arch/powerpc/kernel/smp.c 	zalloc_cpumask_var_node(&per_cpu(cpu_l1_cache_map, cpu),
cpu               793 arch/powerpc/kernel/smp.c 				cpu_to_node(cpu));
cpu               795 arch/powerpc/kernel/smp.c 	cpu_group_start = get_cpu_thread_group_start(cpu, &tg);
cpu               813 arch/powerpc/kernel/smp.c 			cpumask_set_cpu(i, per_cpu(cpu_l1_cache_map, cpu));
cpu               823 arch/powerpc/kernel/smp.c 	int cpu;
cpu               825 arch/powerpc/kernel/smp.c 	for_each_possible_cpu(cpu) {
cpu               826 arch/powerpc/kernel/smp.c 		int err = init_cpu_l1_cache_map(cpu);
cpu               831 arch/powerpc/kernel/smp.c 		zalloc_cpumask_var_node(&per_cpu(cpu_smallcore_map, cpu),
cpu               833 arch/powerpc/kernel/smp.c 					cpu_to_node(cpu));
cpu               842 arch/powerpc/kernel/smp.c 	unsigned int cpu;
cpu               856 arch/powerpc/kernel/smp.c 	for_each_possible_cpu(cpu) {
cpu               857 arch/powerpc/kernel/smp.c 		zalloc_cpumask_var_node(&per_cpu(cpu_sibling_map, cpu),
cpu               858 arch/powerpc/kernel/smp.c 					GFP_KERNEL, cpu_to_node(cpu));
cpu               859 arch/powerpc/kernel/smp.c 		zalloc_cpumask_var_node(&per_cpu(cpu_l2_cache_map, cpu),
cpu               860 arch/powerpc/kernel/smp.c 					GFP_KERNEL, cpu_to_node(cpu));
cpu               861 arch/powerpc/kernel/smp.c 		zalloc_cpumask_var_node(&per_cpu(cpu_core_map, cpu),
cpu               862 arch/powerpc/kernel/smp.c 					GFP_KERNEL, cpu_to_node(cpu));
cpu               866 arch/powerpc/kernel/smp.c 		if (cpu_present(cpu)) {
cpu               867 arch/powerpc/kernel/smp.c 			set_cpu_numa_node(cpu, numa_cpu_lookup_table[cpu]);
cpu               868 arch/powerpc/kernel/smp.c 			set_cpu_numa_mem(cpu,
cpu               869 arch/powerpc/kernel/smp.c 				local_memory_node(numa_cpu_lookup_table[cpu]));
cpu               902 arch/powerpc/kernel/smp.c 	unsigned int cpu = smp_processor_id();
cpu               904 arch/powerpc/kernel/smp.c 	if (cpu == boot_cpuid)
cpu               907 arch/powerpc/kernel/smp.c 	set_cpu_online(cpu, false);
cpu               929 arch/powerpc/kernel/smp.c void generic_cpu_die(unsigned int cpu)
cpu               935 arch/powerpc/kernel/smp.c 		if (is_cpu_dead(cpu))
cpu               939 arch/powerpc/kernel/smp.c 	printk(KERN_ERR "CPU%d didn't die...\n", cpu);
cpu               942 arch/powerpc/kernel/smp.c void generic_set_cpu_dead(unsigned int cpu)
cpu               944 arch/powerpc/kernel/smp.c 	per_cpu(cpu_state, cpu) = CPU_DEAD;
cpu               952 arch/powerpc/kernel/smp.c void generic_set_cpu_up(unsigned int cpu)
cpu               954 arch/powerpc/kernel/smp.c 	per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
cpu               957 arch/powerpc/kernel/smp.c int generic_check_cpu_restart(unsigned int cpu)
cpu               959 arch/powerpc/kernel/smp.c 	return per_cpu(cpu_state, cpu) == CPU_UP_PREPARE;
cpu               962 arch/powerpc/kernel/smp.c int is_cpu_dead(unsigned int cpu)
cpu               964 arch/powerpc/kernel/smp.c 	return per_cpu(cpu_state, cpu) == CPU_DEAD;
cpu               978 arch/powerpc/kernel/smp.c static void cpu_idle_thread_init(unsigned int cpu, struct task_struct *idle)
cpu               981 arch/powerpc/kernel/smp.c 	paca_ptrs[cpu]->__current = idle;
cpu               982 arch/powerpc/kernel/smp.c 	paca_ptrs[cpu]->kstack = (unsigned long)task_stack_page(idle) +
cpu               985 arch/powerpc/kernel/smp.c 	idle->cpu = cpu;
cpu               986 arch/powerpc/kernel/smp.c 	secondary_current = current_set[cpu] = idle;
cpu               989 arch/powerpc/kernel/smp.c int __cpu_up(unsigned int cpu, struct task_struct *tidle)
cpu               997 arch/powerpc/kernel/smp.c 	    cpu_thread_in_subcore(cpu))
cpu              1001 arch/powerpc/kernel/smp.c 	    (smp_ops->cpu_bootable && !smp_ops->cpu_bootable(cpu)))
cpu              1004 arch/powerpc/kernel/smp.c 	cpu_idle_thread_init(cpu, tidle);
cpu              1011 arch/powerpc/kernel/smp.c 		rc = smp_ops->prepare_cpu(cpu);
cpu              1019 arch/powerpc/kernel/smp.c 	cpu_callin_map[cpu] = 0;
cpu              1028 arch/powerpc/kernel/smp.c 	DBG("smp: kicking cpu %d\n", cpu);
cpu              1029 arch/powerpc/kernel/smp.c 	rc = smp_ops->kick_cpu(cpu);
cpu              1031 arch/powerpc/kernel/smp.c 		pr_err("smp: failed starting cpu %d (rc %d)\n", cpu, rc);
cpu              1041 arch/powerpc/kernel/smp.c 		for (c = 50000; c && !cpu_callin_map[cpu]; c--)
cpu              1049 arch/powerpc/kernel/smp.c 		for (c = 5000; c && !cpu_callin_map[cpu]; c--)
cpu              1053 arch/powerpc/kernel/smp.c 	if (!cpu_callin_map[cpu]) {
cpu              1054 arch/powerpc/kernel/smp.c 		printk(KERN_ERR "Processor %u is stuck.\n", cpu);
cpu              1058 arch/powerpc/kernel/smp.c 	DBG("Processor %u found.\n", cpu);
cpu              1064 arch/powerpc/kernel/smp.c 	spin_until_cond(cpu_online(cpu));
cpu              1072 arch/powerpc/kernel/smp.c int cpu_to_core_id(int cpu)
cpu              1078 arch/powerpc/kernel/smp.c 	np = of_get_cpu_node(cpu, NULL);
cpu              1094 arch/powerpc/kernel/smp.c int cpu_core_index_of_thread(int cpu)
cpu              1096 arch/powerpc/kernel/smp.c 	return cpu >> threads_shift;
cpu              1109 arch/powerpc/kernel/smp.c static struct device_node *cpu_to_l2cache(int cpu)
cpu              1114 arch/powerpc/kernel/smp.c 	if (!cpu_present(cpu))
cpu              1117 arch/powerpc/kernel/smp.c 	np = of_get_cpu_node(cpu, NULL);
cpu              1128 arch/powerpc/kernel/smp.c static bool update_mask_by_l2(int cpu, struct cpumask *(*mask_fn)(int))
cpu              1133 arch/powerpc/kernel/smp.c 	l2_cache = cpu_to_l2cache(cpu);
cpu              1147 arch/powerpc/kernel/smp.c 			set_cpus_related(cpu, i, mask_fn);
cpu              1157 arch/powerpc/kernel/smp.c static void remove_cpu_from_masks(int cpu)
cpu              1162 arch/powerpc/kernel/smp.c 	for_each_cpu(i, cpu_core_mask(cpu)) {
cpu              1163 arch/powerpc/kernel/smp.c 		set_cpus_unrelated(cpu, i, cpu_core_mask);
cpu              1164 arch/powerpc/kernel/smp.c 		set_cpus_unrelated(cpu, i, cpu_l2_cache_mask);
cpu              1165 arch/powerpc/kernel/smp.c 		set_cpus_unrelated(cpu, i, cpu_sibling_mask);
cpu              1167 arch/powerpc/kernel/smp.c 			set_cpus_unrelated(cpu, i, cpu_smallcore_mask);
cpu              1172 arch/powerpc/kernel/smp.c static inline void add_cpu_to_smallcore_masks(int cpu)
cpu              1174 arch/powerpc/kernel/smp.c 	struct cpumask *this_l1_cache_map = per_cpu(cpu_l1_cache_map, cpu);
cpu              1175 arch/powerpc/kernel/smp.c 	int i, first_thread = cpu_first_thread_sibling(cpu);
cpu              1180 arch/powerpc/kernel/smp.c 	cpumask_set_cpu(cpu, cpu_smallcore_mask(cpu));
cpu              1184 arch/powerpc/kernel/smp.c 			set_cpus_related(i, cpu, cpu_smallcore_mask);
cpu              1188 arch/powerpc/kernel/smp.c static void add_cpu_to_masks(int cpu)
cpu              1190 arch/powerpc/kernel/smp.c 	int first_thread = cpu_first_thread_sibling(cpu);
cpu              1191 arch/powerpc/kernel/smp.c 	int chipid = cpu_to_chip_id(cpu);
cpu              1198 arch/powerpc/kernel/smp.c 	cpumask_set_cpu(cpu, cpu_sibling_mask(cpu));
cpu              1202 arch/powerpc/kernel/smp.c 			set_cpus_related(i, cpu, cpu_sibling_mask);
cpu              1204 arch/powerpc/kernel/smp.c 	add_cpu_to_smallcore_masks(cpu);
cpu              1209 arch/powerpc/kernel/smp.c 	for_each_cpu(i, cpu_sibling_mask(cpu))
cpu              1210 arch/powerpc/kernel/smp.c 		set_cpus_related(cpu, i, cpu_l2_cache_mask);
cpu              1211 arch/powerpc/kernel/smp.c 	update_mask_by_l2(cpu, cpu_l2_cache_mask);
cpu              1217 arch/powerpc/kernel/smp.c 	for_each_cpu(i, cpu_l2_cache_mask(cpu))
cpu              1218 arch/powerpc/kernel/smp.c 		set_cpus_related(cpu, i, cpu_core_mask);
cpu              1225 arch/powerpc/kernel/smp.c 			set_cpus_related(cpu, i, cpu_core_mask);
cpu              1233 arch/powerpc/kernel/smp.c 	unsigned int cpu = smp_processor_id();
cpu              1239 arch/powerpc/kernel/smp.c 	smp_store_cpu_info(cpu);
cpu              1242 arch/powerpc/kernel/smp.c 	cpu_callin_map[cpu] = 1;
cpu              1245 arch/powerpc/kernel/smp.c 		smp_ops->setup_cpu(cpu);
cpu              1258 arch/powerpc/kernel/smp.c 	add_cpu_to_masks(cpu);
cpu              1266 arch/powerpc/kernel/smp.c 	if (!cpumask_equal(cpu_l2_cache_mask(cpu), sibling_mask(cpu)))
cpu              1269 arch/powerpc/kernel/smp.c 	set_numa_node(numa_cpu_lookup_table[cpu]);
cpu              1270 arch/powerpc/kernel/smp.c 	set_numa_mem(local_memory_node(numa_cpu_lookup_table[cpu]));
cpu              1273 arch/powerpc/kernel/smp.c 	notify_cpu_starting(cpu);
cpu              1274 arch/powerpc/kernel/smp.c 	set_cpu_online(cpu, true);
cpu              1330 arch/powerpc/kernel/smp.c static const struct cpumask *shared_cache_mask(int cpu)
cpu              1332 arch/powerpc/kernel/smp.c 	return cpu_l2_cache_mask(cpu);
cpu              1336 arch/powerpc/kernel/smp.c static const struct cpumask *smallcore_smt_mask(int cpu)
cpu              1338 arch/powerpc/kernel/smp.c 	return cpu_smallcore_mask(cpu);
cpu              1392 arch/powerpc/kernel/smp.c 	int cpu = smp_processor_id();
cpu              1405 arch/powerpc/kernel/smp.c 	remove_cpu_from_masks(cpu);
cpu              1410 arch/powerpc/kernel/smp.c void __cpu_die(unsigned int cpu)
cpu              1413 arch/powerpc/kernel/smp.c 		smp_ops->cpu_die(cpu);
cpu               233 arch/powerpc/kernel/stacktrace.c 	unsigned int cpu;
cpu               235 arch/powerpc/kernel/stacktrace.c 	for_each_cpu(cpu, mask) {
cpu               236 arch/powerpc/kernel/stacktrace.c 		if (cpu == smp_processor_id())
cpu               239 arch/powerpc/kernel/stacktrace.c 			smp_send_safe_nmi_ipi(cpu, handle_backtrace_ipi, 5 * USEC_PER_SEC);
cpu               242 arch/powerpc/kernel/stacktrace.c 	for_each_cpu(cpu, mask) {
cpu               243 arch/powerpc/kernel/stacktrace.c 		struct paca_struct *p = paca_ptrs[cpu];
cpu               245 arch/powerpc/kernel/stacktrace.c 		cpumask_clear_cpu(cpu, mask);
cpu               247 arch/powerpc/kernel/stacktrace.c 		pr_warn("CPU %d didn't respond to backtrace IPI, inspecting paca.\n", cpu);
cpu                32 arch/powerpc/kernel/sysfs.c static DEFINE_PER_CPU(struct cpu, cpu_devices);
cpu                48 arch/powerpc/kernel/sysfs.c 	struct cpu *cpu = container_of(dev, struct cpu, dev);
cpu                56 arch/powerpc/kernel/sysfs.c 	per_cpu(smt_snooze_delay, cpu->dev.id) = snooze;
cpu                64 arch/powerpc/kernel/sysfs.c 	struct cpu *cpu = container_of(dev, struct cpu, dev);
cpu                66 arch/powerpc/kernel/sysfs.c 	return sprintf(buf, "%ld\n", per_cpu(smt_snooze_delay, cpu->dev.id));
cpu                74 arch/powerpc/kernel/sysfs.c 	unsigned int cpu;
cpu                81 arch/powerpc/kernel/sysfs.c 	for_each_possible_cpu(cpu)
cpu                82 arch/powerpc/kernel/sysfs.c 		per_cpu(smt_snooze_delay, cpu) = snooze;
cpu               122 arch/powerpc/kernel/sysfs.c 	unsigned int cpu = dev->id;
cpu               124 arch/powerpc/kernel/sysfs.c 	smp_call_function_single(cpu, do_show_pwrmgtcr0, &value, 1);
cpu               151 arch/powerpc/kernel/sysfs.c 	unsigned int cpu = dev->id;
cpu               159 arch/powerpc/kernel/sysfs.c 	smp_call_function_single(cpu, do_store_pw20_state, &value, 1);
cpu               171 arch/powerpc/kernel/sysfs.c 	unsigned int cpu = dev->id;
cpu               174 arch/powerpc/kernel/sysfs.c 		smp_call_function_single(cpu, do_show_pwrmgtcr0, &value, 1);
cpu               220 arch/powerpc/kernel/sysfs.c 	unsigned int cpu = dev->id;
cpu               234 arch/powerpc/kernel/sysfs.c 	smp_call_function_single(cpu, set_pw20_wait_entry_bit,
cpu               244 arch/powerpc/kernel/sysfs.c 	unsigned int cpu = dev->id;
cpu               246 arch/powerpc/kernel/sysfs.c 	smp_call_function_single(cpu, do_show_pwrmgtcr0, &value, 1);
cpu               273 arch/powerpc/kernel/sysfs.c 	unsigned int cpu = dev->id;
cpu               281 arch/powerpc/kernel/sysfs.c 	smp_call_function_single(cpu, do_store_altivec_idle, &value, 1);
cpu               293 arch/powerpc/kernel/sysfs.c 	unsigned int cpu = dev->id;
cpu               296 arch/powerpc/kernel/sysfs.c 		smp_call_function_single(cpu, do_show_pwrmgtcr0, &value, 1);
cpu               342 arch/powerpc/kernel/sysfs.c 	unsigned int cpu = dev->id;
cpu               356 arch/powerpc/kernel/sysfs.c 	smp_call_function_single(cpu, set_altivec_idle_wait_entry_bit,
cpu               426 arch/powerpc/kernel/sysfs.c 	struct cpu *cpu = container_of(dev, struct cpu, dev); \
cpu               428 arch/powerpc/kernel/sysfs.c 	smp_call_function_single(cpu->dev.id, read_##NAME, &val, 1);	\
cpu               435 arch/powerpc/kernel/sysfs.c 	struct cpu *cpu = container_of(dev, struct cpu, dev); \
cpu               440 arch/powerpc/kernel/sysfs.c 	smp_call_function_single(cpu->dev.id, write_##NAME, &val, 1); \
cpu               596 arch/powerpc/kernel/sysfs.c 		int cpu;
cpu               599 arch/powerpc/kernel/sysfs.c 		for_each_possible_cpu(cpu)
cpu               600 arch/powerpc/kernel/sysfs.c 			paca_ptrs[cpu]->dscr_default = dscr_default;
cpu               736 arch/powerpc/kernel/sysfs.c static int register_cpu_online(unsigned int cpu)
cpu               738 arch/powerpc/kernel/sysfs.c 	struct cpu *c = &per_cpu(cpu_devices, cpu);
cpu               745 arch/powerpc/kernel/sysfs.c 		s->of_node = of_get_cpu_node(cpu, NULL);
cpu               822 arch/powerpc/kernel/sysfs.c 	cacheinfo_cpu_online(cpu);
cpu               827 arch/powerpc/kernel/sysfs.c static int unregister_cpu_online(unsigned int cpu)
cpu               829 arch/powerpc/kernel/sysfs.c 	struct cpu *c = &per_cpu(cpu_devices, cpu);
cpu               908 arch/powerpc/kernel/sysfs.c 	cacheinfo_cpu_offline(cpu);
cpu               939 arch/powerpc/kernel/sysfs.c 	int cpu;
cpu               943 arch/powerpc/kernel/sysfs.c 	for_each_possible_cpu(cpu) {
cpu               944 arch/powerpc/kernel/sysfs.c 		device_create_file(get_cpu_device(cpu), attr);
cpu               954 arch/powerpc/kernel/sysfs.c 	int cpu;
cpu               960 arch/powerpc/kernel/sysfs.c 	for_each_possible_cpu(cpu) {
cpu               961 arch/powerpc/kernel/sysfs.c 		dev = get_cpu_device(cpu);
cpu               974 arch/powerpc/kernel/sysfs.c 	int cpu;
cpu               978 arch/powerpc/kernel/sysfs.c 	for_each_possible_cpu(cpu) {
cpu               979 arch/powerpc/kernel/sysfs.c 		device_remove_file(get_cpu_device(cpu), attr);
cpu               988 arch/powerpc/kernel/sysfs.c 	int cpu;
cpu               993 arch/powerpc/kernel/sysfs.c 	for_each_possible_cpu(cpu) {
cpu               994 arch/powerpc/kernel/sysfs.c 		dev = get_cpu_device(cpu);
cpu              1041 arch/powerpc/kernel/sysfs.c 	struct cpu *cpu = container_of(dev, struct cpu, dev);
cpu              1043 arch/powerpc/kernel/sysfs.c 	return sprintf(buf, "%d\n", get_hard_smp_processor_id(cpu->dev.id));
cpu              1049 arch/powerpc/kernel/sysfs.c 	int cpu, r;
cpu              1053 arch/powerpc/kernel/sysfs.c 	for_each_possible_cpu(cpu) {
cpu              1054 arch/powerpc/kernel/sysfs.c 		struct cpu *c = &per_cpu(cpu_devices, cpu);
cpu              1066 arch/powerpc/kernel/sysfs.c 		if (cpu_online(cpu) || c->hotpluggable) {
cpu              1067 arch/powerpc/kernel/sysfs.c 			register_cpu(c, cpu);
cpu                56 arch/powerpc/kernel/tau_6xx.c static void set_thresholds(unsigned long cpu)
cpu                63 arch/powerpc/kernel/tau_6xx.c 	mtspr(SPRN_THRM1, THRM1_THRES(tau[cpu].low) | THRM1_V | THRM1_TIE | THRM1_TID);
cpu                68 arch/powerpc/kernel/tau_6xx.c 	mtspr (SPRN_THRM2, THRM1_THRES(tau[cpu].high) | THRM1_V | THRM1_TIE);
cpu                71 arch/powerpc/kernel/tau_6xx.c 	mtspr(SPRN_THRM1, THRM1_THRES(tau[cpu].low) | THRM1_V | THRM1_TID);
cpu                72 arch/powerpc/kernel/tau_6xx.c 	mtspr(SPRN_THRM2, THRM1_THRES(tau[cpu].high) | THRM1_V);
cpu                76 arch/powerpc/kernel/tau_6xx.c static void TAUupdate(int cpu)
cpu                88 arch/powerpc/kernel/tau_6xx.c 			if (tau[cpu].low >= step_size){
cpu                89 arch/powerpc/kernel/tau_6xx.c 				tau[cpu].low -= step_size;
cpu                90 arch/powerpc/kernel/tau_6xx.c 				tau[cpu].high -= (step_size - window_expand);
cpu                92 arch/powerpc/kernel/tau_6xx.c 			tau[cpu].grew = 1;
cpu               100 arch/powerpc/kernel/tau_6xx.c 			if (tau[cpu].high <= 127-step_size){
cpu               101 arch/powerpc/kernel/tau_6xx.c 				tau[cpu].low += (step_size - window_expand);
cpu               102 arch/powerpc/kernel/tau_6xx.c 				tau[cpu].high += step_size;
cpu               104 arch/powerpc/kernel/tau_6xx.c 			tau[cpu].grew = 1;
cpu               112 arch/powerpc/kernel/tau_6xx.c 	printk("grew = %d\n", tau[cpu].grew);
cpu               116 arch/powerpc/kernel/tau_6xx.c 	set_thresholds(cpu);
cpu               129 arch/powerpc/kernel/tau_6xx.c 	int cpu = smp_processor_id();
cpu               132 arch/powerpc/kernel/tau_6xx.c 	tau[cpu].interrupts++;
cpu               134 arch/powerpc/kernel/tau_6xx.c 	TAUupdate(cpu);
cpu               142 arch/powerpc/kernel/tau_6xx.c 	int cpu;
cpu               149 arch/powerpc/kernel/tau_6xx.c 	cpu = smp_processor_id();
cpu               152 arch/powerpc/kernel/tau_6xx.c 	TAUupdate(cpu);
cpu               155 arch/powerpc/kernel/tau_6xx.c 	size = tau[cpu].high - tau[cpu].low;
cpu               156 arch/powerpc/kernel/tau_6xx.c 	if (size > min_window && ! tau[cpu].grew) {
cpu               160 arch/powerpc/kernel/tau_6xx.c 			tau[cpu].low += shrink;
cpu               161 arch/powerpc/kernel/tau_6xx.c 			tau[cpu].high -= shrink;
cpu               163 arch/powerpc/kernel/tau_6xx.c 			tau[cpu].low += 1;
cpu               165 arch/powerpc/kernel/tau_6xx.c 			if ((tau[cpu].high - tau[cpu].low) != min_window){
cpu               172 arch/powerpc/kernel/tau_6xx.c 	tau[cpu].grew = 0;
cpu               174 arch/powerpc/kernel/tau_6xx.c 	set_thresholds(cpu);
cpu               213 arch/powerpc/kernel/tau_6xx.c 	unsigned long cpu = smp_processor_id();
cpu               217 arch/powerpc/kernel/tau_6xx.c 	tau[cpu].low = 5;
cpu               218 arch/powerpc/kernel/tau_6xx.c 	tau[cpu].high = 120;
cpu               220 arch/powerpc/kernel/tau_6xx.c 	set_thresholds(cpu);
cpu               260 arch/powerpc/kernel/tau_6xx.c u32 cpu_temp_both(unsigned long cpu)
cpu               262 arch/powerpc/kernel/tau_6xx.c 	return ((tau[cpu].high << 16) | tau[cpu].low);
cpu               265 arch/powerpc/kernel/tau_6xx.c u32 cpu_temp(unsigned long cpu)
cpu               267 arch/powerpc/kernel/tau_6xx.c 	return ((tau[cpu].high + tau[cpu].low) / 2);
cpu               270 arch/powerpc/kernel/tau_6xx.c u32 tau_interrupts(unsigned long cpu)
cpu               272 arch/powerpc/kernel/tau_6xx.c 	return (tau[cpu].interrupts);
cpu               745 arch/powerpc/kernel/time.c 	struct device_node *cpu;
cpu               750 arch/powerpc/kernel/time.c 	cpu = of_find_node_by_type(NULL, "cpu");
cpu               752 arch/powerpc/kernel/time.c 	if (cpu) {
cpu               753 arch/powerpc/kernel/time.c 		fp = of_get_property(cpu, name, NULL);
cpu               759 arch/powerpc/kernel/time.c 		of_node_put(cpu);
cpu               993 arch/powerpc/kernel/time.c static void register_decrementer_clockevent(int cpu)
cpu               995 arch/powerpc/kernel/time.c 	struct clock_event_device *dec = &per_cpu(decrementers, cpu);
cpu               998 arch/powerpc/kernel/time.c 	dec->cpumask = cpumask_of(cpu);
cpu              1003 arch/powerpc/kernel/time.c 		    dec->name, dec->mult, dec->shift, cpu);
cpu              1028 arch/powerpc/kernel/time.c 	struct device_node *cpu;
cpu              1035 arch/powerpc/kernel/time.c 	cpu = of_find_node_by_type(NULL, "cpu");
cpu              1037 arch/powerpc/kernel/time.c 	if (of_property_read_u32(cpu, "ibm,dec-bits", &bits) == 0) {
cpu              1047 arch/powerpc/kernel/time.c 	of_node_put(cpu);
cpu               183 arch/powerpc/kernel/traps.c 	int cpu;
cpu               190 arch/powerpc/kernel/traps.c 	cpu = smp_processor_id();
cpu               192 arch/powerpc/kernel/traps.c 		if (cpu == die_owner)
cpu               198 arch/powerpc/kernel/traps.c 	die_owner = cpu;
cpu               673 arch/powerpc/kernel/vdso.c 	unsigned long cpu, node, val;
cpu               679 arch/powerpc/kernel/vdso.c 	cpu = get_cpu();
cpu               680 arch/powerpc/kernel/vdso.c 	WARN_ON_ONCE(cpu > 0xffff);
cpu               682 arch/powerpc/kernel/vdso.c 	node = cpu_to_node(cpu);
cpu               685 arch/powerpc/kernel/vdso.c 	val = (cpu & 0xfff) | ((node & 0xffff) << 16);
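The vdso getcpu setup above packs the CPU number into bits 0..11 and the NUMA node into bits 16..31 of a single value. A minimal sketch of that packing together with the obvious inverse decode (the field layout is taken from the line above; the decode side is an assumption shown only for illustration):

#include <stdio.h>

/* Pack as in the vdso.c line above: CPU in bits 0..11, node in bits 16..31. */
static unsigned long pack_cpu_node(unsigned long cpu, unsigned long node)
{
	return (cpu & 0xfff) | ((node & 0xffff) << 16);
}

int main(void)
{
	unsigned long val = pack_cpu_node(37, 2);

	/* Unpack with the inverse masks and shifts. */
	printf("val=0x%lx cpu=%lu node=%lu\n",
	       val, val & 0xfff, (val >> 16) & 0xffff);
	return 0;	/* val=0x20025 cpu=37 node=2 */
}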
cpu               114 arch/powerpc/kernel/watchdog.c 	int cpu = raw_smp_processor_id();
cpu               117 arch/powerpc/kernel/watchdog.c 	pr_emerg("CPU %d Hard LOCKUP\n", cpu);
cpu               119 arch/powerpc/kernel/watchdog.c 		 cpu, tb, per_cpu(wd_timer_tb, cpu),
cpu               120 arch/powerpc/kernel/watchdog.c 		 tb_to_ns(tb - per_cpu(wd_timer_tb, cpu)) / 1000000);
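The hard-lockup report converts a timebase delta to milliseconds by way of nanoseconds (tb_to_ns(delta) / 1000000). A worked sketch of the same arithmetic, assuming a 512 MHz timebase; that frequency is an assumed figure for illustration and not taken from the listing:

#include <stdio.h>

int main(void)
{
	/* Assumed timebase frequency: 512,000,000 ticks per second. */
	const unsigned long long tb_hz = 512000000ULL;
	unsigned long long ticks = 5120000000ULL;	/* 10 seconds of ticks */

	/* ticks -> ns -> ms, mirroring tb_to_ns(delta) / 1000000 */
	unsigned long long ns = ticks * 1000000000ULL / tb_hz;
	unsigned long long ms = ns / 1000000ULL;

	printf("%llu ticks = %llu ns = %llu ms\n", ticks, ns, ms);
	return 0;	/* 5120000000 ticks = 10000000000 ns = 10000 ms */
}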
cpu               142 arch/powerpc/kernel/watchdog.c static void set_cpu_stuck(int cpu, u64 tb)
cpu               144 arch/powerpc/kernel/watchdog.c 	set_cpumask_stuck(cpumask_of(cpu), tb);
cpu               147 arch/powerpc/kernel/watchdog.c static void watchdog_smp_panic(int cpu, u64 tb)
cpu               156 arch/powerpc/kernel/watchdog.c 	if (cpumask_test_cpu(cpu, &wd_smp_cpus_pending))
cpu               162 arch/powerpc/kernel/watchdog.c 		 cpu, cpumask_pr_args(&wd_smp_cpus_pending));
cpu               164 arch/powerpc/kernel/watchdog.c 		 cpu, tb, wd_smp_last_reset_tb,
cpu               173 arch/powerpc/kernel/watchdog.c 			if (c == cpu)
cpu               201 arch/powerpc/kernel/watchdog.c static void wd_smp_clear_cpu_pending(int cpu, u64 tb)
cpu               203 arch/powerpc/kernel/watchdog.c 	if (!cpumask_test_cpu(cpu, &wd_smp_cpus_pending)) {
cpu               204 arch/powerpc/kernel/watchdog.c 		if (unlikely(cpumask_test_cpu(cpu, &wd_smp_cpus_stuck))) {
cpu               211 arch/powerpc/kernel/watchdog.c 				 cpu, tb);
cpu               218 arch/powerpc/kernel/watchdog.c 			cpumask_clear_cpu(cpu, &wd_smp_cpus_stuck);
cpu               223 arch/powerpc/kernel/watchdog.c 	cpumask_clear_cpu(cpu, &wd_smp_cpus_pending);
cpu               238 arch/powerpc/kernel/watchdog.c static void watchdog_timer_interrupt(int cpu)
cpu               242 arch/powerpc/kernel/watchdog.c 	per_cpu(wd_timer_tb, cpu) = tb;
cpu               244 arch/powerpc/kernel/watchdog.c 	wd_smp_clear_cpu_pending(cpu, tb);
cpu               247 arch/powerpc/kernel/watchdog.c 		watchdog_smp_panic(cpu, tb);
cpu               253 arch/powerpc/kernel/watchdog.c 	int cpu = raw_smp_processor_id();
cpu               256 arch/powerpc/kernel/watchdog.c 	if (!cpumask_test_cpu(cpu, &wd_cpus_enabled))
cpu               264 arch/powerpc/kernel/watchdog.c 	if (tb - per_cpu(wd_timer_tb, cpu) >= wd_panic_timeout_tb) {
cpu               266 arch/powerpc/kernel/watchdog.c 		if (cpumask_test_cpu(cpu, &wd_smp_cpus_stuck)) {
cpu               270 arch/powerpc/kernel/watchdog.c 		set_cpu_stuck(cpu, tb);
cpu               273 arch/powerpc/kernel/watchdog.c 			 cpu, (void *)regs->nip);
cpu               275 arch/powerpc/kernel/watchdog.c 			 cpu, tb, per_cpu(wd_timer_tb, cpu),
cpu               276 arch/powerpc/kernel/watchdog.c 			 tb_to_ns(tb - per_cpu(wd_timer_tb, cpu)) / 1000000);
cpu               298 arch/powerpc/kernel/watchdog.c 	int cpu = smp_processor_id();
cpu               303 arch/powerpc/kernel/watchdog.c 	if (!cpumask_test_cpu(cpu, &watchdog_cpumask))
cpu               306 arch/powerpc/kernel/watchdog.c 	watchdog_timer_interrupt(cpu);
cpu               316 arch/powerpc/kernel/watchdog.c 	int cpu = smp_processor_id();
cpu               319 arch/powerpc/kernel/watchdog.c 	if (tb - per_cpu(wd_timer_tb, cpu) >= ticks) {
cpu               320 arch/powerpc/kernel/watchdog.c 		per_cpu(wd_timer_tb, cpu) = tb;
cpu               321 arch/powerpc/kernel/watchdog.c 		wd_smp_clear_cpu_pending(cpu, tb);
cpu               329 arch/powerpc/kernel/watchdog.c 	int cpu = smp_processor_id();
cpu               332 arch/powerpc/kernel/watchdog.c 	if (cpumask_test_cpu(cpu, &wd_cpus_enabled)) {
cpu               340 arch/powerpc/kernel/watchdog.c 	if (!cpumask_test_cpu(cpu, &watchdog_cpumask))
cpu               344 arch/powerpc/kernel/watchdog.c 	cpumask_set_cpu(cpu, &wd_cpus_enabled);
cpu               346 arch/powerpc/kernel/watchdog.c 		cpumask_set_cpu(cpu, &wd_smp_cpus_pending);
cpu               359 arch/powerpc/kernel/watchdog.c static int start_watchdog_on_cpu(unsigned int cpu)
cpu               361 arch/powerpc/kernel/watchdog.c 	return smp_call_function_single(cpu, start_watchdog, NULL, true);
cpu               367 arch/powerpc/kernel/watchdog.c 	int cpu = smp_processor_id();
cpu               370 arch/powerpc/kernel/watchdog.c 	if (!cpumask_test_cpu(cpu, &wd_cpus_enabled))
cpu               376 arch/powerpc/kernel/watchdog.c 	cpumask_clear_cpu(cpu, &wd_cpus_enabled);
cpu               379 arch/powerpc/kernel/watchdog.c 	wd_smp_clear_cpu_pending(cpu, get_tb());
cpu               382 arch/powerpc/kernel/watchdog.c static int stop_watchdog_on_cpu(unsigned int cpu)
cpu               384 arch/powerpc/kernel/watchdog.c 	return smp_call_function_single(cpu, stop_watchdog, NULL, true);
cpu               400 arch/powerpc/kernel/watchdog.c 	int cpu;
cpu               402 arch/powerpc/kernel/watchdog.c 	for_each_cpu(cpu, &wd_cpus_enabled)
cpu               403 arch/powerpc/kernel/watchdog.c 		stop_watchdog_on_cpu(cpu);
cpu               408 arch/powerpc/kernel/watchdog.c 	int cpu;
cpu               411 arch/powerpc/kernel/watchdog.c 	for_each_cpu_and(cpu, cpu_online_mask, &watchdog_cpumask)
cpu               412 arch/powerpc/kernel/watchdog.c 		start_watchdog_on_cpu(cpu);
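
The watchdog.c entries above follow a common per-CPU control pattern: a cpumask of CPUs allowed to run the handler, a cpumask of CPUs currently running it, and smp_call_function_single() to execute the start/stop routine on the target CPU itself. A minimal sketch of that pattern, with illustrative names (not the actual powerpc watchdog symbols):

#include <linux/cpumask.h>
#include <linux/smp.h>

static cpumask_t my_allowed_cpus;	/* CPUs permitted to run the handler */
static cpumask_t my_enabled_cpus;	/* CPUs currently running it */

/* Runs on the target CPU itself. */
static void my_start(void *arg)
{
	int cpu = smp_processor_id();

	if (cpumask_test_cpu(cpu, &my_enabled_cpus))
		return;			/* already enabled here */
	cpumask_set_cpu(cpu, &my_enabled_cpus);
}

static int my_start_on_cpu(unsigned int cpu)
{
	/* run my_start() on @cpu and wait for it to complete */
	return smp_call_function_single(cpu, my_start, NULL, true);
}

static void my_start_all(void)
{
	int cpu;

	for_each_cpu_and(cpu, cpu_online_mask, &my_allowed_cpus)
		my_start_on_cpu(cpu);
}
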
cpu               775 arch/powerpc/kvm/book3s.c void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
cpu               777 arch/powerpc/kvm/book3s.c 	vcpu->kvm->arch.kvm_ops->vcpu_load(vcpu, cpu);
cpu               185 arch/powerpc/kvm/book3s_hv.c static bool kvmppc_ipi_thread(int cpu)
cpu               195 arch/powerpc/kvm/book3s_hv.c 		msg |= get_hard_smp_processor_id(cpu);
cpu               204 arch/powerpc/kvm/book3s_hv.c 		if (cpu_first_thread_sibling(cpu) ==
cpu               206 arch/powerpc/kvm/book3s_hv.c 			msg |= cpu_thread_in_core(cpu);
cpu               216 arch/powerpc/kvm/book3s_hv.c 	if (cpu >= 0 && cpu < nr_cpu_ids) {
cpu               217 arch/powerpc/kvm/book3s_hv.c 		if (paca_ptrs[cpu]->kvm_hstate.xics_phys) {
cpu               218 arch/powerpc/kvm/book3s_hv.c 			xics_wake_cpu(cpu);
cpu               221 arch/powerpc/kvm/book3s_hv.c 		opal_int_set_mfrr(get_hard_smp_processor_id(cpu), IPI_PRIORITY);
cpu               231 arch/powerpc/kvm/book3s_hv.c 	int cpu;
cpu               240 arch/powerpc/kvm/book3s_hv.c 	cpu = READ_ONCE(vcpu->arch.thread_cpu);
cpu               241 arch/powerpc/kvm/book3s_hv.c 	if (cpu >= 0 && kvmppc_ipi_thread(cpu))
cpu               245 arch/powerpc/kvm/book3s_hv.c 	cpu = vcpu->cpu;
cpu               246 arch/powerpc/kvm/book3s_hv.c 	if (cpu >= 0 && cpu < nr_cpu_ids && cpu_online(cpu))
cpu               247 arch/powerpc/kvm/book3s_hv.c 		smp_send_reschedule(cpu);
cpu               304 arch/powerpc/kvm/book3s_hv.c static void kvmppc_core_vcpu_load_hv(struct kvm_vcpu *vcpu, int cpu)
cpu              1163 arch/powerpc/kvm/book3s_hv.c 	int thr, cpu, pcpu, nthreads;
cpu              1169 arch/powerpc/kvm/book3s_hv.c 	cpu = vcpu->vcpu_id & ~(nthreads - 1);
cpu              1170 arch/powerpc/kvm/book3s_hv.c 	for (thr = 0; thr < nthreads; ++thr, ++cpu) {
cpu              1171 arch/powerpc/kvm/book3s_hv.c 		v = kvmppc_find_vcpu(vcpu->kvm, cpu);
cpu              1179 arch/powerpc/kvm/book3s_hv.c 		pcpu = READ_ONCE(v->cpu);
cpu              2488 arch/powerpc/kvm/book3s_hv.c static int kvmppc_grab_hwthread(int cpu)
cpu              2493 arch/powerpc/kvm/book3s_hv.c 	tpaca = paca_ptrs[cpu];
cpu              2514 arch/powerpc/kvm/book3s_hv.c 			pr_err("KVM: couldn't grab cpu %d\n", cpu);
cpu              2522 arch/powerpc/kvm/book3s_hv.c static void kvmppc_release_hwthread(int cpu)
cpu              2526 arch/powerpc/kvm/book3s_hv.c 	tpaca = paca_ptrs[cpu];
cpu              2533 arch/powerpc/kvm/book3s_hv.c static void radix_flush_cpu(struct kvm *kvm, int cpu, struct kvm_vcpu *vcpu)
cpu              2539 arch/powerpc/kvm/book3s_hv.c 	cpu = cpu_first_thread_sibling(cpu);
cpu              2541 arch/powerpc/kvm/book3s_hv.c 		cpumask_set_cpu(cpu, &nested->need_tlb_flush);
cpu              2544 arch/powerpc/kvm/book3s_hv.c 		cpumask_set_cpu(cpu, &kvm->arch.need_tlb_flush);
cpu              2554 arch/powerpc/kvm/book3s_hv.c 		if (cpumask_test_cpu(cpu + i, cpu_in_guest))
cpu              2555 arch/powerpc/kvm/book3s_hv.c 			smp_call_function_single(cpu + i, do_nothing, NULL, 1);
cpu              2598 arch/powerpc/kvm/book3s_hv.c 	int cpu;
cpu              2602 arch/powerpc/kvm/book3s_hv.c 	cpu = vc->pcpu;
cpu              2608 arch/powerpc/kvm/book3s_hv.c 		cpu += vcpu->arch.ptid;
cpu              2609 arch/powerpc/kvm/book3s_hv.c 		vcpu->cpu = vc->pcpu;
cpu              2610 arch/powerpc/kvm/book3s_hv.c 		vcpu->arch.thread_cpu = cpu;
cpu              2611 arch/powerpc/kvm/book3s_hv.c 		cpumask_set_cpu(cpu, &kvm->arch.cpu_in_guest);
cpu              2613 arch/powerpc/kvm/book3s_hv.c 	tpaca = paca_ptrs[cpu];
cpu              2615 arch/powerpc/kvm/book3s_hv.c 	tpaca->kvm_hstate.ptid = cpu - vc->pcpu;
cpu              2620 arch/powerpc/kvm/book3s_hv.c 	if (cpu != smp_processor_id())
cpu              2621 arch/powerpc/kvm/book3s_hv.c 		kvmppc_ipi_thread(cpu);
cpu              2626 arch/powerpc/kvm/book3s_hv.c 	int cpu = smp_processor_id();
cpu              2639 arch/powerpc/kvm/book3s_hv.c 			if (paca_ptrs[cpu + i]->kvm_hstate.kvm_vcore)
cpu              2649 arch/powerpc/kvm/book3s_hv.c 		if (paca_ptrs[cpu + i]->kvm_hstate.kvm_vcore)
cpu              2650 arch/powerpc/kvm/book3s_hv.c 			pr_err("KVM: CPU %d seems to be stuck\n", cpu + i);
cpu              2660 arch/powerpc/kvm/book3s_hv.c 	int cpu = smp_processor_id();
cpu              2664 arch/powerpc/kvm/book3s_hv.c 	if (cpu_thread_in_subcore(cpu))
cpu              2669 arch/powerpc/kvm/book3s_hv.c 		if (cpu_online(cpu + thr))
cpu              2674 arch/powerpc/kvm/book3s_hv.c 		if (kvmppc_grab_hwthread(cpu + thr)) {
cpu              2677 arch/powerpc/kvm/book3s_hv.c 				kvmppc_release_hwthread(cpu + thr);
cpu              2699 arch/powerpc/kvm/book3s_hv.c 	int cpu;
cpu              2701 arch/powerpc/kvm/book3s_hv.c 	for_each_possible_cpu(cpu) {
cpu              2702 arch/powerpc/kvm/book3s_hv.c 		struct preempted_vcore_list *lp = &per_cpu(preempted_vcores, cpu);
cpu              2983 arch/powerpc/kvm/book3s_hv.c static inline int kvmppc_clear_host_core(unsigned int cpu)
cpu              2987 arch/powerpc/kvm/book3s_hv.c 	if (!kvmppc_host_rm_ops_hv || cpu_thread_in_core(cpu))
cpu              2994 arch/powerpc/kvm/book3s_hv.c 	core = cpu >> threads_shift;
cpu              3004 arch/powerpc/kvm/book3s_hv.c static inline int kvmppc_set_host_core(unsigned int cpu)
cpu              3008 arch/powerpc/kvm/book3s_hv.c 	if (!kvmppc_host_rm_ops_hv || cpu_thread_in_core(cpu))
cpu              3015 arch/powerpc/kvm/book3s_hv.c 	core = cpu >> threads_shift;
cpu              3639 arch/powerpc/kvm/book3s_hv.c 	vcpu->cpu = -1;
cpu              4719 arch/powerpc/kvm/book3s_hv.c 	int cpu, core;
cpu              4740 arch/powerpc/kvm/book3s_hv.c 	for (cpu = 0; cpu < nr_cpu_ids; cpu += threads_per_core) {
cpu              4741 arch/powerpc/kvm/book3s_hv.c 		if (!cpu_online(cpu))
cpu              4744 arch/powerpc/kvm/book3s_hv.c 		core = cpu >> threads_shift;
cpu              5466 arch/powerpc/kvm/book3s_hv.c 			int cpu = first_cpu + j;
cpu              5468 arch/powerpc/kvm/book3s_hv.c 			paca_ptrs[cpu]->sibling_subcore_state =
cpu               226 arch/powerpc/kvm/book3s_hv_builtin.c void kvmhv_rm_send_ipi(int cpu)
cpu               235 arch/powerpc/kvm/book3s_hv_builtin.c 		plpar_hcall_raw(H_IPI, retbuf, get_hard_smp_processor_id(cpu),
cpu               242 arch/powerpc/kvm/book3s_hv_builtin.c 		msg |= get_hard_smp_processor_id(cpu);
cpu               249 arch/powerpc/kvm/book3s_hv_builtin.c 	    cpu_first_thread_sibling(cpu) ==
cpu               251 arch/powerpc/kvm/book3s_hv_builtin.c 		msg |= cpu_thread_in_core(cpu);
cpu               261 arch/powerpc/kvm/book3s_hv_builtin.c 	xics_phys = paca_ptrs[cpu]->kvm_hstate.xics_phys;
cpu               265 arch/powerpc/kvm/book3s_hv_builtin.c 		opal_int_set_mfrr(get_hard_smp_processor_id(cpu), IPI_PRIORITY);
cpu               274 arch/powerpc/kvm/book3s_hv_builtin.c 	int cpu = vc->pcpu;
cpu               278 arch/powerpc/kvm/book3s_hv_builtin.c 	for (; active; active >>= 1, ++cpu)
cpu               280 arch/powerpc/kvm/book3s_hv_builtin.c 			kvmhv_rm_send_ipi(cpu);
cpu                46 arch/powerpc/kvm/book3s_hv_rm_mmu.c 	int cpu;
cpu                64 arch/powerpc/kvm/book3s_hv_rm_mmu.c 		cpu = local_paca->kvm_hstate.kvm_vcore->pcpu;
cpu                70 arch/powerpc/kvm/book3s_hv_rm_mmu.c 			cpu = cpu_first_thread_sibling(cpu);
cpu                71 arch/powerpc/kvm/book3s_hv_rm_mmu.c 		cpumask_clear_cpu(cpu, &kvm->arch.need_tlb_flush);
cpu               131 arch/powerpc/kvm/book3s_hv_rm_xics.c 	int cpu;
cpu               156 arch/powerpc/kvm/book3s_hv_rm_xics.c 	cpu = vcpu->arch.thread_cpu;
cpu               157 arch/powerpc/kvm/book3s_hv_rm_xics.c 	if (cpu < 0 || cpu >= nr_cpu_ids) {
cpu               171 arch/powerpc/kvm/book3s_hv_rm_xics.c 	kvmhv_rm_send_ipi(cpu);
cpu               817 arch/powerpc/kvm/book3s_hv_rm_xics.c 	int cpu = smp_processor_id();
cpu               819 arch/powerpc/kvm/book3s_hv_rm_xics.c 	raddr = per_cpu_ptr(addr, cpu);
cpu               920 arch/powerpc/kvm/book3s_hv_rm_xics.c 	unsigned int cpu = smp_processor_id();
cpu               923 arch/powerpc/kvm/book3s_hv_rm_xics.c 	core = cpu >> threads_shift;
cpu                95 arch/powerpc/kvm/book3s_pr.c static void kvmppc_core_vcpu_load_pr(struct kvm_vcpu *vcpu, int cpu)
cpu               110 arch/powerpc/kvm/book3s_pr.c 	vcpu->cpu = smp_processor_id();
cpu               145 arch/powerpc/kvm/book3s_pr.c 	vcpu->cpu = -1;
cpu              1215 arch/powerpc/kvm/book3s_xive.c 			     struct kvm_vcpu *vcpu, u32 cpu)
cpu              1222 arch/powerpc/kvm/book3s_xive.c 	pr_devel("connect_vcpu(cpu=%d)\n", cpu);
cpu              1232 arch/powerpc/kvm/book3s_xive.c 	if (cpu >= (KVM_MAX_VCPUS * vcpu->kvm->arch.emul_smt_mode)) {
cpu              1240 arch/powerpc/kvm/book3s_xive.c 	vp_id = kvmppc_xive_vp(xive, cpu);
cpu              1256 arch/powerpc/kvm/book3s_xive.c 	xc->server_num = cpu;
cpu              1319 arch/powerpc/kvm/book3s_xive.c 				       i, cpu);
cpu              2092 arch/powerpc/kvm/booke.c void kvmppc_booke_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
cpu              2094 arch/powerpc/kvm/booke.c 	vcpu->cpu = smp_processor_id();
cpu              2101 arch/powerpc/kvm/booke.c 	vcpu->cpu = -1;
cpu              2132 arch/powerpc/kvm/booke.c void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
cpu              2134 arch/powerpc/kvm/booke.c 	vcpu->kvm->arch.kvm_ops->vcpu_load(vcpu, cpu);
cpu                85 arch/powerpc/kvm/booke.h void kvmppc_booke_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
cpu               299 arch/powerpc/kvm/e500.c static void kvmppc_core_vcpu_load_e500(struct kvm_vcpu *vcpu, int cpu)
cpu               301 arch/powerpc/kvm/e500.c 	kvmppc_booke_vcpu_load(vcpu, cpu);
cpu               112 arch/powerpc/kvm/e500mc.c static void kvmppc_core_vcpu_load_e500mc(struct kvm_vcpu *vcpu, int cpu)
cpu               116 arch/powerpc/kvm/e500mc.c 	kvmppc_booke_vcpu_load(vcpu, cpu);
cpu              1128 arch/powerpc/kvm/mpic.c 			     int cpu)
cpu              1164 arch/powerpc/kvm/mpic.c 		src->destmask &= ~(1 << cpu);
cpu              1180 arch/powerpc/kvm/mpic.c 	int cpu = vcpu->arch.irq_cpu_id;
cpu              1186 arch/powerpc/kvm/mpic.c 		kvmppc_set_epr(vcpu, openpic_iack(opp, &opp->dst[cpu], cpu));
cpu              1734 arch/powerpc/kvm/mpic.c 			     u32 cpu)
cpu              1743 arch/powerpc/kvm/mpic.c 	if (cpu < 0 || cpu >= MAX_CPU)
cpu              1748 arch/powerpc/kvm/mpic.c 	if (opp->dst[cpu].vcpu) {
cpu              1757 arch/powerpc/kvm/mpic.c 	opp->dst[cpu].vcpu = vcpu;
cpu              1758 arch/powerpc/kvm/mpic.c 	opp->nb_cpus = max(opp->nb_cpus, cpu + 1);
cpu              1761 arch/powerpc/kvm/mpic.c 	vcpu->arch.irq_cpu_id = cpu;
cpu               800 arch/powerpc/kvm/powerpc.c void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
cpu               812 arch/powerpc/kvm/powerpc.c 	kvmppc_core_vcpu_load(vcpu, cpu);
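
Several book3s_hv.c lines above guard a cached CPU id (cpu >= 0 && cpu < nr_cpu_ids && cpu_online(cpu)) before sending an IPI to it. A hedged sketch of that guard; my_kick_target() is illustrative and stands in for the KVM-specific wakeup path:

#include <linux/cpumask.h>
#include <linux/smp.h>

/*
 * Kick a CPU whose id was cached earlier (for example by a vCPU when it
 * entered the guest).  The cached value may be -1 or stale, so validate
 * it before use.
 */
static void my_kick_target(int cpu)
{
	if (cpu < 0 || cpu >= nr_cpu_ids)
		return;				/* never set, or bogus */
	if (!cpu_online(cpu))
		return;				/* target went offline */
	smp_send_reschedule(cpu);		/* lightweight wakeup IPI */
}
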
cpu                44 arch/powerpc/lib/code-patching.c static int text_area_cpu_up(unsigned int cpu)
cpu                51 arch/powerpc/lib/code-patching.c 			cpu);
cpu                59 arch/powerpc/lib/code-patching.c static int text_area_cpu_down(unsigned int cpu)
cpu                36 arch/powerpc/mm/book3s64/pkeys.c 	struct device_node *cpu;
cpu                38 arch/powerpc/mm/book3s64/pkeys.c 	cpu = of_find_node_by_type(NULL, "cpu");
cpu                39 arch/powerpc/mm/book3s64/pkeys.c 	if (!cpu)
cpu                42 arch/powerpc/mm/book3s64/pkeys.c 	if (of_property_read_u32_array(cpu,
cpu              1240 arch/powerpc/mm/book3s64/radix_tlb.c 		int cpu = smp_processor_id();
cpu              1241 arch/powerpc/mm/book3s64/radix_tlb.c 		int sib = cpu_first_thread_sibling(cpu);
cpu              1244 arch/powerpc/mm/book3s64/radix_tlb.c 		for (; sib <= cpu_last_thread_sibling(cpu) && !flush; sib++) {
cpu              1245 arch/powerpc/mm/book3s64/radix_tlb.c 			if (sib == cpu)
cpu               219 arch/powerpc/mm/nohash/44x.c void __init mmu_init_secondary(int cpu)
cpu               112 arch/powerpc/mm/nohash/mmu_context.c 	unsigned int cpu, max, i;
cpu               140 arch/powerpc/mm/nohash/mmu_context.c 		for_each_cpu(cpu, mm_cpumask(mm)) {
cpu               141 arch/powerpc/mm/nohash/mmu_context.c 			for (i = cpu_first_thread_sibling(cpu);
cpu               142 arch/powerpc/mm/nohash/mmu_context.c 			     i <= cpu_last_thread_sibling(cpu); i++) {
cpu               146 arch/powerpc/mm/nohash/mmu_context.c 			cpu = i - 1;
cpu               167 arch/powerpc/mm/nohash/mmu_context.c 	int cpu = smp_processor_id();
cpu               187 arch/powerpc/mm/nohash/mmu_context.c 		__clear_bit(id, stale_map[cpu]);
cpu               208 arch/powerpc/mm/nohash/mmu_context.c 	int cpu = smp_processor_id();
cpu               224 arch/powerpc/mm/nohash/mmu_context.c 	__clear_bit(id, stale_map[cpu]);
cpu               266 arch/powerpc/mm/nohash/mmu_context.c 	unsigned int i, cpu = smp_processor_id();
cpu               274 arch/powerpc/mm/nohash/mmu_context.c 		cpu, next, next->context.active, next->context.id);
cpu               342 arch/powerpc/mm/nohash/mmu_context.c 	if (test_bit(id, stale_map[cpu])) {
cpu               344 arch/powerpc/mm/nohash/mmu_context.c 			    id, cpu_first_thread_sibling(cpu),
cpu               345 arch/powerpc/mm/nohash/mmu_context.c 			    cpu_last_thread_sibling(cpu));
cpu               350 arch/powerpc/mm/nohash/mmu_context.c 		for (i = cpu_first_thread_sibling(cpu);
cpu               351 arch/powerpc/mm/nohash/mmu_context.c 		     i <= cpu_last_thread_sibling(cpu); i++) {
cpu               414 arch/powerpc/mm/nohash/mmu_context.c static int mmu_ctx_cpu_prepare(unsigned int cpu)
cpu               419 arch/powerpc/mm/nohash/mmu_context.c 	if (cpu == boot_cpuid)
cpu               422 arch/powerpc/mm/nohash/mmu_context.c 	pr_devel("MMU: Allocating stale context map for CPU %d\n", cpu);
cpu               423 arch/powerpc/mm/nohash/mmu_context.c 	stale_map[cpu] = kzalloc(CTX_MAP_SIZE, GFP_KERNEL);
cpu               427 arch/powerpc/mm/nohash/mmu_context.c static int mmu_ctx_cpu_dead(unsigned int cpu)
cpu               430 arch/powerpc/mm/nohash/mmu_context.c 	if (cpu == boot_cpuid)
cpu               433 arch/powerpc/mm/nohash/mmu_context.c 	pr_devel("MMU: Freeing stale context map for CPU %d\n", cpu);
cpu               434 arch/powerpc/mm/nohash/mmu_context.c 	kfree(stale_map[cpu]);
cpu               435 arch/powerpc/mm/nohash/mmu_context.c 	stale_map[cpu] = NULL;
cpu               438 arch/powerpc/mm/nohash/mmu_context.c 	clear_tasks_mm_cpumask(cpu);
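
The nohash/mmu_context.c entries allocate a per-CPU stale-context map in a CPU-hotplug prepare callback and free it in the dead callback. A minimal sketch of wiring such callbacks through a dynamically registered cpuhp state; the map name and size are illustrative:

#include <linux/cpuhotplug.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/threads.h>

#define MY_MAP_SIZE	256

static void *my_stale_map[NR_CPUS];	/* illustrative per-CPU buffers */

static int my_cpu_prepare(unsigned int cpu)
{
	my_stale_map[cpu] = kzalloc(MY_MAP_SIZE, GFP_KERNEL);
	return my_stale_map[cpu] ? 0 : -ENOMEM;
}

static int my_cpu_dead(unsigned int cpu)
{
	kfree(my_stale_map[cpu]);
	my_stale_map[cpu] = NULL;
	return 0;
}

static int __init my_init(void)
{
	int ret;

	/* Prepare/dead callbacks run on a control CPU, not on @cpu itself. */
	ret = cpuhp_setup_state(CPUHP_BP_PREPARE_DYN, "mydrv:prepare",
				my_cpu_prepare, my_cpu_dead);
	return ret < 0 ? ret : 0;
}
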
cpu               134 arch/powerpc/mm/numa.c 	unsigned int cpu;
cpu               136 arch/powerpc/mm/numa.c 	for_each_possible_cpu(cpu)
cpu               137 arch/powerpc/mm/numa.c 		numa_cpu_lookup_table[cpu] = -1;
cpu               140 arch/powerpc/mm/numa.c static void map_cpu_to_node(int cpu, int node)
cpu               142 arch/powerpc/mm/numa.c 	update_numa_cpu_lookup_table(cpu, node);
cpu               144 arch/powerpc/mm/numa.c 	dbg("adding cpu %d to node %d\n", cpu, node);
cpu               146 arch/powerpc/mm/numa.c 	if (!(cpumask_test_cpu(cpu, node_to_cpumask_map[node])))
cpu               147 arch/powerpc/mm/numa.c 		cpumask_set_cpu(cpu, node_to_cpumask_map[node]);
cpu               151 arch/powerpc/mm/numa.c static void unmap_cpu_from_node(unsigned long cpu)
cpu               153 arch/powerpc/mm/numa.c 	int node = numa_cpu_lookup_table[cpu];
cpu               155 arch/powerpc/mm/numa.c 	dbg("removing cpu %lu from node %d\n", cpu, node);
cpu               157 arch/powerpc/mm/numa.c 	if (cpumask_test_cpu(cpu, node_to_cpumask_map[node])) {
cpu               158 arch/powerpc/mm/numa.c 		cpumask_clear_cpu(cpu, node_to_cpumask_map[node]);
cpu               161 arch/powerpc/mm/numa.c 		       cpu, node);
cpu               471 arch/powerpc/mm/numa.c 	struct device_node *cpu;
cpu               483 arch/powerpc/mm/numa.c 	cpu = of_get_cpu_node(lcpu, NULL);
cpu               485 arch/powerpc/mm/numa.c 	if (!cpu) {
cpu               493 arch/powerpc/mm/numa.c 	nid = of_node_to_nid_single(cpu);
cpu               500 arch/powerpc/mm/numa.c 	of_node_put(cpu);
cpu               505 arch/powerpc/mm/numa.c static void verify_cpu_node_mapping(int cpu, int node)
cpu               510 arch/powerpc/mm/numa.c 	base = cpu_first_thread_sibling(cpu);
cpu               515 arch/powerpc/mm/numa.c 		if (sibling == cpu || cpu_is_offline(sibling))
cpu               520 arch/powerpc/mm/numa.c 				" to the same node!\n", cpu, sibling);
cpu               527 arch/powerpc/mm/numa.c static int ppc_numa_cpu_prepare(unsigned int cpu)
cpu               531 arch/powerpc/mm/numa.c 	nid = numa_setup_cpu(cpu);
cpu               532 arch/powerpc/mm/numa.c 	verify_cpu_node_mapping(cpu, nid);
cpu               536 arch/powerpc/mm/numa.c static int ppc_numa_cpu_dead(unsigned int cpu)
cpu               539 arch/powerpc/mm/numa.c 	unmap_cpu_from_node(cpu);
cpu               664 arch/powerpc/mm/numa.c 		struct device_node *cpu;
cpu               667 arch/powerpc/mm/numa.c 		cpu = of_get_cpu_node(i, NULL);
cpu               668 arch/powerpc/mm/numa.c 		BUG_ON(!cpu);
cpu               669 arch/powerpc/mm/numa.c 		nid = of_node_to_nid_single(cpu);
cpu               670 arch/powerpc/mm/numa.c 		of_node_put(cpu);
cpu               768 arch/powerpc/mm/numa.c 	unsigned int cpu, count;
cpu               781 arch/powerpc/mm/numa.c 		for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
cpu               782 arch/powerpc/mm/numa.c 			if (cpumask_test_cpu(cpu,
cpu               785 arch/powerpc/mm/numa.c 					pr_cont(" %u", cpu);
cpu               789 arch/powerpc/mm/numa.c 					pr_cont("-%u", cpu - 1);
cpu               858 arch/powerpc/mm/numa.c 	int cpu;
cpu               877 arch/powerpc/mm/numa.c 	for_each_present_cpu(cpu)
cpu               878 arch/powerpc/mm/numa.c 		numa_setup_cpu(cpu);
cpu              1092 arch/powerpc/mm/numa.c 	unsigned int cpu;
cpu              1130 arch/powerpc/mm/numa.c 	int cpu;
cpu              1135 arch/powerpc/mm/numa.c 	for_each_possible_cpu(cpu) {
cpu              1137 arch/powerpc/mm/numa.c 		u8 *counts = vphn_cpu_change_counts[cpu];
cpu              1138 arch/powerpc/mm/numa.c 		volatile u8 *hypervisor_counts = lppaca_of(cpu).vphn_assoc_counts;
cpu              1158 arch/powerpc/mm/numa.c 	int cpu;
cpu              1161 arch/powerpc/mm/numa.c 	for_each_possible_cpu(cpu) {
cpu              1163 arch/powerpc/mm/numa.c 		u8 *counts = vphn_cpu_change_counts[cpu];
cpu              1164 arch/powerpc/mm/numa.c 		volatile u8 *hypervisor_counts = lppaca_of(cpu).vphn_assoc_counts;
cpu              1173 arch/powerpc/mm/numa.c 			cpumask_or(changes, changes, cpu_sibling_mask(cpu));
cpu              1174 arch/powerpc/mm/numa.c 			cpu = cpu_last_thread_sibling(cpu);
cpu              1185 arch/powerpc/mm/numa.c static long vphn_get_associativity(unsigned long cpu,
cpu              1190 arch/powerpc/mm/numa.c 	rc = hcall_vphn(get_hard_smp_processor_id(cpu),
cpu              1214 arch/powerpc/mm/numa.c int find_and_online_cpu_nid(int cpu)
cpu              1220 arch/powerpc/mm/numa.c 	if (vphn_get_associativity(cpu, associativity))
cpu              1221 arch/powerpc/mm/numa.c 		return cpu_to_node(cpu);
cpu              1249 arch/powerpc/mm/numa.c 		cpu, new_nid);
cpu              1261 arch/powerpc/mm/numa.c 	unsigned long cpu;
cpu              1266 arch/powerpc/mm/numa.c 	cpu = smp_processor_id();
cpu              1270 arch/powerpc/mm/numa.c 		if (cpu != update->cpu)
cpu              1273 arch/powerpc/mm/numa.c 		unmap_cpu_from_node(cpu);
cpu              1274 arch/powerpc/mm/numa.c 		map_cpu_to_node(cpu, new_nid);
cpu              1275 arch/powerpc/mm/numa.c 		set_cpu_numa_node(cpu, new_nid);
cpu              1276 arch/powerpc/mm/numa.c 		set_cpu_numa_mem(cpu, local_memory_node(new_nid));
cpu              1300 arch/powerpc/mm/numa.c 		base = cpu_first_thread_sibling(update->cpu);
cpu              1318 arch/powerpc/mm/numa.c 	unsigned int cpu, sibling, changed = 0;
cpu              1337 arch/powerpc/mm/numa.c 	for_each_cpu(cpu, &cpu_associativity_changes_mask) {
cpu              1343 arch/powerpc/mm/numa.c 		if (!cpumask_subset(cpu_sibling_mask(cpu),
cpu              1346 arch/powerpc/mm/numa.c 					"change, cpu%d\n", cpu);
cpu              1349 arch/powerpc/mm/numa.c 					cpu_sibling_mask(cpu));
cpu              1350 arch/powerpc/mm/numa.c 			cpu = cpu_last_thread_sibling(cpu);
cpu              1354 arch/powerpc/mm/numa.c 		new_nid = find_and_online_cpu_nid(cpu);
cpu              1356 arch/powerpc/mm/numa.c 		if (new_nid == numa_cpu_lookup_table[cpu]) {
cpu              1359 arch/powerpc/mm/numa.c 					cpu_sibling_mask(cpu));
cpu              1361 arch/powerpc/mm/numa.c 					new_nid, cpu);
cpu              1362 arch/powerpc/mm/numa.c 			cpu = cpu_last_thread_sibling(cpu);
cpu              1366 arch/powerpc/mm/numa.c 		for_each_cpu(sibling, cpu_sibling_mask(cpu)) {
cpu              1369 arch/powerpc/mm/numa.c 			ud->cpu = sibling;
cpu              1374 arch/powerpc/mm/numa.c 		cpu = cpu_last_thread_sibling(cpu);
cpu              1388 arch/powerpc/mm/numa.c 					  "to %d\n", ud->cpu,
cpu              1426 arch/powerpc/mm/numa.c 		unregister_cpu_under_node(ud->cpu, ud->old_nid);
cpu              1427 arch/powerpc/mm/numa.c 		register_cpu_under_node(ud->cpu, ud->new_nid);
cpu              1429 arch/powerpc/mm/numa.c 		dev = get_cpu_device(ud->cpu);
cpu              1432 arch/powerpc/mm/numa.c 		cpumask_clear_cpu(ud->cpu, &cpu_associativity_changes_mask);
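
The mm/numa.c lines keep two views of the CPU-to-node mapping in sync: a per-CPU lookup table and a per-node cpumask, updated together whenever a CPU is mapped to or unmapped from a node. A small illustrative sketch of that bookkeeping (names and sizes are made up):

#include <linux/cpumask.h>
#include <linux/threads.h>

#define MY_MAX_NODES	4

static int my_cpu_node[NR_CPUS];		/* cpu -> node, -1 if unmapped */
static cpumask_t my_node_cpus[MY_MAX_NODES];	/* node -> CPUs on that node */

static void my_map_cpu_to_node(int cpu, int node)
{
	my_cpu_node[cpu] = node;
	if (!cpumask_test_cpu(cpu, &my_node_cpus[node]))
		cpumask_set_cpu(cpu, &my_node_cpus[node]);
}

static void my_unmap_cpu_from_node(int cpu)
{
	int node = my_cpu_node[cpu];

	if (node >= 0 && cpumask_test_cpu(cpu, &my_node_cpus[node]))
		cpumask_clear_cpu(cpu, &my_node_cpus[node]);
	my_cpu_node[cpu] = -1;
}
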
cpu               105 arch/powerpc/net/bpf_jit32.h 	do { BUILD_BUG_ON(FIELD_SIZEOF(struct task_struct, cpu) != 4);		\
cpu               106 arch/powerpc/net/bpf_jit32.h 		PPC_LHZ_OFFS(r, 2, offsetof(struct task_struct, cpu));		\
cpu                73 arch/powerpc/oprofile/cell/spu_profiler.c static void spu_pc_extract(int cpu, int entry)
cpu                90 arch/powerpc/oprofile/cell/spu_profiler.c 	cbe_read_trace_buffer(cpu, trace_buffer);
cpu               106 arch/powerpc/oprofile/cell/spu_profiler.c static int cell_spu_pc_collection(int cpu)
cpu               115 arch/powerpc/oprofile/cell/spu_profiler.c 	trace_addr = cbe_read_pm(cpu, trace_address);
cpu               118 arch/powerpc/oprofile/cell/spu_profiler.c 		spu_pc_extract(cpu, entry);
cpu               126 arch/powerpc/oprofile/cell/spu_profiler.c 		trace_addr = cbe_read_pm(cpu, trace_address);
cpu               136 arch/powerpc/oprofile/cell/spu_profiler.c 	int cpu, node, k, num_samples, spu_num;
cpu               141 arch/powerpc/oprofile/cell/spu_profiler.c 	for_each_online_cpu(cpu) {
cpu               142 arch/powerpc/oprofile/cell/spu_profiler.c 		if (cbe_get_hw_thread_id(cpu))
cpu               145 arch/powerpc/oprofile/cell/spu_profiler.c 		node = cbe_cpu_to_node(cpu);
cpu               157 arch/powerpc/oprofile/cell/spu_profiler.c 		num_samples = cell_spu_pc_collection(cpu);
cpu               446 arch/powerpc/oprofile/cell/spu_task_sync.c         u32 cpu; u32 tmp;
cpu               448 arch/powerpc/oprofile/cell/spu_task_sync.c         for_each_online_cpu(cpu) {
cpu               449 arch/powerpc/oprofile/cell/spu_task_sync.c                 tmp = cbe_cpu_to_node(cpu) + 1;
cpu                90 arch/powerpc/oprofile/op_model_cell.c 	u16 cpu;		/* Processor to modify */
cpu               215 arch/powerpc/oprofile/op_model_cell.c 	pm_signal_local.cpu = node;
cpu               254 arch/powerpc/oprofile/op_model_cell.c 			pm_signal_local[i].cpu = node;
cpu               369 arch/powerpc/oprofile/op_model_cell.c static void write_pm_cntrl(int cpu)
cpu               398 arch/powerpc/oprofile/op_model_cell.c 	cbe_write_pm(cpu, pm_control, val);
cpu               424 arch/powerpc/oprofile/op_model_cell.c static inline void enable_ctr(u32 cpu, u32 ctr, u32 *pm07_cntrl)
cpu               428 arch/powerpc/oprofile/op_model_cell.c 	cbe_write_pm07_control(cpu, ctr, pm07_cntrl[ctr]);
cpu               453 arch/powerpc/oprofile/op_model_cell.c 	u32 cpu;
cpu               488 arch/powerpc/oprofile/op_model_cell.c 	for_each_online_cpu(cpu) {
cpu               489 arch/powerpc/oprofile/op_model_cell.c 		if (cbe_get_hw_thread_id(cpu))
cpu               496 arch/powerpc/oprofile/op_model_cell.c 		cbe_disable_pm(cpu);
cpu               497 arch/powerpc/oprofile/op_model_cell.c 		cbe_disable_pm_interrupts(cpu);
cpu               499 arch/powerpc/oprofile/op_model_cell.c 			per_cpu(pmc_values, cpu + prev_hdw_thread)[i]
cpu               500 arch/powerpc/oprofile/op_model_cell.c 				= cbe_read_ctr(cpu, i);
cpu               502 arch/powerpc/oprofile/op_model_cell.c 			if (per_cpu(pmc_values, cpu + next_hdw_thread)[i]
cpu               514 arch/powerpc/oprofile/op_model_cell.c 				cbe_write_ctr(cpu, i, 0xFFFFFFF0);
cpu               516 arch/powerpc/oprofile/op_model_cell.c 				cbe_write_ctr(cpu, i,
cpu               518 arch/powerpc/oprofile/op_model_cell.c 						      cpu +
cpu               534 arch/powerpc/oprofile/op_model_cell.c 				enable_ctr(cpu, i,
cpu               537 arch/powerpc/oprofile/op_model_cell.c 				cbe_write_pm07_control(cpu, i, 0);
cpu               542 arch/powerpc/oprofile/op_model_cell.c 		cbe_enable_pm_interrupts(cpu, next_hdw_thread,
cpu               544 arch/powerpc/oprofile/op_model_cell.c 		cbe_enable_pm(cpu);
cpu               591 arch/powerpc/oprofile/op_model_cell.c 	int cpu;
cpu               616 arch/powerpc/oprofile/op_model_cell.c 	for_each_online_cpu(cpu) {
cpu               617 arch/powerpc/oprofile/op_model_cell.c 		if (cbe_get_hw_thread_id(cpu))
cpu               620 arch/powerpc/oprofile/op_model_cell.c 		node = cbe_cpu_to_node(cpu);
cpu               630 arch/powerpc/oprofile/op_model_cell.c 		cbe_disable_pm(cpu);
cpu               631 arch/powerpc/oprofile/op_model_cell.c 		cbe_disable_pm_interrupts(cpu);
cpu               634 arch/powerpc/oprofile/op_model_cell.c 			= cbe_read_ctr(cpu, 0);
cpu               641 arch/powerpc/oprofile/op_model_cell.c 			cbe_write_ctr(cpu, 0, 0xFFFFFFF0);
cpu               643 arch/powerpc/oprofile/op_model_cell.c 			 cbe_write_ctr(cpu, 0, spu_pm_cnt[nxt_phys_spu]);
cpu               645 arch/powerpc/oprofile/op_model_cell.c 		pm_rtas_reset_signals(cbe_cpu_to_node(cpu));
cpu               651 arch/powerpc/oprofile/op_model_cell.c 		ret = pm_rtas_activate_signals(cbe_cpu_to_node(cpu), 3);
cpu               658 arch/powerpc/oprofile/op_model_cell.c 		cbe_write_pm(cpu, trace_address, 0);
cpu               660 arch/powerpc/oprofile/op_model_cell.c 		enable_ctr(cpu, 0, pm_regs.pm07_cntrl);
cpu               663 arch/powerpc/oprofile/op_model_cell.c 		cbe_enable_pm_interrupts(cpu, hdw_thread,
cpu               665 arch/powerpc/oprofile/op_model_cell.c 		cbe_enable_pm(cpu);
cpu               754 arch/powerpc/oprofile/op_model_cell.c 	int i, j, cpu;
cpu               824 arch/powerpc/oprofile/op_model_cell.c 	for_each_online_cpu(cpu)
cpu               826 arch/powerpc/oprofile/op_model_cell.c 			per_cpu(pmc_values, cpu)[i] = reset_value[i];
cpu               894 arch/powerpc/oprofile/op_model_cell.c 	u32 cpu = smp_processor_id();
cpu               909 arch/powerpc/oprofile/op_model_cell.c 	if (cbe_get_hw_thread_id(cpu))
cpu               913 arch/powerpc/oprofile/op_model_cell.c 	cbe_disable_pm(cpu);
cpu               914 arch/powerpc/oprofile/op_model_cell.c 	cbe_disable_pm_interrupts(cpu);
cpu               916 arch/powerpc/oprofile/op_model_cell.c 	cbe_write_pm(cpu, pm_start_stop, 0);
cpu               917 arch/powerpc/oprofile/op_model_cell.c 	cbe_write_pm(cpu, group_control, pm_regs.group_control);
cpu               918 arch/powerpc/oprofile/op_model_cell.c 	cbe_write_pm(cpu, debug_bus_control, pm_regs.debug_bus_control);
cpu               919 arch/powerpc/oprofile/op_model_cell.c 	write_pm_cntrl(cpu);
cpu               923 arch/powerpc/oprofile/op_model_cell.c 			pm_signal[num_enabled].cpu = cbe_cpu_to_node(cpu);
cpu               936 arch/powerpc/oprofile/op_model_cell.c 		ret = pm_rtas_activate_signals(cbe_cpu_to_node(cpu),
cpu               941 arch/powerpc/oprofile/op_model_cell.c 		cbe_write_pm(cpu, pm_interval, NUM_INTERVAL_CYC);
cpu               944 arch/powerpc/oprofile/op_model_cell.c 		return pm_rtas_activate_signals(cbe_cpu_to_node(cpu),
cpu              1086 arch/powerpc/oprofile/op_model_cell.c 		pm_signal_local[i].cpu = node;
cpu              1138 arch/powerpc/oprofile/op_model_cell.c 	int cpu;
cpu              1148 arch/powerpc/oprofile/op_model_cell.c 	for_each_online_cpu(cpu) {
cpu              1149 arch/powerpc/oprofile/op_model_cell.c 		if (cbe_get_hw_thread_id(cpu))
cpu              1159 arch/powerpc/oprofile/op_model_cell.c 				      subfunc, cbe_cpu_to_node(cpu),
cpu              1170 arch/powerpc/oprofile/op_model_cell.c 		pm_rtas_reset_signals(cbe_cpu_to_node(cpu));
cpu              1178 arch/powerpc/oprofile/op_model_cell.c 	int cpu;
cpu              1184 arch/powerpc/oprofile/op_model_cell.c 	for_each_online_cpu(cpu) {
cpu              1185 arch/powerpc/oprofile/op_model_cell.c 		if (cbe_get_hw_thread_id(cpu))
cpu              1188 arch/powerpc/oprofile/op_model_cell.c 		cbe_sync_irq(cbe_cpu_to_node(cpu));
cpu              1190 arch/powerpc/oprofile/op_model_cell.c 		cbe_disable_pm(cpu);
cpu              1191 arch/powerpc/oprofile/op_model_cell.c 		cbe_write_pm07_control(cpu, 0, 0);
cpu              1194 arch/powerpc/oprofile/op_model_cell.c 		pm_rtas_reset_signals(cbe_cpu_to_node(cpu));
cpu              1197 arch/powerpc/oprofile/op_model_cell.c 		cbe_disable_pm_interrupts(cpu);
cpu              1204 arch/powerpc/oprofile/op_model_cell.c 	int cpu;
cpu              1215 arch/powerpc/oprofile/op_model_cell.c 	for_each_online_cpu(cpu) {
cpu              1216 arch/powerpc/oprofile/op_model_cell.c 		if (cbe_get_hw_thread_id(cpu))
cpu              1219 arch/powerpc/oprofile/op_model_cell.c 		cbe_sync_irq(cbe_cpu_to_node(cpu));
cpu              1221 arch/powerpc/oprofile/op_model_cell.c 		cbe_disable_pm(cpu);
cpu              1224 arch/powerpc/oprofile/op_model_cell.c 		pm_rtas_reset_signals(cbe_cpu_to_node(cpu));
cpu              1227 arch/powerpc/oprofile/op_model_cell.c 		cbe_disable_pm_interrupts(cpu);
cpu              1245 arch/powerpc/oprofile/op_model_cell.c 	int cpu;
cpu              1269 arch/powerpc/oprofile/op_model_cell.c 	for_each_online_cpu(cpu) {
cpu              1270 arch/powerpc/oprofile/op_model_cell.c 		if (cbe_get_hw_thread_id(cpu))
cpu              1278 arch/powerpc/oprofile/op_model_cell.c 		cbe_write_pm(cpu, pm_control, 0);
cpu              1295 arch/powerpc/oprofile/op_model_cell.c 		ret = pm_rtas_activate_spu_profiling(cbe_cpu_to_node(cpu));
cpu              1307 arch/powerpc/oprofile/op_model_cell.c 				cbe_cpu_to_node(cpu), lfsr_value);
cpu              1333 arch/powerpc/oprofile/op_model_cell.c 	int cpu;
cpu              1353 arch/powerpc/oprofile/op_model_cell.c 	for_each_online_cpu(cpu) {
cpu              1354 arch/powerpc/oprofile/op_model_cell.c 		if (cbe_get_hw_thread_id(cpu))
cpu              1365 arch/powerpc/oprofile/op_model_cell.c 			cbe_write_ctr(cpu, 0, reset_value[0]);
cpu              1366 arch/powerpc/oprofile/op_model_cell.c 			enable_ctr(cpu, 0, pm_regs.pm07_cntrl);
cpu              1371 arch/powerpc/oprofile/op_model_cell.c 			cbe_write_pm07_control(cpu, 0, 0);
cpu              1374 arch/powerpc/oprofile/op_model_cell.c 		cbe_get_and_clear_pm_interrupts(cpu);
cpu              1375 arch/powerpc/oprofile/op_model_cell.c 		cbe_enable_pm_interrupts(cpu, hdw_thread, interrupt_mask);
cpu              1376 arch/powerpc/oprofile/op_model_cell.c 		cbe_enable_pm(cpu);
cpu              1379 arch/powerpc/oprofile/op_model_cell.c 		cbe_write_pm(cpu, trace_address, 0);
cpu              1396 arch/powerpc/oprofile/op_model_cell.c 	u32 cpu, i;
cpu              1403 arch/powerpc/oprofile/op_model_cell.c 	for_each_online_cpu(cpu) {
cpu              1404 arch/powerpc/oprofile/op_model_cell.c 		if (cbe_get_hw_thread_id(cpu))
cpu              1411 arch/powerpc/oprofile/op_model_cell.c 				cbe_write_ctr(cpu, i, reset_value[i]);
cpu              1412 arch/powerpc/oprofile/op_model_cell.c 				enable_ctr(cpu, i, pm_regs.pm07_cntrl);
cpu              1416 arch/powerpc/oprofile/op_model_cell.c 				cbe_write_pm07_control(cpu, i, 0);
cpu              1420 arch/powerpc/oprofile/op_model_cell.c 		cbe_get_and_clear_pm_interrupts(cpu);
cpu              1421 arch/powerpc/oprofile/op_model_cell.c 		cbe_enable_pm_interrupts(cpu, hdw_thread, interrupt_mask);
cpu              1422 arch/powerpc/oprofile/op_model_cell.c 		cbe_enable_pm(cpu);
cpu              1481 arch/powerpc/oprofile/op_model_cell.c 	u32 cpu, cpu_tmp;
cpu              1495 arch/powerpc/oprofile/op_model_cell.c 	cpu = smp_processor_id();
cpu              1498 arch/powerpc/oprofile/op_model_cell.c 	cpu_tmp = cpu;
cpu              1499 arch/powerpc/oprofile/op_model_cell.c 	cbe_disable_pm(cpu);
cpu              1501 arch/powerpc/oprofile/op_model_cell.c 	interrupt_mask = cbe_get_and_clear_pm_interrupts(cpu);
cpu              1509 arch/powerpc/oprofile/op_model_cell.c 		cbe_write_pm(cpu, pm_interval, 0);
cpu              1518 arch/powerpc/oprofile/op_model_cell.c 			cbe_write_ctr(cpu, 0, reset_value[0]);
cpu              1520 arch/powerpc/oprofile/op_model_cell.c 		trace_addr = cbe_read_pm(cpu, trace_address);
cpu              1528 arch/powerpc/oprofile/op_model_cell.c 			cbe_read_trace_buffer(cpu, trace_buffer);
cpu              1529 arch/powerpc/oprofile/op_model_cell.c 			trace_addr = cbe_read_pm(cpu, trace_address);
cpu              1557 arch/powerpc/oprofile/op_model_cell.c 			+ (cbe_cpu_to_node(cpu) * NUM_SPUS_PER_NODE);
cpu              1574 arch/powerpc/oprofile/op_model_cell.c 		cbe_write_pm(cpu, pm_interval, NUM_INTERVAL_CYC);
cpu              1575 arch/powerpc/oprofile/op_model_cell.c 		cbe_enable_pm_interrupts(cpu, hdw_thread,
cpu              1579 arch/powerpc/oprofile/op_model_cell.c 		cbe_write_pm(cpu, trace_address, 0);
cpu              1580 arch/powerpc/oprofile/op_model_cell.c 		cbe_write_pm(cpu, pm_interval, NUM_INTERVAL_CYC);
cpu              1590 arch/powerpc/oprofile/op_model_cell.c 		write_pm_cntrl(cpu);
cpu              1591 arch/powerpc/oprofile/op_model_cell.c 		cbe_enable_pm(cpu);
cpu              1599 arch/powerpc/oprofile/op_model_cell.c 	u32 cpu;
cpu              1606 arch/powerpc/oprofile/op_model_cell.c 	cpu = smp_processor_id();
cpu              1621 arch/powerpc/oprofile/op_model_cell.c 	cbe_disable_pm(cpu);
cpu              1623 arch/powerpc/oprofile/op_model_cell.c 	interrupt_mask = cbe_get_and_clear_pm_interrupts(cpu);
cpu              1640 arch/powerpc/oprofile/op_model_cell.c 				cbe_write_ctr(cpu, i, reset_value[i]);
cpu              1652 arch/powerpc/oprofile/op_model_cell.c 		cbe_enable_pm_interrupts(cpu, hdw_thread,
cpu              1664 arch/powerpc/oprofile/op_model_cell.c 		cbe_enable_pm(cpu);
cpu              2264 arch/powerpc/perf/core-book3s.c static int power_pmu_prepare_cpu(unsigned int cpu)
cpu              2266 arch/powerpc/perf/core-book3s.c 	struct cpu_hw_events *cpuhw = &per_cpu(cpu_hw_events, cpu);
cpu               700 arch/powerpc/perf/core-fsl-emb.c void hw_perf_event_setup(int cpu)
cpu               702 arch/powerpc/perf/core-fsl-emb.c 	struct cpu_hw_events *cpuhw = &per_cpu(cpu_hw_events, cpu);
cpu                50 arch/powerpc/perf/generic-compat-pmu.c GENERIC_EVENT_ATTR(cpu-cycles,			PM_CYC);
cpu               315 arch/powerpc/perf/imc-pmu.c static struct imc_pmu_ref *get_nest_pmu_ref(int cpu)
cpu               317 arch/powerpc/perf/imc-pmu.c 	return per_cpu(local_nest_imc_refc, cpu);
cpu               333 arch/powerpc/perf/imc-pmu.c static int ppc_nest_imc_cpu_offline(unsigned int cpu)
cpu               343 arch/powerpc/perf/imc-pmu.c 	if (!cpumask_test_and_clear_cpu(cpu, &nest_imc_cpumask))
cpu               363 arch/powerpc/perf/imc-pmu.c 	nid = cpu_to_node(cpu);
cpu               371 arch/powerpc/perf/imc-pmu.c 	if (unlikely(target == cpu))
cpu               372 arch/powerpc/perf/imc-pmu.c 		target = cpumask_any_but(l_cpumask, cpu);
cpu               380 arch/powerpc/perf/imc-pmu.c 		nest_change_cpu_context(cpu, target);
cpu               383 arch/powerpc/perf/imc-pmu.c 				       get_hard_smp_processor_id(cpu));
cpu               388 arch/powerpc/perf/imc-pmu.c 		ref = get_nest_pmu_ref(cpu);
cpu               397 arch/powerpc/perf/imc-pmu.c static int ppc_nest_imc_cpu_online(unsigned int cpu)
cpu               404 arch/powerpc/perf/imc-pmu.c 	l_cpumask = cpumask_of_node(cpu_to_node(cpu));
cpu               418 arch/powerpc/perf/imc-pmu.c 				     get_hard_smp_processor_id(cpu));
cpu               423 arch/powerpc/perf/imc-pmu.c 	cpumask_set_cpu(cpu, &nest_imc_cpumask);
cpu               440 arch/powerpc/perf/imc-pmu.c 	if (event->cpu < 0)
cpu               443 arch/powerpc/perf/imc-pmu.c 	node_id = cpu_to_node(event->cpu);
cpu               451 arch/powerpc/perf/imc-pmu.c 	ref = get_nest_pmu_ref(event->cpu);
cpu               474 arch/powerpc/perf/imc-pmu.c 					    get_hard_smp_processor_id(event->cpu));
cpu               503 arch/powerpc/perf/imc-pmu.c 	if (event->cpu < 0)
cpu               516 arch/powerpc/perf/imc-pmu.c 	chip_id = cpu_to_chip_id(event->cpu);
cpu               539 arch/powerpc/perf/imc-pmu.c 	node_id = cpu_to_node(event->cpu);
cpu               546 arch/powerpc/perf/imc-pmu.c 	ref = get_nest_pmu_ref(event->cpu);
cpu               553 arch/powerpc/perf/imc-pmu.c 					     get_hard_smp_processor_id(event->cpu));
cpu               576 arch/powerpc/perf/imc-pmu.c static int core_imc_mem_init(int cpu, int size)
cpu               578 arch/powerpc/perf/imc-pmu.c 	int nid, rc = 0, core_id = (cpu / threads_per_core);
cpu               586 arch/powerpc/perf/imc-pmu.c 	nid = cpu_to_node(cpu);
cpu               604 arch/powerpc/perf/imc-pmu.c 				get_hard_smp_processor_id(cpu));
cpu               613 arch/powerpc/perf/imc-pmu.c static bool is_core_imc_mem_inited(int cpu)
cpu               616 arch/powerpc/perf/imc-pmu.c 	int core_id = (cpu / threads_per_core);
cpu               625 arch/powerpc/perf/imc-pmu.c static int ppc_core_imc_cpu_online(unsigned int cpu)
cpu               632 arch/powerpc/perf/imc-pmu.c 	l_cpumask = cpu_sibling_mask(cpu);
cpu               638 arch/powerpc/perf/imc-pmu.c 	if (!is_core_imc_mem_inited(cpu)) {
cpu               639 arch/powerpc/perf/imc-pmu.c 		ret = core_imc_mem_init(cpu, core_imc_pmu->counter_mem_size);
cpu               641 arch/powerpc/perf/imc-pmu.c 			pr_info("core_imc memory allocation for cpu %d failed\n", cpu);
cpu               647 arch/powerpc/perf/imc-pmu.c 	cpumask_set_cpu(cpu, &core_imc_cpumask);
cpu               651 arch/powerpc/perf/imc-pmu.c static int ppc_core_imc_cpu_offline(unsigned int cpu)
cpu               661 arch/powerpc/perf/imc-pmu.c 	if (!cpumask_test_and_clear_cpu(cpu, &core_imc_cpumask))
cpu               679 arch/powerpc/perf/imc-pmu.c 	ncpu = cpumask_last(cpu_sibling_mask(cpu));
cpu               681 arch/powerpc/perf/imc-pmu.c 	if (unlikely(ncpu == cpu))
cpu               682 arch/powerpc/perf/imc-pmu.c 		ncpu = cpumask_any_but(cpu_sibling_mask(cpu), cpu);
cpu               686 arch/powerpc/perf/imc-pmu.c 		perf_pmu_migrate_context(&core_imc_pmu->pmu, cpu, ncpu);
cpu               694 arch/powerpc/perf/imc-pmu.c 				       get_hard_smp_processor_id(cpu));
cpu               695 arch/powerpc/perf/imc-pmu.c 		core_id = cpu / threads_per_core;
cpu               718 arch/powerpc/perf/imc-pmu.c 	if (event->cpu < 0)
cpu               726 arch/powerpc/perf/imc-pmu.c 	core_id = event->cpu / threads_per_core;
cpu               751 arch/powerpc/perf/imc-pmu.c 					    get_hard_smp_processor_id(event->cpu));
cpu               779 arch/powerpc/perf/imc-pmu.c 	if (event->cpu < 0)
cpu               789 arch/powerpc/perf/imc-pmu.c 	if (!is_core_imc_mem_inited(event->cpu))
cpu               792 arch/powerpc/perf/imc-pmu.c 	core_id = event->cpu / threads_per_core;
cpu               811 arch/powerpc/perf/imc-pmu.c 					     get_hard_smp_processor_id(event->cpu));
cpu               873 arch/powerpc/perf/imc-pmu.c static int ppc_thread_imc_cpu_online(unsigned int cpu)
cpu               875 arch/powerpc/perf/imc-pmu.c 	return thread_imc_mem_alloc(cpu, thread_imc_mem_size);
cpu               878 arch/powerpc/perf/imc-pmu.c static int ppc_thread_imc_cpu_offline(unsigned int cpu)
cpu              1129 arch/powerpc/perf/imc-pmu.c static int ppc_trace_imc_cpu_online(unsigned int cpu)
cpu              1131 arch/powerpc/perf/imc-pmu.c 	return trace_imc_mem_alloc(cpu, trace_imc_mem_size);
cpu              1134 arch/powerpc/perf/imc-pmu.c static int ppc_trace_imc_cpu_offline(unsigned int cpu)
cpu              1372 arch/powerpc/perf/imc-pmu.c 	int nid, i, cpu;
cpu              1401 arch/powerpc/perf/imc-pmu.c 	for_each_possible_cpu(cpu) {
cpu              1402 arch/powerpc/perf/imc-pmu.c 		nid = cpu_to_node(cpu);
cpu              1405 arch/powerpc/perf/imc-pmu.c 				per_cpu(local_nest_imc_refc, cpu) = &nest_imc_refc[i];
cpu              1532 arch/powerpc/perf/imc-pmu.c 	int nr_cores, cpu, res = -ENOMEM;
cpu              1584 arch/powerpc/perf/imc-pmu.c 		for_each_online_cpu(cpu) {
cpu              1585 arch/powerpc/perf/imc-pmu.c 			res = thread_imc_mem_alloc(cpu, pmu_ptr->counter_mem_size);
cpu              1607 arch/powerpc/perf/imc-pmu.c 		for_each_online_cpu(cpu) {
cpu              1608 arch/powerpc/perf/imc-pmu.c 			res = trace_imc_mem_alloc(cpu, trace_imc_mem_size);
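
The imc-pmu.c hotplug callbacks above show the "designated CPU" pattern: when the CPU currently owning a PMU context goes offline, another online CPU is chosen with cpumask_any_but() and perf events are migrated there with perf_pmu_migrate_context(). A hedged sketch of such an offline handler; the PMU object and mask are illustrative:

#include <linux/cpumask.h>
#include <linux/perf_event.h>

static cpumask_t my_active_cpus;	/* one designated CPU per PMU domain */
static struct pmu my_pmu;		/* illustrative PMU, registered elsewhere */

static int my_pmu_cpu_offline(unsigned int cpu)
{
	unsigned int target;

	/* Nothing to do unless this CPU was the designated one. */
	if (!cpumask_test_and_clear_cpu(cpu, &my_active_cpus))
		return 0;

	/* Pick any other online CPU (the real code prefers a sibling). */
	target = cpumask_any_but(cpu_online_mask, cpu);
	if (target >= nr_cpu_ids)
		return 0;		/* last CPU going down */

	cpumask_set_cpu(target, &my_active_cpus);
	/* Move perf events that were bound to @cpu over to @target. */
	perf_pmu_migrate_context(&my_pmu, cpu, target);
	return 0;
}
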
cpu               374 arch/powerpc/perf/power7-pmu.c GENERIC_EVENT_ATTR(cpu-cycles,			PM_CYC);
cpu               123 arch/powerpc/perf/power8-pmu.c GENERIC_EVENT_ATTR(cpu-cycles,			PM_CYC);
cpu               152 arch/powerpc/perf/power9-pmu.c GENERIC_EVENT_ATTR(cpu-cycles,			PM_CYC);
cpu                80 arch/powerpc/platforms/44x/iss4xx.c static void smp_iss4xx_setup_cpu(int cpu)
cpu                85 arch/powerpc/platforms/44x/iss4xx.c static int smp_iss4xx_kick_cpu(int cpu)
cpu                87 arch/powerpc/platforms/44x/iss4xx.c 	struct device_node *cpunode = of_get_cpu_node(cpu, NULL);
cpu               101 arch/powerpc/platforms/44x/iss4xx.c 		pr_err("CPU%d: Can't start, missing cpu-release-addr !\n", cpu);
cpu               109 arch/powerpc/platforms/44x/iss4xx.c 	pr_debug("CPU%d: Spin table mapped at %p\n", cpu, spin_table);
cpu               111 arch/powerpc/platforms/44x/iss4xx.c 	spin_table[3] = cpu;
cpu               147 arch/powerpc/platforms/44x/ppc476.c static void smp_ppc47x_setup_cpu(int cpu)
cpu               152 arch/powerpc/platforms/44x/ppc476.c static int smp_ppc47x_kick_cpu(int cpu)
cpu               154 arch/powerpc/platforms/44x/ppc476.c 	struct device_node *cpunode = of_get_cpu_node(cpu, NULL);
cpu               170 arch/powerpc/platforms/44x/ppc476.c 		       cpu);
cpu               180 arch/powerpc/platforms/44x/ppc476.c 	pr_debug("CPU%d: Spin table mapped at %p\n", cpu, spin_table);
cpu               182 arch/powerpc/platforms/44x/ppc476.c 	spin_table[3] = cpu;
cpu                20 arch/powerpc/platforms/85xx/mpc85xx_pm_ops.c static void mpc85xx_irq_mask(int cpu)
cpu                25 arch/powerpc/platforms/85xx/mpc85xx_pm_ops.c static void mpc85xx_irq_unmask(int cpu)
cpu                30 arch/powerpc/platforms/85xx/mpc85xx_pm_ops.c static void mpc85xx_cpu_die(int cpu)
cpu                48 arch/powerpc/platforms/85xx/mpc85xx_pm_ops.c static void mpc85xx_cpu_up_prepare(int cpu)
cpu               117 arch/powerpc/platforms/85xx/smp.c 	unsigned int cpu = smp_processor_id();
cpu               122 arch/powerpc/platforms/85xx/smp.c 	qoriq_pm_ops->irq_mask(cpu);
cpu               129 arch/powerpc/platforms/85xx/smp.c 	generic_set_cpu_dead(cpu);
cpu               133 arch/powerpc/platforms/85xx/smp.c 	qoriq_pm_ops->cpu_die(cpu);
cpu               139 arch/powerpc/platforms/85xx/smp.c static void qoriq_cpu_kill(unsigned int cpu)
cpu               144 arch/powerpc/platforms/85xx/smp.c 		if (is_cpu_dead(cpu)) {
cpu               146 arch/powerpc/platforms/85xx/smp.c 			paca_ptrs[cpu]->cpu_start = 0;
cpu               152 arch/powerpc/platforms/85xx/smp.c 	pr_err("CPU%d didn't die...\n", cpu);
cpu               181 arch/powerpc/platforms/85xx/smp.c 	int cpu = *(const int *)info;
cpu               184 arch/powerpc/platforms/85xx/smp.c 	book3e_start_thread(cpu_thread_in_core(cpu), inia);
cpu               188 arch/powerpc/platforms/85xx/smp.c static int smp_85xx_start_cpu(int cpu)
cpu               195 arch/powerpc/platforms/85xx/smp.c 	int hw_cpu = get_hard_smp_processor_id(cpu);
cpu               198 arch/powerpc/platforms/85xx/smp.c 	np = of_get_cpu_node(cpu, NULL);
cpu               201 arch/powerpc/platforms/85xx/smp.c 		pr_err("No cpu-release-addr for cpu %d\n", cpu);
cpu               224 arch/powerpc/platforms/85xx/smp.c 		qoriq_pm_ops->cpu_up_prepare(cpu);
cpu               232 arch/powerpc/platforms/85xx/smp.c 		mpic_reset_core(cpu);
cpu               380 arch/powerpc/platforms/85xx/smp.c 	int cpu = smp_processor_id();
cpu               381 arch/powerpc/platforms/85xx/smp.c 	int sibling = cpu_last_thread_sibling(cpu);
cpu               392 arch/powerpc/platforms/85xx/smp.c 	if (cpu == crashing_cpu && cpu_thread_in_core(cpu) != 0) {
cpu               399 arch/powerpc/platforms/85xx/smp.c 		disable_cpu = cpu_first_thread_sibling(cpu);
cpu               401 arch/powerpc/platforms/85xx/smp.c 		   cpu_thread_in_core(cpu) == 0 &&
cpu                64 arch/powerpc/platforms/8xx/m8xx_setup.c 	struct device_node *cpu;
cpu                69 arch/powerpc/platforms/8xx/m8xx_setup.c 	cpu = of_get_cpu_node(0, NULL);
cpu                71 arch/powerpc/platforms/8xx/m8xx_setup.c 	if (cpu) {
cpu                72 arch/powerpc/platforms/8xx/m8xx_setup.c 		fp = of_get_property(cpu, name, NULL);
cpu                78 arch/powerpc/platforms/8xx/m8xx_setup.c 		of_node_put(cpu);
cpu                90 arch/powerpc/platforms/8xx/m8xx_setup.c 	struct device_node *cpu;
cpu               150 arch/powerpc/platforms/8xx/m8xx_setup.c 	cpu = of_get_cpu_node(0, NULL);
cpu               151 arch/powerpc/platforms/8xx/m8xx_setup.c 	virq= irq_of_parse_and_map(cpu, 0);
cpu               152 arch/powerpc/platforms/8xx/m8xx_setup.c 	of_node_put(cpu);
cpu                91 arch/powerpc/platforms/cell/cbe_regs.c struct cbe_pmd_regs __iomem *cbe_get_cpu_pmd_regs(int cpu)
cpu                93 arch/powerpc/platforms/cell/cbe_regs.c 	struct cbe_regs_map *map = cbe_thread_map[cpu].regs;
cpu               108 arch/powerpc/platforms/cell/cbe_regs.c struct cbe_pmd_shadow_regs *cbe_get_cpu_pmd_shadow_regs(int cpu)
cpu               110 arch/powerpc/platforms/cell/cbe_regs.c 	struct cbe_regs_map *map = cbe_thread_map[cpu].regs;
cpu               124 arch/powerpc/platforms/cell/cbe_regs.c struct cbe_iic_regs __iomem *cbe_get_cpu_iic_regs(int cpu)
cpu               126 arch/powerpc/platforms/cell/cbe_regs.c 	struct cbe_regs_map *map = cbe_thread_map[cpu].regs;
cpu               140 arch/powerpc/platforms/cell/cbe_regs.c struct cbe_mic_tm_regs __iomem *cbe_get_cpu_mic_tm_regs(int cpu)
cpu               142 arch/powerpc/platforms/cell/cbe_regs.c 	struct cbe_regs_map *map = cbe_thread_map[cpu].regs;
cpu               149 arch/powerpc/platforms/cell/cbe_regs.c u32 cbe_get_hw_thread_id(int cpu)
cpu               151 arch/powerpc/platforms/cell/cbe_regs.c 	return cbe_thread_map[cpu].thread_id;
cpu               155 arch/powerpc/platforms/cell/cbe_regs.c u32 cbe_cpu_to_node(int cpu)
cpu               157 arch/powerpc/platforms/cell/cbe_regs.c 	return cbe_thread_map[cpu].cbe_id;
cpu               212 arch/powerpc/platforms/cell/cbe_regs.c 		struct device_node *cpu;
cpu               219 arch/powerpc/platforms/cell/cbe_regs.c 		cpu = map->cpu_node;
cpu               221 arch/powerpc/platforms/cell/cbe_regs.c 		prop = of_get_property(cpu, "pervasive", NULL);
cpu               225 arch/powerpc/platforms/cell/cbe_regs.c 		prop = of_get_property(cpu, "iic", NULL);
cpu               229 arch/powerpc/platforms/cell/cbe_regs.c 		prop = of_get_property(cpu, "mic-tm", NULL);
cpu               240 arch/powerpc/platforms/cell/cbe_regs.c 	struct device_node *cpu;
cpu               250 arch/powerpc/platforms/cell/cbe_regs.c 	for_each_node_by_type(cpu, "cpu") {
cpu               261 arch/powerpc/platforms/cell/cbe_regs.c 			of_node_put(cpu);
cpu               264 arch/powerpc/platforms/cell/cbe_regs.c 		map->cpu_node = cpu;
cpu               269 arch/powerpc/platforms/cell/cbe_regs.c 			if (thread->cpu_node == cpu) {
cpu               296 arch/powerpc/platforms/cell/cbe_thermal.c 	int cpu;
cpu               338 arch/powerpc/platforms/cell/cbe_thermal.c 	for_each_possible_cpu (cpu) {
cpu               339 arch/powerpc/platforms/cell/cbe_thermal.c 		pr_debug("processing cpu %d\n", cpu);
cpu               340 arch/powerpc/platforms/cell/cbe_thermal.c 		dev = get_cpu_device(cpu);
cpu                33 arch/powerpc/platforms/cell/cpufreq_spudemand.c 	int cpu;
cpu                36 arch/powerpc/platforms/cell/cpufreq_spudemand.c 	cpu = info->policy->cpu;
cpu                37 arch/powerpc/platforms/cell/cpufreq_spudemand.c 	busy_spus = atomic_read(&cbe_spu_info[cpu_to_node(cpu)].busy_spus);
cpu                41 arch/powerpc/platforms/cell/cpufreq_spudemand.c 			cpu, busy_spus, info->busy_spus);
cpu                61 arch/powerpc/platforms/cell/cpufreq_spudemand.c 	schedule_delayed_work_on(info->policy->cpu, &info->work, delay);
cpu                68 arch/powerpc/platforms/cell/cpufreq_spudemand.c 	schedule_delayed_work_on(info->policy->cpu, &info->work, delay);
cpu                78 arch/powerpc/platforms/cell/cpufreq_spudemand.c 	unsigned int cpu = policy->cpu;
cpu                79 arch/powerpc/platforms/cell/cpufreq_spudemand.c 	struct spu_gov_info_struct *info = &per_cpu(spu_gov_info, cpu);
cpu                83 arch/powerpc/platforms/cell/cpufreq_spudemand.c 	if (!cpu_online(cpu)) {
cpu                84 arch/powerpc/platforms/cell/cpufreq_spudemand.c 		printk(KERN_ERR "cpu %d is not online\n", cpu);
cpu               109 arch/powerpc/platforms/cell/cpufreq_spudemand.c 	unsigned int cpu = policy->cpu;
cpu               110 arch/powerpc/platforms/cell/cpufreq_spudemand.c 	struct spu_gov_info_struct *info = &per_cpu(spu_gov_info, cpu);
cpu               157 arch/powerpc/platforms/cell/interrupt.c u8 iic_get_target_id(int cpu)
cpu               159 arch/powerpc/platforms/cell/interrupt.c 	return per_cpu(cpu_iic, cpu).target_id;
cpu               172 arch/powerpc/platforms/cell/interrupt.c void iic_message_pass(int cpu, int msg)
cpu               174 arch/powerpc/platforms/cell/interrupt.c 	out_be64(&per_cpu(cpu_iic, cpu).regs->generate, (0xf - msg) << 4);
cpu               379 arch/powerpc/platforms/cell/interrupt.c void iic_set_interrupt_routing(int cpu, int thread, int priority)
cpu               381 arch/powerpc/platforms/cell/interrupt.c 	struct cbe_iic_regs __iomem *iic_regs = cbe_get_cpu_iic_regs(cpu);
cpu               383 arch/powerpc/platforms/cell/interrupt.c 	int node = cpu >> 1;
cpu                79 arch/powerpc/platforms/cell/interrupt.h extern void iic_message_pass(int cpu, int msg);
cpu                83 arch/powerpc/platforms/cell/interrupt.h extern u8 iic_get_target_id(int cpu);
cpu                87 arch/powerpc/platforms/cell/interrupt.h extern void iic_set_interrupt_routing(int cpu, int thread, int priority);
cpu               107 arch/powerpc/platforms/cell/pervasive.c 	int cpu;
cpu               112 arch/powerpc/platforms/cell/pervasive.c 	for_each_possible_cpu(cpu) {
cpu               113 arch/powerpc/platforms/cell/pervasive.c 		struct cbe_pmd_regs __iomem *regs = cbe_get_cpu_pmd_regs(cpu);
cpu                36 arch/powerpc/platforms/cell/pmu.c 		pmd_regs = cbe_get_cpu_pmd_regs(cpu);		\
cpu                37 arch/powerpc/platforms/cell/pmu.c 		shadow_regs = cbe_get_cpu_pmd_shadow_regs(cpu);	\
cpu                45 arch/powerpc/platforms/cell/pmu.c 		shadow_regs = cbe_get_cpu_pmd_shadow_regs(cpu);	\
cpu                52 arch/powerpc/platforms/cell/pmu.c 		pmd_regs = cbe_get_cpu_pmd_regs(cpu);		\
cpu                61 arch/powerpc/platforms/cell/pmu.c u32 cbe_read_phys_ctr(u32 cpu, u32 phys_ctr)
cpu                80 arch/powerpc/platforms/cell/pmu.c void cbe_write_phys_ctr(u32 cpu, u32 phys_ctr, u32 val)
cpu                92 arch/powerpc/platforms/cell/pmu.c 		pm_ctrl = cbe_read_pm(cpu, pm_control);
cpu                98 arch/powerpc/platforms/cell/pmu.c 			cbe_write_pm(cpu, pm_control, pm_ctrl);
cpu               100 arch/powerpc/platforms/cell/pmu.c 			shadow_regs = cbe_get_cpu_pmd_shadow_regs(cpu);
cpu               113 arch/powerpc/platforms/cell/pmu.c u32 cbe_read_ctr(u32 cpu, u32 ctr)
cpu               118 arch/powerpc/platforms/cell/pmu.c 	val = cbe_read_phys_ctr(cpu, phys_ctr);
cpu               120 arch/powerpc/platforms/cell/pmu.c 	if (cbe_get_ctr_size(cpu, phys_ctr) == 16)
cpu               127 arch/powerpc/platforms/cell/pmu.c void cbe_write_ctr(u32 cpu, u32 ctr, u32 val)
cpu               134 arch/powerpc/platforms/cell/pmu.c 	if (cbe_get_ctr_size(cpu, phys_ctr) == 16) {
cpu               135 arch/powerpc/platforms/cell/pmu.c 		phys_val = cbe_read_phys_ctr(cpu, phys_ctr);
cpu               143 arch/powerpc/platforms/cell/pmu.c 	cbe_write_phys_ctr(cpu, phys_ctr, val);
cpu               152 arch/powerpc/platforms/cell/pmu.c u32 cbe_read_pm07_control(u32 cpu, u32 ctr)
cpu               163 arch/powerpc/platforms/cell/pmu.c void cbe_write_pm07_control(u32 cpu, u32 ctr, u32 val)
cpu               174 arch/powerpc/platforms/cell/pmu.c u32 cbe_read_pm(u32 cpu, enum pm_reg_name reg)
cpu               216 arch/powerpc/platforms/cell/pmu.c void cbe_write_pm(u32 cpu, enum pm_reg_name reg, u32 val)
cpu               258 arch/powerpc/platforms/cell/pmu.c u32 cbe_get_ctr_size(u32 cpu, u32 phys_ctr)
cpu               263 arch/powerpc/platforms/cell/pmu.c 		pm_ctrl = cbe_read_pm(cpu, pm_control);
cpu               271 arch/powerpc/platforms/cell/pmu.c void cbe_set_ctr_size(u32 cpu, u32 phys_ctr, u32 ctr_size)
cpu               276 arch/powerpc/platforms/cell/pmu.c 		pm_ctrl = cbe_read_pm(cpu, pm_control);
cpu               286 arch/powerpc/platforms/cell/pmu.c 		cbe_write_pm(cpu, pm_control, pm_ctrl);
cpu               296 arch/powerpc/platforms/cell/pmu.c void cbe_enable_pm(u32 cpu)
cpu               301 arch/powerpc/platforms/cell/pmu.c 	shadow_regs = cbe_get_cpu_pmd_shadow_regs(cpu);
cpu               304 arch/powerpc/platforms/cell/pmu.c 	pm_ctrl = cbe_read_pm(cpu, pm_control) | CBE_PM_ENABLE_PERF_MON;
cpu               305 arch/powerpc/platforms/cell/pmu.c 	cbe_write_pm(cpu, pm_control, pm_ctrl);
cpu               309 arch/powerpc/platforms/cell/pmu.c void cbe_disable_pm(u32 cpu)
cpu               312 arch/powerpc/platforms/cell/pmu.c 	pm_ctrl = cbe_read_pm(cpu, pm_control) & ~CBE_PM_ENABLE_PERF_MON;
cpu               313 arch/powerpc/platforms/cell/pmu.c 	cbe_write_pm(cpu, pm_control, pm_ctrl);
cpu               323 arch/powerpc/platforms/cell/pmu.c void cbe_read_trace_buffer(u32 cpu, u64 *buf)
cpu               325 arch/powerpc/platforms/cell/pmu.c 	struct cbe_pmd_regs __iomem *pmd_regs = cbe_get_cpu_pmd_regs(cpu);
cpu               336 arch/powerpc/platforms/cell/pmu.c u32 cbe_get_and_clear_pm_interrupts(u32 cpu)
cpu               339 arch/powerpc/platforms/cell/pmu.c 	return cbe_read_pm(cpu, pm_status);
cpu               343 arch/powerpc/platforms/cell/pmu.c void cbe_enable_pm_interrupts(u32 cpu, u32 thread, u32 mask)
cpu               346 arch/powerpc/platforms/cell/pmu.c 	iic_set_interrupt_routing(cpu, thread, 0);
cpu               350 arch/powerpc/platforms/cell/pmu.c 		cbe_write_pm(cpu, pm_status, mask);
cpu               354 arch/powerpc/platforms/cell/pmu.c void cbe_disable_pm_interrupts(u32 cpu)
cpu               356 arch/powerpc/platforms/cell/pmu.c 	cbe_get_and_clear_pm_interrupts(cpu);
cpu               357 arch/powerpc/platforms/cell/pmu.c 	cbe_write_pm(cpu, pm_status, 0);
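
The cbe_enable_pm()/cbe_disable_pm() entries above show a read-modify-write of the pm_control register: read it, set or clear CBE_PM_ENABLE_PERF_MON, write it back. A minimal standalone sketch of that pattern, using hypothetical stand-ins for cbe_read_pm()/cbe_write_pm() and for the enable bit:

#include <stdint.h>

#define EXAMPLE_PM_ENABLE (1u << 31)   /* hypothetical stand-in for CBE_PM_ENABLE_PERF_MON */

static uint32_t example_pm_control[4]; /* hypothetical stand-in for the per-CPU pm_control register */

static uint32_t example_read_pm(uint32_t cpu)              { return example_pm_control[cpu]; }
static void     example_write_pm(uint32_t cpu, uint32_t v) { example_pm_control[cpu] = v; }

/* Enable: read pm_control, OR in the enable bit, write it back. */
static void example_enable_pm(uint32_t cpu)
{
	example_write_pm(cpu, example_read_pm(cpu) | EXAMPLE_PM_ENABLE);
}

/* Disable: read pm_control, clear the enable bit, write it back. */
static void example_disable_pm(uint32_t cpu)
{
	example_write_pm(cpu, example_read_pm(cpu) & ~EXAMPLE_PM_ENABLE);
}
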
cpu                27 arch/powerpc/platforms/cell/ras.c static void dump_fir(int cpu)
cpu                29 arch/powerpc/platforms/cell/ras.c 	struct cbe_pmd_regs __iomem *pregs = cbe_get_cpu_pmd_regs(cpu);
cpu                30 arch/powerpc/platforms/cell/ras.c 	struct cbe_iic_regs __iomem *iregs = cbe_get_cpu_iic_regs(cpu);
cpu                54 arch/powerpc/platforms/cell/ras.c 	int cpu = smp_processor_id();
cpu                56 arch/powerpc/platforms/cell/ras.c 	printk(KERN_ERR "System Error Interrupt on CPU %d !\n", cpu);
cpu                57 arch/powerpc/platforms/cell/ras.c 	dump_fir(cpu);
cpu                63 arch/powerpc/platforms/cell/ras.c 	int cpu = smp_processor_id();
cpu                69 arch/powerpc/platforms/cell/ras.c 	printk(KERN_ERR "Unhandled Maintenance interrupt on CPU %d !\n", cpu);
cpu                75 arch/powerpc/platforms/cell/ras.c 	int cpu = smp_processor_id();
cpu                81 arch/powerpc/platforms/cell/ras.c 	printk(KERN_ERR "Unhandled Thermal interrupt on CPU %d !\n", cpu);
cpu                87 arch/powerpc/platforms/cell/ras.c 	int cpu = smp_processor_id();
cpu                89 arch/powerpc/platforms/cell/ras.c 	printk(KERN_ERR "Machine Check Interrupt on CPU %d !\n", cpu);
cpu                90 arch/powerpc/platforms/cell/ras.c 	dump_fir(cpu);
cpu               101 arch/powerpc/platforms/cell/smp.c static void smp_cell_setup_cpu(int cpu)
cpu               103 arch/powerpc/platforms/cell/smp.c 	if (cpu != boot_cpuid)
cpu                63 arch/powerpc/platforms/cell/spu_priv1_mmio.c static void cpu_affinity_set(struct spu *spu, int cpu)
cpu                70 arch/powerpc/platforms/cell/spu_priv1_mmio.c 			*cpumask = cpumask_of_node(cpu_to_node(cpu));
cpu                76 arch/powerpc/platforms/cell/spu_priv1_mmio.c 	target = iic_get_target_id(cpu);
cpu                21 arch/powerpc/platforms/pasemi/pasemi.h extern void restore_astate(int cpu);
cpu                28 arch/powerpc/platforms/pasemi/pasemi.h static inline void restore_astate(int cpu)
cpu               319 arch/powerpc/platforms/pasemi/setup.c 	int cpu = smp_processor_id();
cpu               335 arch/powerpc/platforms/pasemi/setup.c 	pr_err("Machine Check on CPU %d\n", cpu);
cpu               268 arch/powerpc/platforms/powermac/setup.c 	struct device_node *cpu, *ic;
cpu               278 arch/powerpc/platforms/powermac/setup.c 	for_each_of_cpu_node(cpu) {
cpu               279 arch/powerpc/platforms/powermac/setup.c 		fp = of_get_property(cpu, "clock-frequency", NULL);
cpu               290 arch/powerpc/platforms/powermac/setup.c 			of_node_put(cpu);
cpu               130 arch/powerpc/platforms/powermac/smp.c static inline void psurge_set_ipi(int cpu)
cpu               134 arch/powerpc/platforms/powermac/smp.c 	if (cpu == 0)
cpu               139 arch/powerpc/platforms/powermac/smp.c 		PSURGE_QUAD_OUT(PSURGE_QUAD_IRQ_SET, 1 << cpu);
cpu               142 arch/powerpc/platforms/powermac/smp.c static inline void psurge_clr_ipi(int cpu)
cpu               144 arch/powerpc/platforms/powermac/smp.c 	if (cpu > 0) {
cpu               151 arch/powerpc/platforms/powermac/smp.c 			PSURGE_QUAD_OUT(PSURGE_QUAD_IRQ_CLR, 1 << cpu);
cpu               170 arch/powerpc/platforms/powermac/smp.c static void smp_psurge_cause_ipi(int cpu)
cpu               172 arch/powerpc/platforms/powermac/smp.c 	psurge_set_ipi(cpu);
cpu               667 arch/powerpc/platforms/powermac/smp.c static void core99_init_caches(int cpu)
cpu               673 arch/powerpc/platforms/powermac/smp.c 	if (cpu == 0) {
cpu               677 arch/powerpc/platforms/powermac/smp.c 		printk("CPU%d: L2CR was %lx\n", cpu, _get_L2CR());
cpu               680 arch/powerpc/platforms/powermac/smp.c 		printk("CPU%d: L2CR set to %lx\n", cpu, core99_l2_cache);
cpu               686 arch/powerpc/platforms/powermac/smp.c 	if (cpu == 0){
cpu               690 arch/powerpc/platforms/powermac/smp.c 		printk("CPU%d: L3CR was %lx\n", cpu, _get_L3CR());
cpu               693 arch/powerpc/platforms/powermac/smp.c 		printk("CPU%d: L3CR set to %lx\n", cpu, core99_l3_cache);
cpu               724 arch/powerpc/platforms/powermac/smp.c 		struct device_node *cpu;
cpu               728 arch/powerpc/platforms/powermac/smp.c 		cpu = of_find_node_by_type(NULL, "cpu");
cpu               729 arch/powerpc/platforms/powermac/smp.c 		if (cpu != NULL) {
cpu               730 arch/powerpc/platforms/powermac/smp.c 			tbprop = of_get_property(cpu, "timebase-enable", NULL);
cpu               733 arch/powerpc/platforms/powermac/smp.c 			of_node_put(cpu);
cpu               853 arch/powerpc/platforms/powermac/smp.c static int smp_core99_cpu_prepare(unsigned int cpu)
cpu               869 arch/powerpc/platforms/powermac/smp.c static int smp_core99_cpu_online(unsigned int cpu)
cpu               926 arch/powerpc/platforms/powermac/smp.c 	int cpu = smp_processor_id();
cpu               930 arch/powerpc/platforms/powermac/smp.c 	pr_debug("CPU%d offline\n", cpu);
cpu               931 arch/powerpc/platforms/powermac/smp.c 	generic_set_cpu_dead(cpu);
cpu               941 arch/powerpc/platforms/powermac/smp.c 	int cpu = smp_processor_id();
cpu               952 arch/powerpc/platforms/powermac/smp.c 	printk(KERN_INFO "CPU#%d offline\n", cpu);
cpu               953 arch/powerpc/platforms/powermac/smp.c 	generic_set_cpu_dead(cpu);
cpu              1015 arch/powerpc/platforms/powermac/smp.c 		int cpu;
cpu              1017 arch/powerpc/platforms/powermac/smp.c 		for (cpu = 1; cpu < 4 && cpu < NR_CPUS; ++cpu)
cpu              1018 arch/powerpc/platforms/powermac/smp.c 			set_cpu_possible(cpu, true);
cpu                66 arch/powerpc/platforms/powernv/idle.c 	int cpu;
cpu                83 arch/powerpc/platforms/powernv/idle.c 	for_each_present_cpu(cpu) {
cpu                84 arch/powerpc/platforms/powernv/idle.c 		uint64_t pir = get_hard_smp_processor_id(cpu);
cpu                85 arch/powerpc/platforms/powernv/idle.c 		uint64_t hsprg0_val = (uint64_t)paca_ptrs[cpu];
cpu               108 arch/powerpc/platforms/powernv/idle.c 		if (cpu_thread_in_core(cpu) == 0) {
cpu               227 arch/powerpc/platforms/powernv/idle.c 	int cpu = raw_smp_processor_id();
cpu               228 arch/powerpc/platforms/powernv/idle.c 	int first = cpu_first_thread_sibling(cpu);
cpu               229 arch/powerpc/platforms/powernv/idle.c 	int thread_nr = cpu_thread_in_core(cpu);
cpu               237 arch/powerpc/platforms/powernv/idle.c 	int cpu = raw_smp_processor_id();
cpu               238 arch/powerpc/platforms/powernv/idle.c 	int first = cpu_first_thread_sibling(cpu);
cpu               239 arch/powerpc/platforms/powernv/idle.c 	int thread_nr = cpu_thread_in_core(cpu);
cpu               247 arch/powerpc/platforms/powernv/idle.c 	int cpu = raw_smp_processor_id();
cpu               248 arch/powerpc/platforms/powernv/idle.c 	int first = cpu_first_thread_sibling(cpu);
cpu               257 arch/powerpc/platforms/powernv/idle.c 	int cpu = raw_smp_processor_id();
cpu               258 arch/powerpc/platforms/powernv/idle.c 	int first = cpu_first_thread_sibling(cpu);
cpu               259 arch/powerpc/platforms/powernv/idle.c 	unsigned long thread = 1UL << cpu_thread_in_core(cpu);
cpu               278 arch/powerpc/platforms/powernv/idle.c 	int cpu = raw_smp_processor_id();
cpu               279 arch/powerpc/platforms/powernv/idle.c 	int first = cpu_first_thread_sibling(cpu);
cpu               314 arch/powerpc/platforms/powernv/idle.c 	int cpu = raw_smp_processor_id();
cpu               315 arch/powerpc/platforms/powernv/idle.c 	int first = cpu_first_thread_sibling(cpu);
cpu               317 arch/powerpc/platforms/powernv/idle.c 	unsigned long thread = 1UL << cpu_thread_in_core(cpu);
cpu               607 arch/powerpc/platforms/powernv/idle.c 	int cpu = raw_smp_processor_id();
cpu               608 arch/powerpc/platforms/powernv/idle.c 	int first = cpu_first_thread_sibling(cpu);
cpu               887 arch/powerpc/platforms/powernv/idle.c 	int cpu, cpu0, thr;
cpu               892 arch/powerpc/platforms/powernv/idle.c 	cpu = smp_processor_id();
cpu               893 arch/powerpc/platforms/powernv/idle.c 	cpu0 = cpu & ~(threads_per_core - 1);
cpu               895 arch/powerpc/platforms/powernv/idle.c 		if (cpu != cpu0 + thr)
cpu               933 arch/powerpc/platforms/powernv/idle.c 	int cpu, cpu0, thr;
cpu               935 arch/powerpc/platforms/powernv/idle.c 	cpu = smp_processor_id();
cpu               936 arch/powerpc/platforms/powernv/idle.c 	cpu0 = cpu & ~(threads_per_core - 1);
cpu               940 arch/powerpc/platforms/powernv/idle.c 		if (cpu != cpu0 + thr)
cpu               949 arch/powerpc/platforms/powernv/idle.c void pnv_program_cpu_hotplug_lpcr(unsigned int cpu, u64 lpcr_val)
cpu               951 arch/powerpc/platforms/powernv/idle.c 	u64 pir = get_hard_smp_processor_id(cpu);
cpu               968 arch/powerpc/platforms/powernv/idle.c unsigned long pnv_cpu_offline(unsigned int cpu)
cpu               985 arch/powerpc/platforms/powernv/idle.c 		while (!generic_check_cpu_restart(cpu)) {
cpu              1329 arch/powerpc/platforms/powernv/idle.c 	int cpu;
cpu              1333 arch/powerpc/platforms/powernv/idle.c 	for_each_present_cpu(cpu) {
cpu              1334 arch/powerpc/platforms/powernv/idle.c 		struct paca_struct *p = paca_ptrs[cpu];
cpu              1337 arch/powerpc/platforms/powernv/idle.c 		if (cpu == cpu_first_thread_sibling(cpu))
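
Several of the idle.c entries above derive a thread's core-sibling group from its CPU number via cpu_first_thread_sibling(cpu), cpu_thread_in_core(cpu), and the explicit mask "cpu0 = cpu & ~(threads_per_core - 1)". Assuming threads_per_core is a power of two (as that masking implies), the arithmetic reduces to the standalone sketch below; the helper names are hypothetical stand-ins for the kernel ones.

#include <stdio.h>

static int threads_per_core = 8;	/* hypothetical; must be a power of two for the masks below */

static int example_first_thread_sibling(int cpu) { return cpu & ~(threads_per_core - 1); }
static int example_thread_in_core(int cpu)       { return cpu &  (threads_per_core - 1); }

int main(void)
{
	int cpu = 13;
	/* For cpu 13 with 8 threads per core: first sibling 8, thread 5, thread bit 0x20. */
	printf("first=%d thread=%d bit=%#lx\n",
	       example_first_thread_sibling(cpu),
	       example_thread_in_core(cpu),
	       1UL << example_thread_in_core(cpu));
	return 0;
}
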
cpu               196 arch/powerpc/platforms/powernv/opal-imc.c 	int nid, cpu;
cpu               202 arch/powerpc/platforms/powernv/opal-imc.c 		cpu = cpumask_first_and(l_cpumask, cpu_online_mask);
cpu               203 arch/powerpc/platforms/powernv/opal-imc.c 		if (cpu >= nr_cpu_ids)
cpu               206 arch/powerpc/platforms/powernv/opal-imc.c 				       get_hard_smp_processor_id(cpu));
cpu               214 arch/powerpc/platforms/powernv/opal-imc.c 	int cpu, rc;
cpu               219 arch/powerpc/platforms/powernv/opal-imc.c 	for_each_cpu(cpu, &cores_map) {
cpu               221 arch/powerpc/platforms/powernv/opal-imc.c 					    get_hard_smp_processor_id(cpu));
cpu               224 arch/powerpc/platforms/powernv/opal-imc.c 				__FUNCTION__, cpu);
cpu               120 arch/powerpc/platforms/powernv/rng.c 	int chip_id, cpu;
cpu               126 arch/powerpc/platforms/powernv/rng.c 	for_each_possible_cpu(cpu) {
cpu               127 arch/powerpc/platforms/powernv/rng.c 		if (per_cpu(powernv_rng, cpu) == NULL ||
cpu               128 arch/powerpc/platforms/powernv/rng.c 		    cpu_to_chip_id(cpu) == chip_id) {
cpu               129 arch/powerpc/platforms/powernv/rng.c 			per_cpu(powernv_rng, cpu) = rng;
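
The powernv/rng.c loop above walks every possible CPU and records the new rng for CPUs whose chip matches, or that have no rng assigned yet. A sketch of that assignment pattern with plain arrays standing in for the per_cpu() variable and cpu_to_chip_id(); all names here are hypothetical:

#include <stddef.h>

#define EXAMPLE_NR_CPUS 8

/* Hypothetical stand-ins: chip id per CPU and the per-CPU rng pointer. */
static int   example_cpu_chip_id[EXAMPLE_NR_CPUS] = { 0, 0, 0, 0, 1, 1, 1, 1 };
static void *example_cpu_rng[EXAMPLE_NR_CPUS];

/* Attach @rng to every CPU on @chip_id, and to any CPU with no rng at all. */
static void example_rng_attach(void *rng, int chip_id)
{
	for (int cpu = 0; cpu < EXAMPLE_NR_CPUS; cpu++) {
		if (example_cpu_rng[cpu] == NULL ||
		    example_cpu_chip_id[cpu] == chip_id)
			example_cpu_rng[cpu] = rng;
	}
}
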
cpu               457 arch/powerpc/platforms/powernv/setup.c static unsigned long pnv_get_proc_freq(unsigned int cpu)
cpu               461 arch/powerpc/platforms/powernv/setup.c 	ret_freq = cpufreq_get(cpu) * 1000ul;
cpu                49 arch/powerpc/platforms/powernv/smp.c static void pnv_smp_setup_cpu(int cpu)
cpu                60 arch/powerpc/platforms/powernv/smp.c 	else if (cpu != boot_cpuid)
cpu               132 arch/powerpc/platforms/powernv/smp.c 	int cpu = smp_processor_id();
cpu               138 arch/powerpc/platforms/powernv/smp.c 	set_cpu_online(cpu, false);
cpu               140 arch/powerpc/platforms/powernv/smp.c 	if (cpu == boot_cpuid)
cpu               164 arch/powerpc/platforms/powernv/smp.c 	unsigned int cpu;
cpu               171 arch/powerpc/platforms/powernv/smp.c 	cpu = smp_processor_id();
cpu               172 arch/powerpc/platforms/powernv/smp.c 	DBG("CPU%d offline\n", cpu);
cpu               173 arch/powerpc/platforms/powernv/smp.c 	generic_set_cpu_dead(cpu);
cpu               189 arch/powerpc/platforms/powernv/smp.c 	if (generic_check_cpu_restart(cpu))
cpu               197 arch/powerpc/platforms/powernv/smp.c 				cpu, local_paca->irq_happened);
cpu               212 arch/powerpc/platforms/powernv/smp.c 	pnv_program_cpu_hotplug_lpcr(cpu, lpcr_val);
cpu               214 arch/powerpc/platforms/powernv/smp.c 	while (!generic_check_cpu_restart(cpu)) {
cpu               222 arch/powerpc/platforms/powernv/smp.c 		kvmppc_clear_host_ipi(cpu);
cpu               224 arch/powerpc/platforms/powernv/smp.c 		srr1 = pnv_cpu_offline(cpu);
cpu               275 arch/powerpc/platforms/powernv/smp.c 		if (srr1 && !generic_check_cpu_restart(cpu))
cpu               277 arch/powerpc/platforms/powernv/smp.c 					cpu, srr1);
cpu               289 arch/powerpc/platforms/powernv/smp.c 	pnv_program_cpu_hotplug_lpcr(cpu, lpcr_val);
cpu               291 arch/powerpc/platforms/powernv/smp.c 	DBG("CPU%d coming online...\n", cpu);
cpu               310 arch/powerpc/platforms/powernv/smp.c static int pnv_smp_prepare_cpu(int cpu)
cpu               313 arch/powerpc/platforms/powernv/smp.c 		return xive_smp_prepare_cpu(cpu);
cpu               318 arch/powerpc/platforms/powernv/smp.c static void (*ic_cause_ipi)(int cpu);
cpu               320 arch/powerpc/platforms/powernv/smp.c static void pnv_cause_ipi(int cpu)
cpu               322 arch/powerpc/platforms/powernv/smp.c 	if (doorbell_try_core_ipi(cpu))
cpu               325 arch/powerpc/platforms/powernv/smp.c 	ic_cause_ipi(cpu);
cpu               353 arch/powerpc/platforms/powernv/smp.c static int pnv_cause_nmi_ipi(int cpu)
cpu               357 arch/powerpc/platforms/powernv/smp.c 	if (cpu >= 0) {
cpu               358 arch/powerpc/platforms/powernv/smp.c 		int h = get_hard_smp_processor_id(cpu);
cpu               372 arch/powerpc/platforms/powernv/smp.c 	} else if (cpu == NMI_IPI_ALL_OTHERS) {
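
pnv_cause_ipi() above tries a core-local doorbell first and only falls back to the interrupt controller's saved cause_ipi hook when the doorbell cannot reach the target. A standalone sketch of that fast-path/fallback shape, with hypothetical stand-ins for doorbell_try_core_ipi() and ic_cause_ipi:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical: pretend CPUs 0-3 share our core and can take a doorbell. */
static bool example_try_doorbell(int cpu)
{
	return cpu < 4;
}

static void example_ic_fallback_ipi(int cpu)
{
	printf("fallback IPI to cpu %d\n", cpu);
}

static void (*example_ic_cause_ipi)(int cpu) = example_ic_fallback_ipi;

static void example_cause_ipi(int cpu)
{
	/* Fast path: a doorbell reaches threads on the same core cheaply. */
	if (example_try_doorbell(cpu))
		return;

	/* Slow path: fall back to the interrupt controller's IPI mechanism. */
	example_ic_cause_ipi(cpu);
}
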
cpu               150 arch/powerpc/platforms/powernv/subcore.c 	int i, cpu = smp_processor_id();
cpu               152 arch/powerpc/platforms/powernv/subcore.c 	for (i = cpu + 1; i < cpu + threads_per_core; i++)
cpu               175 arch/powerpc/platforms/powernv/subcore.c 	int i, cpu;
cpu               179 arch/powerpc/platforms/powernv/subcore.c 	cpu = smp_processor_id();
cpu               180 arch/powerpc/platforms/powernv/subcore.c 	if (cpu_thread_in_core(cpu) != 0) {
cpu               184 arch/powerpc/platforms/powernv/subcore.c 		per_cpu(split_state, cpu).step = SYNC_STEP_UNSPLIT;
cpu               197 arch/powerpc/platforms/powernv/subcore.c 	for (i = cpu + 1; i < cpu + threads_per_core; i++)
cpu               209 arch/powerpc/platforms/powernv/subcore.c 	int i, cpu;
cpu               216 arch/powerpc/platforms/powernv/subcore.c 	cpu = smp_processor_id();
cpu               217 arch/powerpc/platforms/powernv/subcore.c 	if (cpu_thread_in_core(cpu) != 0) {
cpu               218 arch/powerpc/platforms/powernv/subcore.c 		split_core_secondary_loop(&per_cpu(split_state, cpu).step);
cpu               267 arch/powerpc/platforms/powernv/subcore.c 	int cpu;
cpu               274 arch/powerpc/platforms/powernv/subcore.c 	for_each_possible_cpu(cpu) {
cpu               275 arch/powerpc/platforms/powernv/subcore.c 		int tid = cpu_thread_in_core(cpu);
cpu               279 arch/powerpc/platforms/powernv/subcore.c 		paca_ptrs[cpu]->subcore_sibling_mask = mask;
cpu               286 arch/powerpc/platforms/powernv/subcore.c 	int cpu, new_mode = *(int *)data;
cpu               296 arch/powerpc/platforms/powernv/subcore.c 		for_each_cpu(cpu, cpu_offline_mask)
cpu               297 arch/powerpc/platforms/powernv/subcore.c 			smp_send_reschedule(cpu);
cpu               304 arch/powerpc/platforms/powernv/subcore.c 		for_each_present_cpu(cpu) {
cpu               305 arch/powerpc/platforms/powernv/subcore.c 			if (cpu >= setup_max_cpus)
cpu               308 arch/powerpc/platforms/powernv/subcore.c 			while(per_cpu(split_state, cpu).step < SYNC_STEP_FINISHED)
cpu               329 arch/powerpc/platforms/powernv/subcore.c 	int cpu;
cpu               342 arch/powerpc/platforms/powernv/subcore.c 	for_each_present_cpu(cpu) {
cpu               343 arch/powerpc/platforms/powernv/subcore.c 		state = &per_cpu(split_state, cpu);
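
The subcore.c entries above coordinate core splitting through a per-CPU split_state.step that each thread advances while a waiter spins until every CPU reports SYNC_STEP_FINISHED. A toy version of that rendezvous using a plain array instead of per_cpu(); the real code also needs memory barriers and cpu_relax(), which are omitted here:

/* Hypothetical step values mirroring the SYNC_STEP_* constants above. */
enum { EXAMPLE_STEP_INITIAL, EXAMPLE_STEP_UNSPLIT, EXAMPLE_STEP_FINISHED };

#define EXAMPLE_NR_CPUS 8

static volatile int example_split_step[EXAMPLE_NR_CPUS];

/* Wait until every CPU has reached the final step.
 * The real code reads per_cpu(split_state, cpu).step with barriers. */
static void example_wait_for_sync(void)
{
	for (int cpu = 0; cpu < EXAMPLE_NR_CPUS; cpu++)
		while (example_split_step[cpu] < EXAMPLE_STEP_FINISHED)
			;	/* spin; real code calls barrier()/cpu_relax() */
}
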
cpu                28 arch/powerpc/platforms/powernv/vas.c 	int rc, cpu, vasid;
cpu                76 arch/powerpc/platforms/powernv/vas.c 	for_each_possible_cpu(cpu) {
cpu                77 arch/powerpc/platforms/powernv/vas.c 		if (cpu_to_chip_id(cpu) == of_get_ibm_chip_id(dn))
cpu                78 arch/powerpc/platforms/powernv/vas.c 			per_cpu(cpu_vas_id, cpu) = vasid;
cpu               126 arch/powerpc/platforms/powernv/vas.c 	int cpu;
cpu               128 arch/powerpc/platforms/powernv/vas.c 	for_each_possible_cpu(cpu) {
cpu               129 arch/powerpc/platforms/powernv/vas.c 		if (cpu_to_chip_id(cpu) == chipid)
cpu               130 arch/powerpc/platforms/powernv/vas.c 			return per_cpu(cpu_vas_id, cpu);
cpu               168 arch/powerpc/platforms/ps3/interrupt.c static int ps3_virq_setup(enum ps3_cpu_binding cpu, unsigned long outlet,
cpu               176 arch/powerpc/platforms/ps3/interrupt.c 	if (cpu == PS3_BINDING_CPU_ANY)
cpu               177 arch/powerpc/platforms/ps3/interrupt.c 		cpu = 0;
cpu               179 arch/powerpc/platforms/ps3/interrupt.c 	pd = &per_cpu(ps3_private, cpu);
cpu               191 arch/powerpc/platforms/ps3/interrupt.c 		outlet, cpu, *virq);
cpu               242 arch/powerpc/platforms/ps3/interrupt.c int ps3_irq_plug_setup(enum ps3_cpu_binding cpu, unsigned long outlet,
cpu               248 arch/powerpc/platforms/ps3/interrupt.c 	result = ps3_virq_setup(cpu, outlet, virq);
cpu               320 arch/powerpc/platforms/ps3/interrupt.c int ps3_event_receive_port_setup(enum ps3_cpu_binding cpu, unsigned int *virq)
cpu               334 arch/powerpc/platforms/ps3/interrupt.c 	result = ps3_irq_plug_setup(cpu, outlet, virq);
cpu               390 arch/powerpc/platforms/ps3/interrupt.c 	enum ps3_cpu_binding cpu, unsigned int *virq)
cpu               396 arch/powerpc/platforms/ps3/interrupt.c 	result = ps3_event_receive_port_setup(cpu, virq);
cpu               465 arch/powerpc/platforms/ps3/interrupt.c int ps3_io_irq_setup(enum ps3_cpu_binding cpu, unsigned int interrupt_id,
cpu               479 arch/powerpc/platforms/ps3/interrupt.c 	result = ps3_irq_plug_setup(cpu, outlet, virq);
cpu               522 arch/powerpc/platforms/ps3/interrupt.c int ps3_vuart_irq_setup(enum ps3_cpu_binding cpu, void* virt_addr_bmp,
cpu               541 arch/powerpc/platforms/ps3/interrupt.c 	result = ps3_irq_plug_setup(cpu, outlet, virq);
cpu               578 arch/powerpc/platforms/ps3/interrupt.c int ps3_spe_irq_setup(enum ps3_cpu_binding cpu, unsigned long spe_id,
cpu               594 arch/powerpc/platforms/ps3/interrupt.c 	result = ps3_irq_plug_setup(cpu, outlet, virq);
cpu               617 arch/powerpc/platforms/ps3/interrupt.c static void _dump_64_bmp(const char *header, const u64 *p, unsigned cpu,
cpu               621 arch/powerpc/platforms/ps3/interrupt.c 		func, line, header, cpu,
cpu               627 arch/powerpc/platforms/ps3/interrupt.c 	const u64 *p, unsigned cpu, const char* func, int line)
cpu               630 arch/powerpc/platforms/ps3/interrupt.c 		func, line, header, cpu, p[0], p[1], p[2], p[3]);
cpu               681 arch/powerpc/platforms/ps3/interrupt.c void __init ps3_register_ipi_debug_brk(unsigned int cpu, unsigned int virq)
cpu               683 arch/powerpc/platforms/ps3/interrupt.c 	struct ps3_private *pd = &per_cpu(ps3_private, cpu);
cpu               688 arch/powerpc/platforms/ps3/interrupt.c 		cpu, virq, pd->ipi_debug_brk_mask);
cpu               691 arch/powerpc/platforms/ps3/interrupt.c void __init ps3_register_ipi_irq(unsigned int cpu, unsigned int virq)
cpu               693 arch/powerpc/platforms/ps3/interrupt.c 	struct ps3_private *pd = &per_cpu(ps3_private, cpu);
cpu               698 arch/powerpc/platforms/ps3/interrupt.c 		cpu, virq, pd->ipi_mask);
cpu               742 arch/powerpc/platforms/ps3/interrupt.c 	unsigned cpu;
cpu               748 arch/powerpc/platforms/ps3/interrupt.c 	for_each_possible_cpu(cpu) {
cpu               749 arch/powerpc/platforms/ps3/interrupt.c 		struct ps3_private *pd = &per_cpu(ps3_private, cpu);
cpu               752 arch/powerpc/platforms/ps3/interrupt.c 		pd->thread_id = get_hard_smp_processor_id(cpu);
cpu               771 arch/powerpc/platforms/ps3/interrupt.c void ps3_shutdown_IRQ(int cpu)
cpu               775 arch/powerpc/platforms/ps3/interrupt.c 	u64 thread_id = get_hard_smp_processor_id(cpu);
cpu               781 arch/powerpc/platforms/ps3/interrupt.c 		__LINE__, ppe_id, thread_id, cpu, ps3_result(result));
cpu                32 arch/powerpc/platforms/ps3/platform.h void ps3_shutdown_IRQ(int cpu);
cpu                33 arch/powerpc/platforms/ps3/platform.h void __init ps3_register_ipi_debug_brk(unsigned int cpu, unsigned int virq);
cpu                34 arch/powerpc/platforms/ps3/platform.h void __init ps3_register_ipi_irq(unsigned int cpu, unsigned int virq);
cpu                40 arch/powerpc/platforms/ps3/platform.h void ps3_smp_cleanup_cpu(int cpu);
cpu                42 arch/powerpc/platforms/ps3/platform.h static inline void ps3_smp_cleanup_cpu(int cpu) { }
cpu               248 arch/powerpc/platforms/ps3/setup.c 	int cpu = smp_processor_id();
cpu               250 arch/powerpc/platforms/ps3/setup.c 	DBG(" -> %s:%d: (%d)\n", __func__, __LINE__, cpu);
cpu               252 arch/powerpc/platforms/ps3/setup.c 	ps3_smp_cleanup_cpu(cpu);
cpu               253 arch/powerpc/platforms/ps3/setup.c 	ps3_shutdown_IRQ(cpu);
cpu                30 arch/powerpc/platforms/ps3/smp.c static void ps3_smp_message_pass(int cpu, int msg)
cpu                40 arch/powerpc/platforms/ps3/smp.c 	virq = per_cpu(ps3_ipi_virqs, cpu)[msg];
cpu                45 arch/powerpc/platforms/ps3/smp.c 			" (%d)\n", __func__, __LINE__, cpu, msg, result);
cpu                50 arch/powerpc/platforms/ps3/smp.c 	int cpu;
cpu                52 arch/powerpc/platforms/ps3/smp.c 	for (cpu = 0; cpu < 2; cpu++) {
cpu                54 arch/powerpc/platforms/ps3/smp.c 		unsigned int *virqs = per_cpu(ps3_ipi_virqs, cpu);
cpu                57 arch/powerpc/platforms/ps3/smp.c 		DBG(" -> %s:%d: (%d)\n", __func__, __LINE__, cpu);
cpu                71 arch/powerpc/platforms/ps3/smp.c 			result = ps3_event_receive_port_setup(cpu, &virqs[i]);
cpu                77 arch/powerpc/platforms/ps3/smp.c 				__func__, __LINE__, cpu, i, virqs[i]);
cpu                84 arch/powerpc/platforms/ps3/smp.c 				ps3_register_ipi_irq(cpu, virqs[i]);
cpu                87 arch/powerpc/platforms/ps3/smp.c 		ps3_register_ipi_debug_brk(cpu, virqs[PPC_MSG_NMI_IPI]);
cpu                89 arch/powerpc/platforms/ps3/smp.c 		DBG(" <- %s:%d: (%d)\n", __func__, __LINE__, cpu);
cpu                93 arch/powerpc/platforms/ps3/smp.c void ps3_smp_cleanup_cpu(int cpu)
cpu                95 arch/powerpc/platforms/ps3/smp.c 	unsigned int *virqs = per_cpu(ps3_ipi_virqs, cpu);
cpu                98 arch/powerpc/platforms/ps3/smp.c 	DBG(" -> %s:%d: (%d)\n", __func__, __LINE__, cpu);
cpu               106 arch/powerpc/platforms/ps3/smp.c 	DBG(" <- %s:%d: (%d)\n", __func__, __LINE__, cpu);
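
The ps3/smp.c entries above keep a small per-CPU array of virqs, one slot per IPI message type, and message passing simply looks up per_cpu(ps3_ipi_virqs, cpu)[msg] before triggering the event port. A sketch of that lookup table; sizes and names are hypothetical:

#include <stdio.h>

#define EXAMPLE_NR_CPUS  2   /* the PS3 code above only sets up two CPUs */
#define EXAMPLE_NR_MSGS  4   /* hypothetical message count (the PPC_MSG_* slots) */

/* Hypothetical stand-in for the per-CPU ps3_ipi_virqs array. */
static unsigned int example_ipi_virqs[EXAMPLE_NR_CPUS][EXAMPLE_NR_MSGS];

static void example_message_pass(int cpu, int msg)
{
	unsigned int virq = example_ipi_virqs[cpu][msg];

	/* The real code sends the interrupt on this virq via the hypervisor. */
	printf("would trigger virq %u for cpu %d msg %d\n", virq, cpu, msg);
}
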
cpu               507 arch/powerpc/platforms/ps3/spu.c static void cpu_affinity_set(struct spu *spu, int cpu)
cpu                23 arch/powerpc/platforms/pseries/dtl.c 	int			cpu;
cpu                83 arch/powerpc/platforms/pseries/dtl.c 	struct dtl_ring *dtlr = &per_cpu(dtl_rings, dtl->cpu);
cpu                94 arch/powerpc/platforms/pseries/dtl.c 	lppaca_of(dtl->cpu).dtl_enable_mask |= dtl_event_mask;
cpu               103 arch/powerpc/platforms/pseries/dtl.c 	struct dtl_ring *dtlr = &per_cpu(dtl_rings, dtl->cpu);
cpu               111 arch/powerpc/platforms/pseries/dtl.c 	lppaca_of(dtl->cpu).dtl_enable_mask = DTL_LOG_PREEMPT;
cpu               119 arch/powerpc/platforms/pseries/dtl.c 	return per_cpu(dtl_rings, dtl->cpu).write_index;
cpu               133 arch/powerpc/platforms/pseries/dtl.c 	hwcpu = get_hard_smp_processor_id(dtl->cpu);
cpu               138 arch/powerpc/platforms/pseries/dtl.c 		       "failed with %d\n", __func__, dtl->cpu, hwcpu, ret);
cpu               143 arch/powerpc/platforms/pseries/dtl.c 	lppaca_of(dtl->cpu).dtl_idx = 0;
cpu               150 arch/powerpc/platforms/pseries/dtl.c 	lppaca_of(dtl->cpu).dtl_enable_mask = dtl_event_mask;
cpu               157 arch/powerpc/platforms/pseries/dtl.c 	int hwcpu = get_hard_smp_processor_id(dtl->cpu);
cpu               159 arch/powerpc/platforms/pseries/dtl.c 	lppaca_of(dtl->cpu).dtl_enable_mask = 0x0;
cpu               166 arch/powerpc/platforms/pseries/dtl.c 	return be64_to_cpu(lppaca_of(dtl->cpu).dtl_idx);
cpu               188 arch/powerpc/platforms/pseries/dtl.c 	buf = kmem_cache_alloc_node(dtl_cache, GFP_KERNEL, cpu_to_node(dtl->cpu));
cpu               191 arch/powerpc/platforms/pseries/dtl.c 				__func__, dtl->cpu);
cpu               327 arch/powerpc/platforms/pseries/dtl.c 	sprintf(name, "cpu-%d", dtl->cpu);
cpu               368 arch/powerpc/platforms/pseries/dtl.c 		dtl->cpu = i;
cpu                61 arch/powerpc/platforms/pseries/hotplug-cpu.c enum cpu_state_vals get_cpu_current_state(int cpu)
cpu                63 arch/powerpc/platforms/pseries/hotplug-cpu.c 	return per_cpu(current_state, cpu);
cpu                66 arch/powerpc/platforms/pseries/hotplug-cpu.c void set_cpu_current_state(int cpu, enum cpu_state_vals state)
cpu                68 arch/powerpc/platforms/pseries/hotplug-cpu.c 	per_cpu(current_state, cpu) = state;
cpu                71 arch/powerpc/platforms/pseries/hotplug-cpu.c enum cpu_state_vals get_preferred_offline_state(int cpu)
cpu                73 arch/powerpc/platforms/pseries/hotplug-cpu.c 	return per_cpu(preferred_offline_state, cpu);
cpu                76 arch/powerpc/platforms/pseries/hotplug-cpu.c void set_preferred_offline_state(int cpu, enum cpu_state_vals state)
cpu                78 arch/powerpc/platforms/pseries/hotplug-cpu.c 	per_cpu(preferred_offline_state, cpu) = state;
cpu                81 arch/powerpc/platforms/pseries/hotplug-cpu.c void set_default_offline_state(int cpu)
cpu                83 arch/powerpc/platforms/pseries/hotplug-cpu.c 	per_cpu(preferred_offline_state, cpu) = default_offline_state;
cpu               104 arch/powerpc/platforms/pseries/hotplug-cpu.c 	unsigned int cpu = smp_processor_id();
cpu               115 arch/powerpc/platforms/pseries/hotplug-cpu.c 	if (get_preferred_offline_state(cpu) == CPU_STATE_INACTIVE) {
cpu               116 arch/powerpc/platforms/pseries/hotplug-cpu.c 		set_cpu_current_state(cpu, CPU_STATE_INACTIVE);
cpu               126 arch/powerpc/platforms/pseries/hotplug-cpu.c 		while (get_preferred_offline_state(cpu) == CPU_STATE_INACTIVE) {
cpu               141 arch/powerpc/platforms/pseries/hotplug-cpu.c 		if (get_preferred_offline_state(cpu) == CPU_STATE_ONLINE) {
cpu               155 arch/powerpc/platforms/pseries/hotplug-cpu.c 	WARN_ON(get_preferred_offline_state(cpu) != CPU_STATE_OFFLINE);
cpu               157 arch/powerpc/platforms/pseries/hotplug-cpu.c 	set_cpu_current_state(cpu, CPU_STATE_OFFLINE);
cpu               168 arch/powerpc/platforms/pseries/hotplug-cpu.c 	int cpu = smp_processor_id();
cpu               170 arch/powerpc/platforms/pseries/hotplug-cpu.c 	set_cpu_online(cpu, false);
cpu               174 arch/powerpc/platforms/pseries/hotplug-cpu.c 	if (cpu == boot_cpuid)
cpu               197 arch/powerpc/platforms/pseries/hotplug-cpu.c static void pseries_cpu_die(unsigned int cpu)
cpu               201 arch/powerpc/platforms/pseries/hotplug-cpu.c 	unsigned int pcpu = get_hard_smp_processor_id(cpu);
cpu               203 arch/powerpc/platforms/pseries/hotplug-cpu.c 	if (get_preferred_offline_state(cpu) == CPU_STATE_INACTIVE) {
cpu               206 arch/powerpc/platforms/pseries/hotplug-cpu.c 			if (get_cpu_current_state(cpu) == CPU_STATE_INACTIVE) {
cpu               212 arch/powerpc/platforms/pseries/hotplug-cpu.c 	} else if (get_preferred_offline_state(cpu) == CPU_STATE_OFFLINE) {
cpu               225 arch/powerpc/platforms/pseries/hotplug-cpu.c 		       cpu, pcpu, cpu_status);
cpu               233 arch/powerpc/platforms/pseries/hotplug-cpu.c 	paca_ptrs[cpu]->cpu_start = 0;
cpu               245 arch/powerpc/platforms/pseries/hotplug-cpu.c 	unsigned int cpu;
cpu               291 arch/powerpc/platforms/pseries/hotplug-cpu.c 	for_each_cpu(cpu, tmp) {
cpu               292 arch/powerpc/platforms/pseries/hotplug-cpu.c 		BUG_ON(cpu_present(cpu));
cpu               293 arch/powerpc/platforms/pseries/hotplug-cpu.c 		set_cpu_present(cpu, true);
cpu               294 arch/powerpc/platforms/pseries/hotplug-cpu.c 		set_hard_smp_processor_id(cpu, be32_to_cpu(*intserv++));
cpu               311 arch/powerpc/platforms/pseries/hotplug-cpu.c 	unsigned int cpu;
cpu               325 arch/powerpc/platforms/pseries/hotplug-cpu.c 		for_each_present_cpu(cpu) {
cpu               326 arch/powerpc/platforms/pseries/hotplug-cpu.c 			if (get_hard_smp_processor_id(cpu) != thread)
cpu               328 arch/powerpc/platforms/pseries/hotplug-cpu.c 			BUG_ON(cpu_online(cpu));
cpu               329 arch/powerpc/platforms/pseries/hotplug-cpu.c 			set_cpu_present(cpu, false);
cpu               330 arch/powerpc/platforms/pseries/hotplug-cpu.c 			set_hard_smp_processor_id(cpu, -1);
cpu               331 arch/powerpc/platforms/pseries/hotplug-cpu.c 			update_numa_cpu_lookup_table(cpu, -1);
cpu               334 arch/powerpc/platforms/pseries/hotplug-cpu.c 		if (cpu >= nr_cpu_ids)
cpu               344 arch/powerpc/platforms/pseries/hotplug-cpu.c 	unsigned int cpu;
cpu               358 arch/powerpc/platforms/pseries/hotplug-cpu.c 		for_each_present_cpu(cpu) {
cpu               359 arch/powerpc/platforms/pseries/hotplug-cpu.c 			if (get_hard_smp_processor_id(cpu) != thread)
cpu               361 arch/powerpc/platforms/pseries/hotplug-cpu.c 			BUG_ON(get_cpu_current_state(cpu)
cpu               365 arch/powerpc/platforms/pseries/hotplug-cpu.c 			find_and_online_cpu_nid(cpu);
cpu               366 arch/powerpc/platforms/pseries/hotplug-cpu.c 			rc = device_online(get_cpu_device(cpu));
cpu               373 arch/powerpc/platforms/pseries/hotplug-cpu.c 		if (cpu == num_possible_cpus())
cpu               511 arch/powerpc/platforms/pseries/hotplug-cpu.c 	unsigned int cpu;
cpu               525 arch/powerpc/platforms/pseries/hotplug-cpu.c 		for_each_present_cpu(cpu) {
cpu               526 arch/powerpc/platforms/pseries/hotplug-cpu.c 			if (get_hard_smp_processor_id(cpu) != thread)
cpu               529 arch/powerpc/platforms/pseries/hotplug-cpu.c 			if (get_cpu_current_state(cpu) == CPU_STATE_OFFLINE)
cpu               532 arch/powerpc/platforms/pseries/hotplug-cpu.c 			if (get_cpu_current_state(cpu) == CPU_STATE_ONLINE) {
cpu               533 arch/powerpc/platforms/pseries/hotplug-cpu.c 				set_preferred_offline_state(cpu,
cpu               537 arch/powerpc/platforms/pseries/hotplug-cpu.c 				rc = device_offline(get_cpu_device(cpu));
cpu               549 arch/powerpc/platforms/pseries/hotplug-cpu.c 			set_preferred_offline_state(cpu, CPU_STATE_OFFLINE);
cpu               552 arch/powerpc/platforms/pseries/hotplug-cpu.c 			__cpu_die(cpu);
cpu               555 arch/powerpc/platforms/pseries/hotplug-cpu.c 		if (cpu == num_possible_cpus())
cpu               801 arch/powerpc/platforms/pseries/hotplug-cpu.c int dlpar_cpu_readd(int cpu)
cpu               808 arch/powerpc/platforms/pseries/hotplug-cpu.c 	dev = get_cpu_device(cpu);
cpu               938 arch/powerpc/platforms/pseries/hotplug-cpu.c 	int cpu;
cpu               966 arch/powerpc/platforms/pseries/hotplug-cpu.c 			for_each_online_cpu(cpu)
cpu               967 arch/powerpc/platforms/pseries/hotplug-cpu.c 				set_default_offline_state(cpu);
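
hotplug-cpu.c above wraps two per-CPU state variables (current_state and preferred_offline_state) in get/set helpers, and the init path resets every online CPU to the default offline state. A compact sketch of those accessors using arrays in place of per_cpu(); the enum values only mirror the CPU_STATE_* names loosely:

/* Hypothetical mirror of the CPU_STATE_* values used above. */
enum example_cpu_state { EX_STATE_OFFLINE, EX_STATE_INACTIVE, EX_STATE_ONLINE };

#define EXAMPLE_NR_CPUS 8

static enum example_cpu_state example_current_state[EXAMPLE_NR_CPUS];
static enum example_cpu_state example_preferred_offline[EXAMPLE_NR_CPUS];
static enum example_cpu_state example_default_offline = EX_STATE_OFFLINE;

static enum example_cpu_state example_get_current_state(int cpu)
{
	return example_current_state[cpu];
}

static void example_set_current_state(int cpu, enum example_cpu_state s)
{
	example_current_state[cpu] = s;
}

static void example_set_default_offline_state(int cpu)
{
	example_preferred_offline[cpu] = example_default_offline;
}
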
cpu               134 arch/powerpc/platforms/pseries/hvCall_inst.c 	int cpu;
cpu               151 arch/powerpc/platforms/pseries/hvCall_inst.c 	for_each_possible_cpu(cpu) {
cpu               152 arch/powerpc/platforms/pseries/hvCall_inst.c 		snprintf(cpu_name_buf, CPU_NAME_BUF_SIZE, "cpu%d", cpu);
cpu               155 arch/powerpc/platforms/pseries/hvCall_inst.c 						 per_cpu(hcall_stats, cpu),
cpu                30 arch/powerpc/platforms/pseries/kexec.c 		int cpu = smp_processor_id();
cpu                38 arch/powerpc/platforms/pseries/kexec.c 				       cpu, hwcpu, ret);
cpu                46 arch/powerpc/platforms/pseries/kexec.c 			       cpu, hwcpu, ret);
cpu                52 arch/powerpc/platforms/pseries/kexec.c 			       "(hw %d) failed with %d\n", cpu, hwcpu, ret);
cpu                83 arch/powerpc/platforms/pseries/lpar.c 	int cpu;
cpu                87 arch/powerpc/platforms/pseries/lpar.c 	for_each_possible_cpu(cpu) {
cpu                88 arch/powerpc/platforms/pseries/lpar.c 		pp = paca_ptrs[cpu];
cpu                94 arch/powerpc/platforms/pseries/lpar.c 				cpu);
cpu               113 arch/powerpc/platforms/pseries/lpar.c void register_dtl_buffer(int cpu)
cpu               118 arch/powerpc/platforms/pseries/lpar.c 	int hwcpu = get_hard_smp_processor_id(cpu);
cpu               120 arch/powerpc/platforms/pseries/lpar.c 	pp = paca_ptrs[cpu];
cpu               125 arch/powerpc/platforms/pseries/lpar.c 		lppaca_of(cpu).dtl_idx = 0;
cpu               132 arch/powerpc/platforms/pseries/lpar.c 			       cpu, hwcpu, ret);
cpu               134 arch/powerpc/platforms/pseries/lpar.c 		lppaca_of(cpu).dtl_enable_mask = dtl_mask;
cpu               141 arch/powerpc/platforms/pseries/lpar.c 	int cpu;
cpu               182 arch/powerpc/platforms/pseries/lpar.c 	int cpu;
cpu               185 arch/powerpc/platforms/pseries/lpar.c 	for_each_possible_cpu(cpu) {
cpu               186 arch/powerpc/platforms/pseries/lpar.c 		pp = paca_ptrs[cpu];
cpu               225 arch/powerpc/platforms/pseries/lpar.c static __be32 *__get_cpu_associativity(int cpu, __be32 *cpu_assoc, int flag)
cpu               230 arch/powerpc/platforms/pseries/lpar.c 	assoc = &cpu_assoc[(int)(cpu / threads_per_core) * VPHN_ASSOC_BUFSIZE];
cpu               232 arch/powerpc/platforms/pseries/lpar.c 		rc = hcall_vphn(cpu, flag, &assoc[0]);
cpu               240 arch/powerpc/platforms/pseries/lpar.c static __be32 *get_pcpu_associativity(int cpu)
cpu               242 arch/powerpc/platforms/pseries/lpar.c 	return __get_cpu_associativity(cpu, pcpu_associativity, VPHN_FLAG_PCPU);
cpu               245 arch/powerpc/platforms/pseries/lpar.c static __be32 *get_vcpu_associativity(int cpu)
cpu               247 arch/powerpc/platforms/pseries/lpar.c 	return __get_cpu_associativity(cpu, vcpu_associativity, VPHN_FLAG_VCPU);
cpu               369 arch/powerpc/platforms/pseries/lpar.c 	if (d->cpu != smp_processor_id()) {
cpu               384 arch/powerpc/platforms/pseries/lpar.c 				d->cpu,
cpu               400 arch/powerpc/platforms/pseries/lpar.c 	schedule_delayed_work_on(d->cpu, to_delayed_work(work),
cpu               404 arch/powerpc/platforms/pseries/lpar.c static int dtl_worker_online(unsigned int cpu)
cpu               406 arch/powerpc/platforms/pseries/lpar.c 	struct dtl_worker *d = &per_cpu(dtl_workers, cpu);
cpu               410 arch/powerpc/platforms/pseries/lpar.c 	d->cpu = cpu;
cpu               413 arch/powerpc/platforms/pseries/lpar.c 	per_cpu(dtl_entry_ridx, cpu) = 0;
cpu               414 arch/powerpc/platforms/pseries/lpar.c 	register_dtl_buffer(cpu);
cpu               416 arch/powerpc/platforms/pseries/lpar.c 	per_cpu(dtl_entry_ridx, cpu) = be64_to_cpu(lppaca_of(cpu).dtl_idx);
cpu               419 arch/powerpc/platforms/pseries/lpar.c 	schedule_delayed_work_on(cpu, &d->work, HZ / vcpudispatch_stats_freq);
cpu               423 arch/powerpc/platforms/pseries/lpar.c static int dtl_worker_offline(unsigned int cpu)
cpu               425 arch/powerpc/platforms/pseries/lpar.c 	struct dtl_worker *d = &per_cpu(dtl_workers, cpu);
cpu               430 arch/powerpc/platforms/pseries/lpar.c 	unregister_dtl(get_hard_smp_processor_id(cpu));
cpu               438 arch/powerpc/platforms/pseries/lpar.c 	int cpu;
cpu               441 arch/powerpc/platforms/pseries/lpar.c 	for_each_present_cpu(cpu)
cpu               442 arch/powerpc/platforms/pseries/lpar.c 		lppaca_of(cpu).dtl_enable_mask = dtl_mask;
cpu               447 arch/powerpc/platforms/pseries/lpar.c 	int cpu;
cpu               454 arch/powerpc/platforms/pseries/lpar.c 	for_each_present_cpu(cpu)
cpu               455 arch/powerpc/platforms/pseries/lpar.c 		lppaca_of(cpu).dtl_enable_mask = dtl_mask;
cpu               501 arch/powerpc/platforms/pseries/lpar.c 	int rc, cmd, cpu;
cpu               528 arch/powerpc/platforms/pseries/lpar.c 		for_each_possible_cpu(cpu) {
cpu               529 arch/powerpc/platforms/pseries/lpar.c 			disp = per_cpu_ptr(&vcpu_disp_data, cpu);
cpu               555 arch/powerpc/platforms/pseries/lpar.c 	int cpu;
cpu               563 arch/powerpc/platforms/pseries/lpar.c 	for_each_online_cpu(cpu) {
cpu               564 arch/powerpc/platforms/pseries/lpar.c 		disp = per_cpu_ptr(&vcpu_disp_data, cpu);
cpu               565 arch/powerpc/platforms/pseries/lpar.c 		seq_printf(p, "cpu%d", cpu);
cpu               655 arch/powerpc/platforms/pseries/lpar.c void vpa_init(int cpu)
cpu               657 arch/powerpc/platforms/pseries/lpar.c 	int hwcpu = get_hard_smp_processor_id(cpu);
cpu               665 arch/powerpc/platforms/pseries/lpar.c 	WARN_ON(cpu != smp_processor_id());
cpu               668 arch/powerpc/platforms/pseries/lpar.c 		lppaca_of(cpu).vmxregs_in_use = 1;
cpu               671 arch/powerpc/platforms/pseries/lpar.c 		lppaca_of(cpu).ebb_regs_in_use = 1;
cpu               673 arch/powerpc/platforms/pseries/lpar.c 	addr = __pa(&lppaca_of(cpu));
cpu               678 arch/powerpc/platforms/pseries/lpar.c 		       "%lx failed with %ld\n", cpu, hwcpu, addr, ret);
cpu               688 arch/powerpc/platforms/pseries/lpar.c 		addr = __pa(paca_ptrs[cpu]->slb_shadow_ptr);
cpu               693 arch/powerpc/platforms/pseries/lpar.c 			       cpu, hwcpu, addr, ret);
cpu               700 arch/powerpc/platforms/pseries/lpar.c 	register_dtl_buffer(cpu);
cpu              1978 arch/powerpc/platforms/pseries/lpar.c 	int cpu = (long)filp->private_data;
cpu              1979 arch/powerpc/platforms/pseries/lpar.c 	struct lppaca *lppaca = &lppaca_of(cpu);
cpu               387 arch/powerpc/platforms/pseries/lparcfg.c 	int cpu;
cpu               396 arch/powerpc/platforms/pseries/lparcfg.c 	for_each_possible_cpu(cpu) {
cpu               397 arch/powerpc/platforms/pseries/lparcfg.c 		cmo_faults += be64_to_cpu(lppaca_of(cpu).cmo_faults);
cpu               398 arch/powerpc/platforms/pseries/lparcfg.c 		cmo_fault_time += be64_to_cpu(lppaca_of(cpu).cmo_fault_time);
cpu               411 arch/powerpc/platforms/pseries/lparcfg.c 	int cpu;
cpu               415 arch/powerpc/platforms/pseries/lparcfg.c 	for_each_possible_cpu(cpu) {
cpu               416 arch/powerpc/platforms/pseries/lparcfg.c 		dispatches += be32_to_cpu(lppaca_of(cpu).yield_count);
cpu               418 arch/powerpc/platforms/pseries/lparcfg.c 			be32_to_cpu(lppaca_of(cpu).dispersion_count);
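
The lparcfg.c entries above total per-CPU lppaca counters (cmo_faults, cmo_fault_time, yield_count, dispersion_count) by iterating every possible CPU. A sketch of that aggregation over plain arrays standing in for the per-CPU lppaca fields; names are hypothetical:

#include <stdint.h>

#define EXAMPLE_NR_CPUS 8

/* Hypothetical per-CPU counters standing in for the lppaca fields read above. */
static uint64_t example_cmo_faults[EXAMPLE_NR_CPUS];
static uint32_t example_yield_count[EXAMPLE_NR_CPUS];

static uint64_t example_total_cmo_faults(void)
{
	uint64_t total = 0;

	for (int cpu = 0; cpu < EXAMPLE_NR_CPUS; cpu++)
		total += example_cmo_faults[cpu];
	return total;
}

static uint64_t example_total_yields(void)
{
	uint64_t total = 0;

	for (int cpu = 0; cpu < EXAMPLE_NR_CPUS; cpu++)
		total += example_yield_count[cpu];
	return total;
}
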
cpu                14 arch/powerpc/platforms/pseries/offline_states.h extern enum cpu_state_vals get_cpu_current_state(int cpu);
cpu                15 arch/powerpc/platforms/pseries/offline_states.h extern void set_cpu_current_state(int cpu, enum cpu_state_vals state);
cpu                16 arch/powerpc/platforms/pseries/offline_states.h extern void set_preferred_offline_state(int cpu, enum cpu_state_vals state);
cpu                17 arch/powerpc/platforms/pseries/offline_states.h extern void set_default_offline_state(int cpu);
cpu                19 arch/powerpc/platforms/pseries/offline_states.h static inline enum cpu_state_vals get_cpu_current_state(int cpu)
cpu                24 arch/powerpc/platforms/pseries/offline_states.h static inline void set_cpu_current_state(int cpu, enum cpu_state_vals state)
cpu                28 arch/powerpc/platforms/pseries/offline_states.h static inline void set_preferred_offline_state(int cpu, enum cpu_state_vals state)
cpu                32 arch/powerpc/platforms/pseries/offline_states.h static inline void set_default_offline_state(int cpu)
cpu                37 arch/powerpc/platforms/pseries/offline_states.h extern enum cpu_state_vals get_preferred_offline_state(int cpu);
cpu                36 arch/powerpc/platforms/pseries/pseries_energy.c static u32 cpu_to_drc_index(int cpu)
cpu                48 arch/powerpc/platforms/pseries/pseries_energy.c 	thread_index = cpu_core_index_of_thread(cpu);
cpu               106 arch/powerpc/platforms/pseries/pseries_energy.c 		printk(KERN_WARNING "cpu_to_drc_index(%d) failed", cpu);
cpu               114 arch/powerpc/platforms/pseries/pseries_energy.c 	int thread_index = 0, cpu = 0;
cpu               143 arch/powerpc/platforms/pseries/pseries_energy.c 				cpu += drc.num_sequential_elems;
cpu               146 arch/powerpc/platforms/pseries/pseries_energy.c 			cpu += ((drc_index - drc.drc_index_start) /
cpu               149 arch/powerpc/platforms/pseries/pseries_energy.c 			thread_index = cpu_first_thread_of_core(cpu);
cpu               193 arch/powerpc/platforms/pseries/pseries_energy.c 	int rc, cnt, i, cpu;
cpu               216 arch/powerpc/platforms/pseries/pseries_energy.c 		cpu = drc_index_to_cpu(buf_page[2*i+1]);
cpu               217 arch/powerpc/platforms/pseries/pseries_energy.c 		if ((cpu_online(cpu) && !activate) ||
cpu               218 arch/powerpc/platforms/pseries/pseries_energy.c 		    (!cpu_online(cpu) && activate))
cpu               219 arch/powerpc/platforms/pseries/pseries_energy.c 			s += sprintf(s, "%d,", cpu);
cpu               305 arch/powerpc/platforms/pseries/pseries_energy.c 	int cpu, err;
cpu               320 arch/powerpc/platforms/pseries/pseries_energy.c 	for_each_possible_cpu(cpu) {
cpu               321 arch/powerpc/platforms/pseries/pseries_energy.c 		cpu_dev = get_cpu_device(cpu);
cpu               342 arch/powerpc/platforms/pseries/pseries_energy.c 	int cpu;
cpu               352 arch/powerpc/platforms/pseries/pseries_energy.c 	for_each_possible_cpu(cpu) {
cpu               353 arch/powerpc/platforms/pseries/pseries_energy.c 		cpu_dev = get_cpu_device(cpu);
cpu               310 arch/powerpc/platforms/pseries/rtas-fadump.c 	int i, rc = 0, cpu = 0;
cpu               352 arch/powerpc/platforms/pseries/rtas-fadump.c 		cpu = (be64_to_cpu(reg_entry->reg_value) &
cpu               354 arch/powerpc/platforms/pseries/rtas-fadump.c 		if (fdh && !cpumask_test_cpu(cpu, &fdh->online_mask)) {
cpu               358 arch/powerpc/platforms/pseries/rtas-fadump.c 		pr_debug("Reading register data for cpu %d...\n", cpu);
cpu               359 arch/powerpc/platforms/pseries/rtas-fadump.c 		if (fdh && fdh->crashing_cpu == cpu) {
cpu               135 arch/powerpc/platforms/pseries/smp.c static void smp_setup_cpu(int cpu)
cpu               139 arch/powerpc/platforms/pseries/smp.c 	else if (cpu != boot_cpuid)
cpu               143 arch/powerpc/platforms/pseries/smp.c 		vpa_init(cpu);
cpu               145 arch/powerpc/platforms/pseries/smp.c 	cpumask_clear_cpu(cpu, of_spin_mask);
cpu               147 arch/powerpc/platforms/pseries/smp.c 	set_cpu_current_state(cpu, CPU_STATE_ONLINE);
cpu               148 arch/powerpc/platforms/pseries/smp.c 	set_default_offline_state(cpu);
cpu               184 arch/powerpc/platforms/pseries/smp.c static int pseries_smp_prepare_cpu(int cpu)
cpu               187 arch/powerpc/platforms/pseries/smp.c 		return xive_smp_prepare_cpu(cpu);
cpu               191 arch/powerpc/platforms/pseries/smp.c static void smp_pseries_cause_ipi(int cpu)
cpu               194 arch/powerpc/platforms/pseries/smp.c 	if (doorbell_try_core_ipi(cpu))
cpu               197 arch/powerpc/platforms/pseries/smp.c 	icp_ops->cause_ipi(cpu);
cpu               200 arch/powerpc/platforms/pseries/smp.c static int pseries_cause_nmi_ipi(int cpu)
cpu               204 arch/powerpc/platforms/pseries/smp.c 	if (cpu == NMI_IPI_ALL_OTHERS) {
cpu               207 arch/powerpc/platforms/pseries/smp.c 		if (cpu < 0) {
cpu               208 arch/powerpc/platforms/pseries/smp.c 			WARN_ONCE(true, "incorrect cpu parameter %d", cpu);
cpu               212 arch/powerpc/platforms/pseries/smp.c 		hwcpu = get_hard_smp_processor_id(cpu);
cpu                79 arch/powerpc/platforms/pseries/vphn.c long hcall_vphn(unsigned long cpu, u64 flags, __be32 *associativity)
cpu                84 arch/powerpc/platforms/pseries/vphn.c 	rc = plpar_hcall9(H_HOME_NODE_ASSOCIATIVITY, retbuf, flags, cpu);
cpu                27 arch/powerpc/sysdev/fsl_rcpm.c static void rcpm_v1_irq_mask(int cpu)
cpu                29 arch/powerpc/sysdev/fsl_rcpm.c 	int hw_cpu = get_hard_smp_processor_id(cpu);
cpu                38 arch/powerpc/sysdev/fsl_rcpm.c static void rcpm_v2_irq_mask(int cpu)
cpu                40 arch/powerpc/sysdev/fsl_rcpm.c 	int hw_cpu = get_hard_smp_processor_id(cpu);
cpu                49 arch/powerpc/sysdev/fsl_rcpm.c static void rcpm_v1_irq_unmask(int cpu)
cpu                51 arch/powerpc/sysdev/fsl_rcpm.c 	int hw_cpu = get_hard_smp_processor_id(cpu);
cpu                60 arch/powerpc/sysdev/fsl_rcpm.c static void rcpm_v2_irq_unmask(int cpu)
cpu                62 arch/powerpc/sysdev/fsl_rcpm.c 	int hw_cpu = get_hard_smp_processor_id(cpu);
cpu                87 arch/powerpc/sysdev/fsl_rcpm.c static void rcpm_v1_cpu_enter_state(int cpu, int state)
cpu                89 arch/powerpc/sysdev/fsl_rcpm.c 	int hw_cpu = get_hard_smp_processor_id(cpu);
cpu               105 arch/powerpc/sysdev/fsl_rcpm.c static void rcpm_v2_cpu_enter_state(int cpu, int state)
cpu               107 arch/powerpc/sysdev/fsl_rcpm.c 	int hw_cpu = get_hard_smp_processor_id(cpu);
cpu               108 arch/powerpc/sysdev/fsl_rcpm.c 	u32 mask = 1 << cpu_core_index_of_thread(cpu);
cpu               129 arch/powerpc/sysdev/fsl_rcpm.c static void rcpm_v1_cpu_die(int cpu)
cpu               131 arch/powerpc/sysdev/fsl_rcpm.c 	rcpm_v1_cpu_enter_state(cpu, E500_PM_PH15);
cpu               135 arch/powerpc/sysdev/fsl_rcpm.c static void qoriq_disable_thread(int cpu)
cpu               137 arch/powerpc/sysdev/fsl_rcpm.c 	int thread = cpu_thread_in_core(cpu);
cpu               143 arch/powerpc/sysdev/fsl_rcpm.c static void rcpm_v2_cpu_die(int cpu)
cpu               149 arch/powerpc/sysdev/fsl_rcpm.c 		primary = cpu_first_thread_sibling(cpu);
cpu               152 arch/powerpc/sysdev/fsl_rcpm.c 			rcpm_v2_cpu_enter_state(cpu, E500_PM_PH20);
cpu               155 arch/powerpc/sysdev/fsl_rcpm.c 			qoriq_disable_thread(cpu);
cpu               161 arch/powerpc/sysdev/fsl_rcpm.c 		rcpm_v2_cpu_enter_state(cpu, E500_PM_PH20);
cpu               164 arch/powerpc/sysdev/fsl_rcpm.c static void rcpm_v1_cpu_exit_state(int cpu, int state)
cpu               166 arch/powerpc/sysdev/fsl_rcpm.c 	int hw_cpu = get_hard_smp_processor_id(cpu);
cpu               182 arch/powerpc/sysdev/fsl_rcpm.c static void rcpm_v1_cpu_up_prepare(int cpu)
cpu               184 arch/powerpc/sysdev/fsl_rcpm.c 	rcpm_v1_cpu_exit_state(cpu, E500_PM_PH15);
cpu               185 arch/powerpc/sysdev/fsl_rcpm.c 	rcpm_v1_irq_unmask(cpu);
cpu               188 arch/powerpc/sysdev/fsl_rcpm.c static void rcpm_v2_cpu_exit_state(int cpu, int state)
cpu               190 arch/powerpc/sysdev/fsl_rcpm.c 	int hw_cpu = get_hard_smp_processor_id(cpu);
cpu               191 arch/powerpc/sysdev/fsl_rcpm.c 	u32 mask = 1 << cpu_core_index_of_thread(cpu);
cpu               211 arch/powerpc/sysdev/fsl_rcpm.c static void rcpm_v2_cpu_up_prepare(int cpu)
cpu               213 arch/powerpc/sysdev/fsl_rcpm.c 	rcpm_v2_cpu_exit_state(cpu, E500_PM_PH20);
cpu               214 arch/powerpc/sysdev/fsl_rcpm.c 	rcpm_v2_irq_unmask(cpu);
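
The fsl_rcpm.c v2 helpers above build a per-core mask from the Linux CPU number, 1 << cpu_core_index_of_thread(cpu), while the v1 paths key everything off get_hard_smp_processor_id(cpu). Assuming a fixed threads-per-core, the core-index arithmetic is just integer division, as in this standalone sketch (helper name is a hypothetical stand-in, not the kernel definition):

#include <stdio.h>

static int example_threads_per_core = 2;	/* hypothetical; e6500 cores run two threads */

/* Hypothetical stand-in for cpu_core_index_of_thread(). */
static int example_core_index_of_thread(int cpu)
{
	return cpu / example_threads_per_core;
}

int main(void)
{
	int cpu = 5;
	/* CPU 5 with two threads per core sits on core 2, so the mask bit is 0x4. */
	printf("core mask for cpu %d: %#x\n", cpu,
	       1 << example_core_index_of_thread(cpu));
	return 0;
}
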
cpu                41 arch/powerpc/sysdev/ge/ge_pic.c #define GEF_PIC_INTR_MASK(cpu)	(0x0010 + (0x4 * cpu))
cpu                45 arch/powerpc/sysdev/ge/ge_pic.c #define GEF_PIC_MCP_MASK(cpu)	(0x0018 + (0x4 * cpu))
cpu               160 arch/powerpc/sysdev/mpic.c 	unsigned int cpu = 0;
cpu               163 arch/powerpc/sysdev/mpic.c 		cpu = hard_smp_processor_id();
cpu               165 arch/powerpc/sysdev/mpic.c 	return cpu;
cpu               253 arch/powerpc/sysdev/mpic.c 	unsigned int cpu = mpic_processor_id(mpic);
cpu               255 arch/powerpc/sysdev/mpic.c 	return _mpic_read(mpic->reg_type, &mpic->cpuregs[cpu], reg);
cpu               260 arch/powerpc/sysdev/mpic.c 	unsigned int cpu = mpic_processor_id(mpic);
cpu               262 arch/powerpc/sysdev/mpic.c 	_mpic_write(mpic->reg_type, &mpic->cpuregs[cpu], reg, value);
cpu              1071 arch/powerpc/sysdev/mpic.c 		int cpu;
cpu              1074 arch/powerpc/sysdev/mpic.c 		cpu = mpic_processor_id(mpic);
cpu              1078 arch/powerpc/sysdev/mpic.c 		mpic_set_destination(virq, cpu);
cpu              1447 arch/powerpc/sysdev/mpic.c 		unsigned int cpu = get_hard_smp_processor_id(i);
cpu              1449 arch/powerpc/sysdev/mpic.c 		mpic_map(mpic, mpic->paddr, &mpic->cpuregs[cpu],
cpu              1450 arch/powerpc/sysdev/mpic.c 			 MPIC_INFO(CPU_BASE) + cpu * MPIC_INFO(CPU_STRIDE),
cpu              1551 arch/powerpc/sysdev/mpic.c 	int i, cpu;
cpu              1606 arch/powerpc/sysdev/mpic.c 	cpu = mpic_processor_id(mpic);
cpu              1619 arch/powerpc/sysdev/mpic.c 			mpic_irq_write(i, MPIC_INFO(IRQ_DESTINATION), 1 << cpu);
cpu              1862 arch/powerpc/sysdev/mpic.c void smp_mpic_message_pass(int cpu, int msg)
cpu              1880 arch/powerpc/sysdev/mpic.c 	physmask = 1 << get_hard_smp_processor_id(cpu);
cpu              1900 arch/powerpc/sysdev/mpic.c void smp_mpic_setup_cpu(int cpu)
cpu              1905 arch/powerpc/sysdev/mpic.c void mpic_reset_core(int cpu)
cpu              1909 arch/powerpc/sysdev/mpic.c 	int cpuid = get_hard_smp_processor_id(cpu);
cpu                36 arch/powerpc/sysdev/mpic.h extern void mpic_reset_core(int cpu);
cpu                84 arch/powerpc/sysdev/xics/icp-hv.c 	int cpu = smp_processor_id();
cpu                87 arch/powerpc/sysdev/xics/icp-hv.c 	icp_hv_set_qirr(cpu, 0xff);
cpu               136 arch/powerpc/sysdev/xics/icp-hv.c static void icp_hv_cause_ipi(int cpu)
cpu               138 arch/powerpc/sysdev/xics/icp-hv.c 	icp_hv_set_qirr(cpu, IPI_PRIORITY);
cpu               143 arch/powerpc/sysdev/xics/icp-hv.c 	int cpu = smp_processor_id();
cpu               145 arch/powerpc/sysdev/xics/icp-hv.c 	icp_hv_set_qirr(cpu, 0xff);
cpu                49 arch/powerpc/sysdev/xics/icp-native.c 	int cpu = smp_processor_id();
cpu                57 arch/powerpc/sysdev/xics/icp-native.c 	return in_be32(&icp_native_regs[cpu]->xirr.word);
cpu                62 arch/powerpc/sysdev/xics/icp-native.c 	int cpu = smp_processor_id();
cpu                64 arch/powerpc/sysdev/xics/icp-native.c 	out_be32(&icp_native_regs[cpu]->xirr.word, value);
cpu                69 arch/powerpc/sysdev/xics/icp-native.c 	int cpu = smp_processor_id();
cpu                71 arch/powerpc/sysdev/xics/icp-native.c 	out_8(&icp_native_regs[cpu]->xirr.bytes[0], value);
cpu                96 arch/powerpc/sysdev/xics/icp-native.c 	int cpu = smp_processor_id();
cpu                99 arch/powerpc/sysdev/xics/icp-native.c 	icp_native_set_qirr(cpu, 0xff);
cpu               141 arch/powerpc/sysdev/xics/icp-native.c static void icp_native_cause_ipi(int cpu)
cpu               143 arch/powerpc/sysdev/xics/icp-native.c 	kvmppc_set_host_ipi(cpu);
cpu               144 arch/powerpc/sysdev/xics/icp-native.c 	icp_native_set_qirr(cpu, IPI_PRIORITY);
cpu               148 arch/powerpc/sysdev/xics/icp-native.c void icp_native_cause_ipi_rm(int cpu)
cpu               162 arch/powerpc/sysdev/xics/icp-native.c 	xics_phys = paca_ptrs[cpu]->kvm_hstate.xics_phys;
cpu               181 arch/powerpc/sysdev/xics/icp-native.c 		int cpu = smp_processor_id();
cpu               182 arch/powerpc/sysdev/xics/icp-native.c 		kvmppc_clear_host_ipi(cpu);
cpu               183 arch/powerpc/sysdev/xics/icp-native.c 		icp_native_set_qirr(cpu, 0xff);
cpu               193 arch/powerpc/sysdev/xics/icp-native.c void xics_wake_cpu(int cpu)
cpu               195 arch/powerpc/sysdev/xics/icp-native.c 	icp_native_set_qirr(cpu, IPI_PRIORITY);
cpu               201 arch/powerpc/sysdev/xics/icp-native.c 	int cpu = smp_processor_id();
cpu               203 arch/powerpc/sysdev/xics/icp-native.c 	kvmppc_clear_host_ipi(cpu);
cpu               204 arch/powerpc/sysdev/xics/icp-native.c 	icp_native_set_qirr(cpu, 0xff);
cpu               215 arch/powerpc/sysdev/xics/icp-native.c 	int i, cpu = -1;
cpu               224 arch/powerpc/sysdev/xics/icp-native.c 			cpu = i;
cpu               232 arch/powerpc/sysdev/xics/icp-native.c 	if (cpu == -1)
cpu               236 arch/powerpc/sysdev/xics/icp-native.c 			  cpu, hw_id);
cpu               240 arch/powerpc/sysdev/xics/icp-native.c 			cpu, hw_id);
cpu               244 arch/powerpc/sysdev/xics/icp-native.c 	icp_native_regs[cpu] = ioremap(addr, size);
cpu               245 arch/powerpc/sysdev/xics/icp-native.c 	kvmppc_set_xics_phys(cpu, addr);
cpu               246 arch/powerpc/sysdev/xics/icp-native.c 	if (!icp_native_regs[cpu]) {
cpu               248 arch/powerpc/sysdev/xics/icp-native.c 			cpu, hw_id, addr);
cpu               125 arch/powerpc/sysdev/xics/icp-opal.c static void icp_opal_cause_ipi(int cpu)
cpu               127 arch/powerpc/sysdev/xics/icp-opal.c 	int hw_cpu = get_hard_smp_processor_id(cpu);
cpu               129 arch/powerpc/sysdev/xics/icp-opal.c 	kvmppc_set_host_ipi(cpu);
cpu               135 arch/powerpc/sysdev/xics/icp-opal.c 	int cpu = smp_processor_id();
cpu               137 arch/powerpc/sysdev/xics/icp-opal.c 	kvmppc_clear_host_ipi(cpu);
cpu               138 arch/powerpc/sysdev/xics/icp-opal.c 	opal_int_set_mfrr(get_hard_smp_processor_id(cpu), 0xff);
cpu               159 arch/powerpc/sysdev/xics/icp-opal.c 			int cpu = smp_processor_id();
cpu               160 arch/powerpc/sysdev/xics/icp-opal.c 			kvmppc_clear_host_ipi(cpu);
cpu               161 arch/powerpc/sysdev/xics/icp-opal.c 			opal_int_set_mfrr(get_hard_smp_processor_id(cpu), 0xff);
cpu               183 arch/powerpc/sysdev/xics/xics-common.c 	int cpu = smp_processor_id(), hw_cpu = hard_smp_processor_id();
cpu               240 arch/powerpc/sysdev/xics/xics-common.c 		if (cpu_online(cpu))
cpu               242 arch/powerpc/sysdev/xics/xics-common.c 				virq, cpu);
cpu               238 arch/powerpc/sysdev/xive/common.c notrace void xmon_xive_do_dump(int cpu)
cpu               240 arch/powerpc/sysdev/xive/common.c 	struct xive_cpu *xc = per_cpu(xive_cpu, cpu);
cpu               242 arch/powerpc/sysdev/xive/common.c 	xmon_printf("CPU %d:", cpu);
cpu               464 arch/powerpc/sysdev/xive/common.c static bool xive_try_pick_target(int cpu)
cpu               466 arch/powerpc/sysdev/xive/common.c 	struct xive_cpu *xc = per_cpu(xive_cpu, cpu);
cpu               488 arch/powerpc/sysdev/xive/common.c static void xive_dec_target_count(int cpu)
cpu               490 arch/powerpc/sysdev/xive/common.c 	struct xive_cpu *xc = per_cpu(xive_cpu, cpu);
cpu               493 arch/powerpc/sysdev/xive/common.c 	if (WARN_ON(cpu < 0 || !xc)) {
cpu               494 arch/powerpc/sysdev/xive/common.c 		pr_err("%s: cpu=%d xc=%p\n", __func__, cpu, xc);
cpu               512 arch/powerpc/sysdev/xive/common.c 	int cpu, first, num, i;
cpu               519 arch/powerpc/sysdev/xive/common.c 	cpu = cpumask_first(mask);
cpu               520 arch/powerpc/sysdev/xive/common.c 	for (i = 0; i < first && cpu < nr_cpu_ids; i++)
cpu               521 arch/powerpc/sysdev/xive/common.c 		cpu = cpumask_next(cpu, mask);
cpu               524 arch/powerpc/sysdev/xive/common.c 	if (WARN_ON(cpu >= nr_cpu_ids))
cpu               525 arch/powerpc/sysdev/xive/common.c 		cpu = cpumask_first(cpu_online_mask);
cpu               528 arch/powerpc/sysdev/xive/common.c 	first = cpu;
cpu               539 arch/powerpc/sysdev/xive/common.c 		if (cpu_online(cpu) && xive_try_pick_target(cpu))
cpu               540 arch/powerpc/sysdev/xive/common.c 			return cpu;
cpu               541 arch/powerpc/sysdev/xive/common.c 		cpu = cpumask_next(cpu, mask);
cpu               543 arch/powerpc/sysdev/xive/common.c 		if (cpu >= nr_cpu_ids)
cpu               544 arch/powerpc/sysdev/xive/common.c 			cpu = cpumask_first(mask);
cpu               545 arch/powerpc/sysdev/xive/common.c 	} while (cpu != first);
cpu               561 arch/powerpc/sysdev/xive/common.c 	int cpu = -1;
cpu               570 arch/powerpc/sysdev/xive/common.c 		for_each_cpu_and(cpu, affinity, cpu_online_mask) {
cpu               571 arch/powerpc/sysdev/xive/common.c 			struct xive_cpu *xc = per_cpu(xive_cpu, cpu);
cpu               573 arch/powerpc/sysdev/xive/common.c 				cpumask_set_cpu(cpu, mask);
cpu               577 arch/powerpc/sysdev/xive/common.c 			cpu = -1;
cpu               579 arch/powerpc/sysdev/xive/common.c 			cpu = xive_find_target_in_mask(mask, fuzz++);
cpu               581 arch/powerpc/sysdev/xive/common.c 		if (cpu >= 0)
cpu               582 arch/powerpc/sysdev/xive/common.c 			return cpu;
cpu              1074 arch/powerpc/sysdev/xive/common.c static void xive_cause_ipi(int cpu)
cpu              1079 arch/powerpc/sysdev/xive/common.c 	xc = per_cpu(xive_cpu, cpu);
cpu              1082 arch/powerpc/sysdev/xive/common.c 		    smp_processor_id(), cpu, xc->hw_ipi);
cpu              1145 arch/powerpc/sysdev/xive/common.c static int xive_setup_cpu_ipi(unsigned int cpu)
cpu              1150 arch/powerpc/sysdev/xive/common.c 	pr_debug("Setting up IPI for CPU %d\n", cpu);
cpu              1152 arch/powerpc/sysdev/xive/common.c 	xc = per_cpu(xive_cpu, cpu);
cpu              1159 arch/powerpc/sysdev/xive/common.c 	if (xive_ops->get_ipi(cpu, xc))
cpu              1168 arch/powerpc/sysdev/xive/common.c 		pr_err("Failed to populate IPI data on CPU %d\n", cpu);
cpu              1172 arch/powerpc/sysdev/xive/common.c 				     get_hard_smp_processor_id(cpu),
cpu              1175 arch/powerpc/sysdev/xive/common.c 		pr_err("Failed to map IPI CPU %d\n", cpu);
cpu              1178 arch/powerpc/sysdev/xive/common.c 	pr_devel("CPU %d HW IPI %x, virq %d, trig_mmio=%p\n", cpu,
cpu              1187 arch/powerpc/sysdev/xive/common.c static void xive_cleanup_cpu_ipi(unsigned int cpu, struct xive_cpu *xc)
cpu              1209 arch/powerpc/sysdev/xive/common.c 	xive_ops->put_ipi(cpu, xc);
cpu              1315 arch/powerpc/sysdev/xive/common.c static void xive_cleanup_cpu_queues(unsigned int cpu, struct xive_cpu *xc)
cpu              1318 arch/powerpc/sysdev/xive/common.c 		xive_ops->cleanup_queue(cpu, xc, xive_irq_priority);
cpu              1321 arch/powerpc/sysdev/xive/common.c static int xive_setup_cpu_queues(unsigned int cpu, struct xive_cpu *xc)
cpu              1327 arch/powerpc/sysdev/xive/common.c 		rc = xive_ops->setup_queue(cpu, xc, xive_irq_priority);
cpu              1332 arch/powerpc/sysdev/xive/common.c static int xive_prepare_cpu(unsigned int cpu)
cpu              1336 arch/powerpc/sysdev/xive/common.c 	xc = per_cpu(xive_cpu, cpu);
cpu              1341 arch/powerpc/sysdev/xive/common.c 				  GFP_KERNEL, cpu_to_node(cpu));
cpu              1344 arch/powerpc/sysdev/xive/common.c 		np = of_get_cpu_node(cpu, NULL);
cpu              1350 arch/powerpc/sysdev/xive/common.c 		per_cpu(xive_cpu, cpu) = xc;
cpu              1354 arch/powerpc/sysdev/xive/common.c 	return xive_setup_cpu_queues(cpu, xc);
cpu              1381 arch/powerpc/sysdev/xive/common.c int xive_smp_prepare_cpu(unsigned int cpu)
cpu              1386 arch/powerpc/sysdev/xive/common.c 	rc = xive_prepare_cpu(cpu);
cpu              1391 arch/powerpc/sysdev/xive/common.c 	return xive_setup_cpu_ipi(cpu);
cpu              1395 arch/powerpc/sysdev/xive/common.c static void xive_flush_cpu_queue(unsigned int cpu, struct xive_cpu *xc)
cpu              1427 arch/powerpc/sysdev/xive/common.c 			cpu, irq);
cpu              1453 arch/powerpc/sysdev/xive/common.c 	unsigned int cpu = smp_processor_id();
cpu              1463 arch/powerpc/sysdev/xive/common.c 	xive_flush_cpu_queue(cpu, xc);
cpu              1473 arch/powerpc/sysdev/xive/common.c 	unsigned int cpu = smp_processor_id();
cpu              1476 arch/powerpc/sysdev/xive/common.c 	xive_flush_cpu_queue(cpu, xc);
cpu              1486 arch/powerpc/sysdev/xive/common.c 	unsigned int cpu = smp_processor_id();
cpu              1493 arch/powerpc/sysdev/xive/common.c 		xive_ops->teardown_cpu(cpu, xc);
cpu              1497 arch/powerpc/sysdev/xive/common.c 	xive_cleanup_cpu_ipi(cpu, xc);
cpu              1501 arch/powerpc/sysdev/xive/common.c 	xive_cleanup_cpu_queues(cpu, xc);
cpu              1538 arch/powerpc/sysdev/xive/common.c __be32 *xive_queue_page_alloc(unsigned int cpu, u32 queue_shift)
cpu              1545 arch/powerpc/sysdev/xive/common.c 	pages = alloc_pages_node(cpu_to_node(cpu), GFP_KERNEL, alloc_order);
cpu               215 arch/powerpc/sysdev/xive/native.c static int xive_native_setup_queue(unsigned int cpu, struct xive_cpu *xc, u8 prio)
cpu               220 arch/powerpc/sysdev/xive/native.c 	qpage = xive_queue_page_alloc(cpu, xive_queue_shift);
cpu               224 arch/powerpc/sysdev/xive/native.c 	return xive_native_configure_queue(get_hard_smp_processor_id(cpu),
cpu               228 arch/powerpc/sysdev/xive/native.c static void xive_native_cleanup_queue(unsigned int cpu, struct xive_cpu *xc, u8 prio)
cpu               237 arch/powerpc/sysdev/xive/native.c 	__xive_native_disable_queue(get_hard_smp_processor_id(cpu), q, prio);
cpu               260 arch/powerpc/sysdev/xive/native.c static int xive_native_get_ipi(unsigned int cpu, struct xive_cpu *xc)
cpu               272 arch/powerpc/sysdev/xive/native.c 			pr_err("Failed to allocate IPI on CPU %d\n", cpu);
cpu               310 arch/powerpc/sysdev/xive/native.c static void xive_native_put_ipi(unsigned int cpu, struct xive_cpu *xc)
cpu               394 arch/powerpc/sysdev/xive/native.c static void xive_native_setup_cpu(unsigned int cpu, struct xive_cpu *xc)
cpu               409 arch/powerpc/sysdev/xive/native.c 	vp = xive_pool_vps + cpu;
cpu               417 arch/powerpc/sysdev/xive/native.c 		pr_err("Failed to enable pool VP on CPU %d\n", cpu);
cpu               424 arch/powerpc/sysdev/xive/native.c 		pr_err("Failed to get pool VP info CPU %d\n", cpu);
cpu               434 arch/powerpc/sysdev/xive/native.c static void xive_native_teardown_cpu(unsigned int cpu, struct xive_cpu *xc)
cpu               446 arch/powerpc/sysdev/xive/native.c 	vp = xive_pool_vps + cpu;
cpu               557 arch/powerpc/sysdev/xive/native.c 	u32 val, cpu;
cpu               598 arch/powerpc/sysdev/xive/native.c 	for_each_possible_cpu(cpu)
cpu               599 arch/powerpc/sysdev/xive/native.c 		kvmppc_set_xive_tima(cpu, r.start, tima);
cpu               509 arch/powerpc/sysdev/xive/spapr.c static int xive_spapr_setup_queue(unsigned int cpu, struct xive_cpu *xc,
cpu               515 arch/powerpc/sysdev/xive/spapr.c 	qpage = xive_queue_page_alloc(cpu, xive_queue_shift);
cpu               519 arch/powerpc/sysdev/xive/spapr.c 	return xive_spapr_configure_queue(get_hard_smp_processor_id(cpu),
cpu               523 arch/powerpc/sysdev/xive/spapr.c static void xive_spapr_cleanup_queue(unsigned int cpu, struct xive_cpu *xc,
cpu               529 arch/powerpc/sysdev/xive/spapr.c 	int hw_cpu = get_hard_smp_processor_id(cpu);
cpu               548 arch/powerpc/sysdev/xive/spapr.c static int xive_spapr_get_ipi(unsigned int cpu, struct xive_cpu *xc)
cpu               553 arch/powerpc/sysdev/xive/spapr.c 		pr_err("Failed to allocate IPI on CPU %d\n", cpu);
cpu               561 arch/powerpc/sysdev/xive/spapr.c static void xive_spapr_put_ipi(unsigned int cpu, struct xive_cpu *xc)
cpu               628 arch/powerpc/sysdev/xive/spapr.c static void xive_spapr_setup_cpu(unsigned int cpu, struct xive_cpu *xc)
cpu               637 arch/powerpc/sysdev/xive/spapr.c static void xive_spapr_teardown_cpu(unsigned int cpu, struct xive_cpu *xc)
cpu                45 arch/powerpc/sysdev/xive/xive-internal.h 	int	(*setup_queue)(unsigned int cpu, struct xive_cpu *xc, u8 prio);
cpu                46 arch/powerpc/sysdev/xive/xive-internal.h 	void	(*cleanup_queue)(unsigned int cpu, struct xive_cpu *xc, u8 prio);
cpu                47 arch/powerpc/sysdev/xive/xive-internal.h 	void	(*setup_cpu)(unsigned int cpu, struct xive_cpu *xc);
cpu                48 arch/powerpc/sysdev/xive/xive-internal.h 	void	(*teardown_cpu)(unsigned int cpu, struct xive_cpu *xc);
cpu                57 arch/powerpc/sysdev/xive/xive-internal.h 	int	(*get_ipi)(unsigned int cpu, struct xive_cpu *xc);
cpu                58 arch/powerpc/sysdev/xive/xive-internal.h 	void	(*put_ipi)(unsigned int cpu, struct xive_cpu *xc);
cpu                65 arch/powerpc/sysdev/xive/xive-internal.h __be32 *xive_queue_page_alloc(unsigned int cpu, u32 queue_shift);
cpu               496 arch/powerpc/xmon/xmon.c 	int cpu;
cpu               519 arch/powerpc/xmon/xmon.c 	cpu = smp_processor_id();
cpu               520 arch/powerpc/xmon/xmon.c 	if (cpumask_test_cpu(cpu, &cpus_in_xmon)) {
cpu               531 arch/powerpc/xmon/xmon.c 		       cpu, regs->trap, getvecname(TRAP(regs)));
cpu               533 arch/powerpc/xmon/xmon.c 		longjmp(xmon_fault_jmp[cpu], 1);
cpu               540 arch/powerpc/xmon/xmon.c 			       "on cpu 0x%x\n", cpu);
cpu               544 arch/powerpc/xmon/xmon.c 		secondary = !(xmon_taken && cpu == xmon_owner);
cpu               548 arch/powerpc/xmon/xmon.c 	xmon_fault_jmp[cpu] = recurse_jmp;
cpu               562 arch/powerpc/xmon/xmon.c 			       cpu, BP_NUM(bp));
cpu               571 arch/powerpc/xmon/xmon.c 	cpumask_set_cpu(cpu, &cpus_in_xmon);
cpu               594 arch/powerpc/xmon/xmon.c 		xmon_owner = cpu;
cpu               629 arch/powerpc/xmon/xmon.c 			if (cpu == xmon_owner) {
cpu               636 arch/powerpc/xmon/xmon.c 				while (cpu == xmon_owner)
cpu               657 arch/powerpc/xmon/xmon.c 	cpumask_clear_cpu(cpu, &cpus_in_xmon);
cpu               658 arch/powerpc/xmon/xmon.c 	xmon_fault_jmp[cpu] = NULL;
cpu              1214 arch/powerpc/xmon/xmon.c 	unsigned long cpu, first_cpu, last_cpu;
cpu              1217 arch/powerpc/xmon/xmon.c 	if (!scanhex(&cpu)) {
cpu              1221 arch/powerpc/xmon/xmon.c 		for_each_possible_cpu(cpu) {
cpu              1222 arch/powerpc/xmon/xmon.c 			if (cpumask_test_cpu(cpu, &cpus_in_xmon)) {
cpu              1223 arch/powerpc/xmon/xmon.c 				if (cpu == last_cpu + 1) {
cpu              1224 arch/powerpc/xmon/xmon.c 					last_cpu = cpu;
cpu              1228 arch/powerpc/xmon/xmon.c 					last_cpu = first_cpu = cpu;
cpu              1229 arch/powerpc/xmon/xmon.c 					printf(" 0x%lx", cpu);
cpu              1239 arch/powerpc/xmon/xmon.c 	if (!cpumask_test_cpu(cpu, &cpus_in_xmon)) {
cpu              1240 arch/powerpc/xmon/xmon.c 		printf("cpu 0x%lx isn't in xmon\n", cpu);
cpu              1242 arch/powerpc/xmon/xmon.c 		printf("backtrace of paca[0x%lx].saved_r1 (possibly stale):\n", cpu);
cpu              1243 arch/powerpc/xmon/xmon.c 		xmon_show_stack(paca_ptrs[cpu]->saved_r1, 0, 0);
cpu              1249 arch/powerpc/xmon/xmon.c 	xmon_owner = cpu;
cpu              1258 arch/powerpc/xmon/xmon.c 			printf("cpu 0x%lx didn't take control\n", cpu);
cpu              2410 arch/powerpc/xmon/xmon.c static void dump_one_paca(int cpu)
cpu              2418 arch/powerpc/xmon/xmon.c 		printf("*** Error dumping paca for cpu 0x%x!\n", cpu);
cpu              2425 arch/powerpc/xmon/xmon.c 	p = paca_ptrs[cpu];
cpu              2427 arch/powerpc/xmon/xmon.c 	printf("paca for cpu 0x%x @ %px:\n", cpu, p);
cpu              2429 arch/powerpc/xmon/xmon.c 	printf(" %-*s = %s\n", 25, "possible", cpu_possible(cpu) ? "yes" : "no");
cpu              2430 arch/powerpc/xmon/xmon.c 	printf(" %-*s = %s\n", 25, "present", cpu_present(cpu) ? "yes" : "no");
cpu              2431 arch/powerpc/xmon/xmon.c 	printf(" %-*s = %s\n", 25, "online", cpu_online(cpu) ? "yes" : "no");
cpu              2550 arch/powerpc/xmon/xmon.c 	int cpu;
cpu              2557 arch/powerpc/xmon/xmon.c 	for_each_possible_cpu(cpu)
cpu              2558 arch/powerpc/xmon/xmon.c 		dump_one_paca(cpu);
cpu              2582 arch/powerpc/xmon/xmon.c static void dump_one_xive(int cpu)
cpu              2584 arch/powerpc/xmon/xmon.c 	unsigned int hwid = get_hard_smp_processor_id(cpu);
cpu              2598 arch/powerpc/xmon/xmon.c 		printf("*** Error dumping xive on cpu %d\n", cpu);
cpu              2604 arch/powerpc/xmon/xmon.c 	xmon_xive_do_dump(cpu);
cpu              2612 arch/powerpc/xmon/xmon.c 	int cpu;
cpu              2619 arch/powerpc/xmon/xmon.c 	for_each_possible_cpu(cpu)
cpu              2620 arch/powerpc/xmon/xmon.c 		dump_one_xive(cpu);
cpu              3473 arch/powerpc/xmon/xmon.c 	unsigned long addr, cpu;
cpu              3511 arch/powerpc/xmon/xmon.c 			if (scanhex(&cpu) && cpu < num_possible_cpus()) {
cpu              3512 arch/powerpc/xmon/xmon.c 				addr = (unsigned long)per_cpu_ptr(ptr, cpu);
cpu              3514 arch/powerpc/xmon/xmon.c 				cpu = raw_smp_processor_id();
cpu              3518 arch/powerpc/xmon/xmon.c 			printf("%s for cpu 0x%lx: %lx\n", tmp, cpu, addr);
cpu                23 arch/riscv/include/asm/smp.h #define cpuid_to_hartid_map(cpu)    __cpuid_to_hartid_map[cpu]
cpu                35 arch/riscv/include/asm/smp.h void arch_send_call_function_single_ipi(int cpu);
cpu                44 arch/riscv/include/asm/smp.h #define raw_smp_processor_id() (current_thread_info()->cpu)
cpu                59 arch/riscv/include/asm/smp.h static inline unsigned long cpuid_to_hartid_map(int cpu)
cpu                46 arch/riscv/include/asm/thread_info.h 	int			cpu;
cpu                37 arch/riscv/kernel/asm-offsets.c 	OFFSET(TASK_TI_CPU, task_struct, thread_info.cpu);
cpu                19 arch/riscv/kernel/cacheinfo.c static int __init_cache_level(unsigned int cpu)
cpu                21 arch/riscv/kernel/cacheinfo.c 	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
cpu                22 arch/riscv/kernel/cacheinfo.c 	struct device_node *np = of_cpu_device_node_get(cpu);
cpu                61 arch/riscv/kernel/cacheinfo.c static int __populate_cache_leaves(unsigned int cpu)
cpu                63 arch/riscv/kernel/cacheinfo.c 	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
cpu                65 arch/riscv/kernel/cacheinfo.c 	struct device_node *np = of_cpu_device_node_get(cpu);
cpu                59 arch/riscv/kernel/smp.c 	int cpu;
cpu                62 arch/riscv/kernel/smp.c 	for_each_cpu(cpu, in)
cpu                63 arch/riscv/kernel/smp.c 		cpumask_set_cpu(cpuid_to_hartid_map(cpu), out);
cpu                66 arch/riscv/kernel/smp.c bool arch_match_cpu_phys_id(int cpu, u64 phys_id)
cpu                68 arch/riscv/kernel/smp.c 	return phys_id == cpuid_to_hartid_map(cpu);
cpu                87 arch/riscv/kernel/smp.c 	int cpu;
cpu                90 arch/riscv/kernel/smp.c 	for_each_cpu(cpu, mask)
cpu                91 arch/riscv/kernel/smp.c 		set_bit(op, &ipi_data[cpu].bits);
cpu                98 arch/riscv/kernel/smp.c static void send_ipi_single(int cpu, enum ipi_message_type op)
cpu               100 arch/riscv/kernel/smp.c 	int hartid = cpuid_to_hartid_map(cpu);
cpu               103 arch/riscv/kernel/smp.c 	set_bit(op, &ipi_data[cpu].bits);
cpu               161 arch/riscv/kernel/smp.c 	unsigned int cpu, i;
cpu               166 arch/riscv/kernel/smp.c 		for_each_online_cpu(cpu)
cpu               167 arch/riscv/kernel/smp.c 			seq_printf(p, "%10lu ", ipi_data[cpu].stats[i]);
cpu               177 arch/riscv/kernel/smp.c void arch_send_call_function_single_ipi(int cpu)
cpu               179 arch/riscv/kernel/smp.c 	send_ipi_single(cpu, IPI_CALL_FUNC);
cpu               207 arch/riscv/kernel/smp.c void smp_send_reschedule(int cpu)
cpu               209 arch/riscv/kernel/smp.c 	send_ipi_single(cpu, IPI_RESCHEDULE);
cpu                99 arch/riscv/kernel/smpboot.c int __cpu_up(unsigned int cpu, struct task_struct *tidle)
cpu               102 arch/riscv/kernel/smpboot.c 	int hartid = cpuid_to_hartid_map(cpu);
cpu               103 arch/riscv/kernel/smpboot.c 	tidle->thread_info.cpu = cpu;
cpu               121 arch/riscv/kernel/smpboot.c 	if (!cpu_online(cpu)) {
cpu               122 arch/riscv/kernel/smpboot.c 		pr_crit("CPU%u: failed to come online\n", cpu);
cpu                17 arch/riscv/kernel/time.c 	struct device_node *cpu;
cpu                20 arch/riscv/kernel/time.c 	cpu = of_find_node_by_path("/cpus");
cpu                21 arch/riscv/kernel/time.c 	if (!cpu || of_property_read_u32(cpu, "timebase-frequency", &prop))
cpu                23 arch/riscv/kernel/time.c 	of_node_put(cpu);
cpu                31 arch/riscv/mm/cacheflush.c 	unsigned int cpu;
cpu                40 arch/riscv/mm/cacheflush.c 	cpu = smp_processor_id();
cpu                41 arch/riscv/mm/cacheflush.c 	cpumask_clear_cpu(cpu, mask);
cpu                48 arch/riscv/mm/cacheflush.c 	cpumask_andnot(&others, mm_cpumask(mm), cpumask_of(cpu));
cpu                27 arch/riscv/mm/context.c 	unsigned int cpu = smp_processor_id();
cpu                30 arch/riscv/mm/context.c 	if (cpumask_test_cpu(cpu, mask)) {
cpu                31 arch/riscv/mm/context.c 		cpumask_clear_cpu(cpu, mask);
cpu                46 arch/riscv/mm/context.c 	unsigned int cpu;
cpu                56 arch/riscv/mm/context.c 	cpu = smp_processor_id();
cpu                58 arch/riscv/mm/context.c 	cpumask_clear_cpu(cpu, mm_cpumask(prev));
cpu                59 arch/riscv/mm/context.c 	cpumask_set_cpu(cpu, mm_cpumask(next));
cpu                33 arch/s390/hypfs/hypfs_diag0c.c 	unsigned int cpu_count, cpu, i;
cpu                49 arch/s390/hypfs/hypfs_diag0c.c 	for_each_online_cpu(cpu) {
cpu                50 arch/s390/hypfs/hypfs_diag0c.c 		diag0c_data->entry[i].cpu = cpu;
cpu                51 arch/s390/hypfs/hypfs_diag0c.c 		cpu_vec[cpu] = &diag0c_data->entry[i++];
cpu                34 arch/s390/include/asm/cputime.h u64 arch_cpu_idle_time(int cpu);
cpu                36 arch/s390/include/asm/cputime.h #define arch_idle_time(cpu) arch_cpu_idle_time(cpu)
cpu               100 arch/s390/include/asm/kvm_host.h 	struct bsca_entry cpu[KVM_S390_BSCA_CPU_SLOTS];
cpu               108 arch/s390/include/asm/kvm_host.h 	struct esca_entry cpu[KVM_S390_ESCA_CPU_SLOTS];
cpu               917 arch/s390/include/asm/kvm_host.h static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
cpu                91 arch/s390/include/asm/mmu_context.h 	int cpu = smp_processor_id();
cpu                94 arch/s390/include/asm/mmu_context.h 	cpumask_set_cpu(cpu, &next->context.cpu_attach_mask);
cpu               105 arch/s390/include/asm/mmu_context.h 		cpumask_clear_cpu(cpu, &prev->context.cpu_attach_mask);
cpu                34 arch/s390/include/asm/preempt.h #define init_idle_preempt_count(p, cpu)	do { \
cpu                93 arch/s390/include/asm/preempt.h #define init_idle_preempt_count(p, cpu)	do { \
cpu                69 arch/s390/include/asm/processor.h static inline int test_cpu_flag_of(int flag, int cpu)
cpu                71 arch/s390/include/asm/processor.h 	struct lowcore *lc = lowcore_ptr[cpu];
cpu                21 arch/s390/include/asm/smp.h extern int __cpu_up(unsigned int cpu, struct task_struct *tidle);
cpu                23 arch/s390/include/asm/smp.h extern void arch_send_call_function_single_ipi(int cpu);
cpu                31 arch/s390/include/asm/smp.h extern int smp_store_status(int cpu);
cpu                33 arch/s390/include/asm/smp.h extern int smp_vcpu_scheduled(int cpu);
cpu                34 arch/s390/include/asm/smp.h extern void smp_yield_cpu(int cpu);
cpu                35 arch/s390/include/asm/smp.h extern void smp_cpu_set_polarization(int cpu, int val);
cpu                36 arch/s390/include/asm/smp.h extern int smp_cpu_get_polarization(int cpu);
cpu                51 arch/s390/include/asm/smp.h static inline int smp_get_base_cpu(int cpu)
cpu                53 arch/s390/include/asm/smp.h 	return cpu - (cpu % (smp_cpu_mtid + 1));
cpu                58 arch/s390/include/asm/smp.h extern void __cpu_die(unsigned int cpu);
cpu                23 arch/s390/include/asm/spinlock.h bool arch_vcpu_is_preempted(int cpu);
cpu                41 arch/s390/include/asm/spinlock.h void arch_spin_lock_setup(int cpu);
cpu                43 arch/s390/include/asm/spinlock.h static inline u32 arch_spin_lockval(int cpu)
cpu                45 arch/s390/include/asm/spinlock.h 	return cpu + 1;
cpu               173 arch/s390/include/asm/sysinfo.h 	struct topology_core cpu;
cpu                 9 arch/s390/include/asm/topology.h struct cpu;
cpu                30 arch/s390/include/asm/topology.h #define topology_physical_package_id(cpu) (cpu_topology[cpu].socket_id)
cpu                31 arch/s390/include/asm/topology.h #define topology_thread_id(cpu)		  (cpu_topology[cpu].thread_id)
cpu                32 arch/s390/include/asm/topology.h #define topology_sibling_cpumask(cpu)	  (&cpu_topology[cpu].thread_mask)
cpu                33 arch/s390/include/asm/topology.h #define topology_core_id(cpu)		  (cpu_topology[cpu].core_id)
cpu                34 arch/s390/include/asm/topology.h #define topology_core_cpumask(cpu)	  (&cpu_topology[cpu].core_mask)
cpu                35 arch/s390/include/asm/topology.h #define topology_book_id(cpu)		  (cpu_topology[cpu].book_id)
cpu                36 arch/s390/include/asm/topology.h #define topology_book_cpumask(cpu)	  (&cpu_topology[cpu].book_mask)
cpu                37 arch/s390/include/asm/topology.h #define topology_drawer_id(cpu)		  (cpu_topology[cpu].drawer_id)
cpu                38 arch/s390/include/asm/topology.h #define topology_drawer_cpumask(cpu)	  (&cpu_topology[cpu].drawer_mask)
cpu                39 arch/s390/include/asm/topology.h #define topology_cpu_dedicated(cpu)	  (cpu_topology[cpu].dedicated)
cpu                44 arch/s390/include/asm/topology.h int topology_cpu_init(struct cpu *);
cpu                49 arch/s390/include/asm/topology.h const struct cpumask *cpu_coregroup_mask(int cpu);
cpu                55 arch/s390/include/asm/topology.h static inline int topology_cpu_init(struct cpu *cpu) { return 0; }
cpu                72 arch/s390/include/asm/topology.h static inline int cpu_to_node(int cpu)
cpu                74 arch/s390/include/asm/topology.h 	return cpu_topology[cpu].node_id;
cpu                46 arch/s390/include/uapi/asm/hypfs.h 	__u32	cpu;		/* Linux logical CPU number */
cpu               107 arch/s390/kernel/cache.c 			 enum cache_type type, unsigned int level, int cpu)
cpu               123 arch/s390/kernel/cache.c 	cpumask_set_cpu(cpu, &this_leaf->shared_cpu_map);
cpu               128 arch/s390/kernel/cache.c int init_cache_level(unsigned int cpu)
cpu               130 arch/s390/kernel/cache.c 	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
cpu               152 arch/s390/kernel/cache.c int populate_cache_leaves(unsigned int cpu)
cpu               154 arch/s390/kernel/cache.c 	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
cpu               170 arch/s390/kernel/cache.c 			ci_leaf_init(this_leaf++, pvt, CACHE_TYPE_DATA, level, cpu);
cpu               171 arch/s390/kernel/cache.c 			ci_leaf_init(this_leaf++, pvt, CACHE_TYPE_INST, level, cpu);
cpu               173 arch/s390/kernel/cache.c 			ci_leaf_init(this_leaf++, pvt, ctype, level, cpu);
cpu               358 arch/s390/kernel/crash_dump.c static void *fill_cpu_elf_notes(void *ptr, int cpu, struct save_area *sa)
cpu               368 arch/s390/kernel/crash_dump.c 	nt_prstatus.pr_pid = cpu;
cpu               587 arch/s390/kernel/crash_dump.c 	int cpu;
cpu               591 arch/s390/kernel/crash_dump.c 	cpu = 1;
cpu               594 arch/s390/kernel/crash_dump.c 			ptr = fill_cpu_elf_notes(ptr, cpu++, sa);
cpu                60 arch/s390/kernel/diag.c 	int cpu, prec, tmp;
cpu                66 arch/s390/kernel/diag.c 		for_each_online_cpu(cpu) {
cpu                68 arch/s390/kernel/diag.c 			for (tmp = 10; cpu >= tmp; tmp *= 10)
cpu                70 arch/s390/kernel/diag.c 			seq_printf(m, "%*s%d", prec, "CPU", cpu);
cpu                75 arch/s390/kernel/diag.c 		for_each_online_cpu(cpu) {
cpu                76 arch/s390/kernel/diag.c 			stat = &per_cpu(diag_stat, cpu);
cpu                96 arch/s390/kernel/idle.c u64 arch_cpu_idle_time(int cpu)
cpu                98 arch/s390/kernel/idle.c 	struct s390_idle_data *idle = &per_cpu(s390_idle, cpu);
cpu               125 arch/s390/kernel/irq.c 	int cpu;
cpu               134 arch/s390/kernel/irq.c 	for_each_online_cpu(cpu)
cpu               135 arch/s390/kernel/irq.c 		seq_printf(p, "%10u ", kstat_irqs_cpu(irq, cpu));
cpu               155 arch/s390/kernel/irq.c 	int cpu, irq;
cpu               160 arch/s390/kernel/irq.c 		for_each_online_cpu(cpu)
cpu               161 arch/s390/kernel/irq.c 			seq_printf(p, "CPU%-8d", cpu);
cpu               167 arch/s390/kernel/irq.c 		for_each_online_cpu(cpu)
cpu               168 arch/s390/kernel/irq.c 			seq_printf(p, "%10u ", kstat_irqs_cpu(irq, cpu));
cpu               179 arch/s390/kernel/irq.c 		for_each_online_cpu(cpu)
cpu               181 arch/s390/kernel/irq.c 				   per_cpu(irq_stat, cpu).irqs[irq]);
cpu               110 arch/s390/kernel/machine_kexec.c 	int this_cpu, cpu;
cpu               115 arch/s390/kernel/machine_kexec.c 	for_each_online_cpu(cpu) {
cpu               116 arch/s390/kernel/machine_kexec.c 		if (cpu == this_cpu)
cpu               118 arch/s390/kernel/machine_kexec.c 		if (smp_store_status(cpu))
cpu               155 arch/s390/kernel/perf_cpum_cf_common.c static int cpum_cf_setup(unsigned int cpu, int flags)
cpu               163 arch/s390/kernel/perf_cpum_cf_common.c static int cpum_cf_online_cpu(unsigned int cpu)
cpu               165 arch/s390/kernel/perf_cpum_cf_common.c 	return cpum_cf_setup(cpu, PMC_INIT);
cpu               168 arch/s390/kernel/perf_cpum_cf_common.c static int cpum_cf_offline_cpu(unsigned int cpu)
cpu               170 arch/s390/kernel/perf_cpum_cf_common.c 	return cpum_cf_setup(cpu, PMC_RELEASE);
cpu               187 arch/s390/kernel/perf_cpum_cf_diag.c 			    __func__, event, event->cpu,
cpu               205 arch/s390/kernel/perf_cpum_cf_diag.c 			    event, event->cpu);
cpu               248 arch/s390/kernel/perf_cpum_cf_diag.c 			    event, event->cpu, attr->config, event->pmu->type,
cpu               501 arch/s390/kernel/perf_cpum_cf_diag.c 		data.cpu_entry.cpu = event->cpu;
cpu               512 arch/s390/kernel/perf_cpum_cf_diag.c 			    "ov %d\n", __func__, event, event->cpu,
cpu               529 arch/s390/kernel/perf_cpum_cf_diag.c 			    __func__, event, event->cpu, flags, hwc->state);
cpu               552 arch/s390/kernel/perf_cpum_cf_diag.c 			    __func__, event, event->cpu, flags, hwc->state);
cpu               571 arch/s390/kernel/perf_cpum_cf_diag.c 			    __func__, event, event->cpu, flags, cpuhw);
cpu               594 arch/s390/kernel/perf_cpum_cf_diag.c 			   __func__, event, event->cpu, flags);
cpu               760 arch/s390/kernel/perf_cpum_sf.c 			    "cpu:%d period:%llx freq:%d,%#lx\n", event->cpu,
cpu               772 arch/s390/kernel/perf_cpum_sf.c 	int cpu, err;
cpu               800 arch/s390/kernel/perf_cpum_sf.c 	if (event->cpu == -1)
cpu               806 arch/s390/kernel/perf_cpum_sf.c 		cpuhw = &per_cpu(cpu_hw_sf, event->cpu);
cpu               867 arch/s390/kernel/perf_cpum_sf.c 		for_each_online_cpu(cpu) {
cpu               868 arch/s390/kernel/perf_cpum_sf.c 			cpuhw = &per_cpu(cpu_hw_sf, cpu);
cpu               916 arch/s390/kernel/perf_cpum_sf.c 	if (event->cpu >= 0 && !cpu_online(event->cpu))
cpu              1806 arch/s390/kernel/perf_cpum_sf.c 	if (event->cpu == -1) {
cpu              1813 arch/s390/kernel/perf_cpum_sf.c 		struct cpu_hw_sf *cpuhw = &per_cpu(cpu_hw_sf, event->cpu);
cpu              1828 arch/s390/kernel/perf_cpum_sf.c 			    event->cpu, value,
cpu              2082 arch/s390/kernel/perf_cpum_sf.c static int cpusf_pmu_setup(unsigned int cpu, int flags)
cpu              2096 arch/s390/kernel/perf_cpum_sf.c static int s390_pmu_sf_online_cpu(unsigned int cpu)
cpu              2098 arch/s390/kernel/perf_cpum_sf.c 	return cpusf_pmu_setup(cpu, PMC_INIT);
cpu              2101 arch/s390/kernel/perf_cpum_sf.c static int s390_pmu_sf_offline_cpu(unsigned int cpu)
cpu              2103 arch/s390/kernel/perf_cpum_sf.c 	return cpusf_pmu_setup(cpu, PMC_RELEASE);
cpu               125 arch/s390/kernel/perf_event.c 	int cpu = smp_processor_id();
cpu               130 arch/s390/kernel/perf_event.c 			cpu, cf_info.cfvn, cf_info.csvn,
cpu               137 arch/s390/kernel/perf_event.c 	int cpu = smp_processor_id();
cpu               144 arch/s390/kernel/perf_event.c 		cpu, si.as, si.ad, si.min_sampl_rate, si.max_sampl_rate,
cpu               149 arch/s390/kernel/perf_event.c 			" bsdes=%i tear=%016lx dear=%016lx\n", cpu,
cpu               153 arch/s390/kernel/perf_event.c 			" dsdes=%i tear=%016lx dear=%016lx\n", cpu,
cpu                65 arch/s390/kernel/processor.c 	int cpu, this_cpu;
cpu                70 arch/s390/kernel/processor.c 		cpu = cpumask_next_wrap(this_cpu, cpumask, this_cpu, false);
cpu                71 arch/s390/kernel/processor.c 		if (cpu >= nr_cpu_ids)
cpu                73 arch/s390/kernel/processor.c 		if (arch_vcpu_is_preempted(cpu))
cpu                74 arch/s390/kernel/processor.c 			smp_yield_cpu(cpu);
cpu               125 arch/s390/kernel/processor.c 	int i, cpu;
cpu               143 arch/s390/kernel/processor.c 	for_each_online_cpu(cpu) {
cpu               144 arch/s390/kernel/processor.c 		struct cpuid *id = &per_cpu(cpu_info.cpu_id, cpu);
cpu               150 arch/s390/kernel/processor.c 			   cpu, id->version, id->ident, id->machine);
cpu                71 arch/s390/kernel/smp.c static DEFINE_PER_CPU(struct cpu *, cpu_device);
cpu               170 arch/s390/kernel/smp.c 	int cpu;
cpu               172 arch/s390/kernel/smp.c 	for_each_cpu(cpu, mask)
cpu               173 arch/s390/kernel/smp.c 		if (pcpu_devices[cpu].address == address)
cpu               174 arch/s390/kernel/smp.c 			return pcpu_devices + cpu;
cpu               189 arch/s390/kernel/smp.c static int pcpu_alloc_lowcore(struct pcpu *pcpu, int cpu)
cpu               211 arch/s390/kernel/smp.c 	lc->cpu_nr = cpu;
cpu               212 arch/s390/kernel/smp.c 	lc->spinlock_lockval = arch_spin_lockval(cpu);
cpu               221 arch/s390/kernel/smp.c 	lowcore_ptr[cpu] = lc;
cpu               256 arch/s390/kernel/smp.c static void pcpu_prepare_secondary(struct pcpu *pcpu, int cpu)
cpu               260 arch/s390/kernel/smp.c 	cpumask_set_cpu(cpu, &init_mm.context.cpu_attach_mask);
cpu               261 arch/s390/kernel/smp.c 	cpumask_set_cpu(cpu, mm_cpumask(&init_mm));
cpu               262 arch/s390/kernel/smp.c 	lc->cpu_nr = cpu;
cpu               263 arch/s390/kernel/smp.c 	lc->spinlock_lockval = arch_spin_lockval(cpu);
cpu               265 arch/s390/kernel/smp.c 	lc->percpu_offset = __per_cpu_offset[cpu];
cpu               279 arch/s390/kernel/smp.c 	arch_spin_lock_setup(cpu);
cpu               398 arch/s390/kernel/smp.c 	int cpu;
cpu               400 arch/s390/kernel/smp.c 	for_each_present_cpu(cpu)
cpu               401 arch/s390/kernel/smp.c 		if (pcpu_devices[cpu].address == address)
cpu               402 arch/s390/kernel/smp.c 			return cpu;
cpu               406 arch/s390/kernel/smp.c bool notrace arch_vcpu_is_preempted(int cpu)
cpu               408 arch/s390/kernel/smp.c 	if (test_cpu_flag_of(CIF_ENABLED_WAIT, cpu))
cpu               410 arch/s390/kernel/smp.c 	if (pcpu_running(pcpu_devices + cpu))
cpu               416 arch/s390/kernel/smp.c void notrace smp_yield_cpu(int cpu)
cpu               421 arch/s390/kernel/smp.c 			     : : "d" (pcpu_devices[cpu].address));
cpu               436 arch/s390/kernel/smp.c 	int cpu;
cpu               442 arch/s390/kernel/smp.c 	for_each_cpu(cpu, &cpumask) {
cpu               443 arch/s390/kernel/smp.c 		struct pcpu *pcpu = pcpu_devices + cpu;
cpu               451 arch/s390/kernel/smp.c 		for_each_cpu(cpu, &cpumask)
cpu               452 arch/s390/kernel/smp.c 			if (pcpu_stopped(pcpu_devices + cpu))
cpu               453 arch/s390/kernel/smp.c 				cpumask_clear_cpu(cpu, &cpumask);
cpu               466 arch/s390/kernel/smp.c 	int cpu;
cpu               478 arch/s390/kernel/smp.c 	for_each_online_cpu(cpu) {
cpu               479 arch/s390/kernel/smp.c 		if (cpu == smp_processor_id())
cpu               481 arch/s390/kernel/smp.c 		pcpu_sigp_retry(pcpu_devices + cpu, SIGP_STOP, 0);
cpu               482 arch/s390/kernel/smp.c 		while (!pcpu_stopped(pcpu_devices + cpu))
cpu               514 arch/s390/kernel/smp.c 	int cpu;
cpu               516 arch/s390/kernel/smp.c 	for_each_cpu(cpu, mask)
cpu               517 arch/s390/kernel/smp.c 		pcpu_ec_call(pcpu_devices + cpu, ec_call_function_single);
cpu               520 arch/s390/kernel/smp.c void arch_send_call_function_single_ipi(int cpu)
cpu               522 arch/s390/kernel/smp.c 	pcpu_ec_call(pcpu_devices + cpu, ec_call_function_single);
cpu               530 arch/s390/kernel/smp.c void smp_send_reschedule(int cpu)
cpu               532 arch/s390/kernel/smp.c 	pcpu_ec_call(pcpu_devices + cpu, ec_schedule);
cpu               581 arch/s390/kernel/smp.c int smp_store_status(int cpu)
cpu               583 arch/s390/kernel/smp.c 	struct pcpu *pcpu = pcpu_devices + cpu;
cpu               699 arch/s390/kernel/smp.c void smp_cpu_set_polarization(int cpu, int val)
cpu               701 arch/s390/kernel/smp.c 	pcpu_devices[cpu].polarization = val;
cpu               704 arch/s390/kernel/smp.c int smp_cpu_get_polarization(int cpu)
cpu               706 arch/s390/kernel/smp.c 	return pcpu_devices[cpu].polarization;
cpu               730 arch/s390/kernel/smp.c static int smp_add_present_cpu(int cpu);
cpu               736 arch/s390/kernel/smp.c 	int cpu, nr, i;
cpu               742 arch/s390/kernel/smp.c 	cpu = cpumask_first(avail);
cpu               744 arch/s390/kernel/smp.c 	for (i = 0; (i <= smp_cpu_mtid) && (cpu < nr_cpu_ids); i++) {
cpu               747 arch/s390/kernel/smp.c 		pcpu = pcpu_devices + cpu;
cpu               753 arch/s390/kernel/smp.c 		smp_cpu_set_polarization(cpu, POLARIZATION_UNKNOWN);
cpu               754 arch/s390/kernel/smp.c 		set_cpu_present(cpu, true);
cpu               755 arch/s390/kernel/smp.c 		if (!early && smp_add_present_cpu(cpu) != 0)
cpu               756 arch/s390/kernel/smp.c 			set_cpu_present(cpu, false);
cpu               759 arch/s390/kernel/smp.c 		cpumask_clear_cpu(cpu, avail);
cpu               760 arch/s390/kernel/smp.c 		cpu = cpumask_next(cpu, avail);
cpu               798 arch/s390/kernel/smp.c 	unsigned int cpu, mtid, c_cpus, s_cpus;
cpu               811 arch/s390/kernel/smp.c 		for (cpu = 0; cpu < info->combined; cpu++)
cpu               812 arch/s390/kernel/smp.c 			if (info->core[cpu].core_id == address) {
cpu               814 arch/s390/kernel/smp.c 				boot_core_type = info->core[cpu].type;
cpu               817 arch/s390/kernel/smp.c 		if (cpu >= info->combined)
cpu               828 arch/s390/kernel/smp.c 	for (cpu = 0; cpu < info->combined; cpu++) {
cpu               830 arch/s390/kernel/smp.c 		    info->core[cpu].type != boot_core_type)
cpu               832 arch/s390/kernel/smp.c 		if (cpu < info->configured)
cpu               848 arch/s390/kernel/smp.c 	int cpu = smp_processor_id();
cpu               860 arch/s390/kernel/smp.c 	if (topology_cpu_dedicated(cpu))
cpu               885 arch/s390/kernel/smp.c int __cpu_up(unsigned int cpu, struct task_struct *tidle)
cpu               890 arch/s390/kernel/smp.c 	pcpu = pcpu_devices + cpu;
cpu               893 arch/s390/kernel/smp.c 	base = smp_get_base_cpu(cpu);
cpu               908 arch/s390/kernel/smp.c 	rc = pcpu_alloc_lowcore(pcpu, cpu);
cpu               911 arch/s390/kernel/smp.c 	pcpu_prepare_secondary(pcpu, cpu);
cpu               915 arch/s390/kernel/smp.c 	while (!cpu_online(cpu))
cpu               948 arch/s390/kernel/smp.c void __cpu_die(unsigned int cpu)
cpu               953 arch/s390/kernel/smp.c 	pcpu = pcpu_devices + cpu;
cpu               957 arch/s390/kernel/smp.c 	cpumask_clear_cpu(cpu, mm_cpumask(&init_mm));
cpu               958 arch/s390/kernel/smp.c 	cpumask_clear_cpu(cpu, &init_mm.context.cpu_attach_mask);
cpu               971 arch/s390/kernel/smp.c 	unsigned int possible, sclp_max, cpu;
cpu               978 arch/s390/kernel/smp.c 	for (cpu = 0; cpu < possible && cpu < nr_cpu_ids; cpu++)
cpu               979 arch/s390/kernel/smp.c 		set_cpu_possible(cpu, true);
cpu              1042 arch/s390/kernel/smp.c 	int cpu, val, rc, i;
cpu              1053 arch/s390/kernel/smp.c 	cpu = dev->id;
cpu              1054 arch/s390/kernel/smp.c 	cpu = smp_get_base_cpu(cpu);
cpu              1055 arch/s390/kernel/smp.c 	if (cpu == 0)
cpu              1058 arch/s390/kernel/smp.c 		if (cpu_online(cpu + i))
cpu              1060 arch/s390/kernel/smp.c 	pcpu = pcpu_devices + cpu;
cpu              1070 arch/s390/kernel/smp.c 			if (cpu + i >= nr_cpu_ids || !cpu_present(cpu + i))
cpu              1073 arch/s390/kernel/smp.c 			smp_cpu_set_polarization(cpu + i,
cpu              1085 arch/s390/kernel/smp.c 			if (cpu + i >= nr_cpu_ids || !cpu_present(cpu + i))
cpu              1088 arch/s390/kernel/smp.c 			smp_cpu_set_polarization(cpu + i,
cpu              1130 arch/s390/kernel/smp.c static int smp_cpu_online(unsigned int cpu)
cpu              1132 arch/s390/kernel/smp.c 	struct device *s = &per_cpu(cpu_device, cpu)->dev;
cpu              1136 arch/s390/kernel/smp.c static int smp_cpu_pre_down(unsigned int cpu)
cpu              1138 arch/s390/kernel/smp.c 	struct device *s = &per_cpu(cpu_device, cpu)->dev;
cpu              1144 arch/s390/kernel/smp.c static int smp_add_present_cpu(int cpu)
cpu              1147 arch/s390/kernel/smp.c 	struct cpu *c;
cpu              1153 arch/s390/kernel/smp.c 	per_cpu(cpu_device, cpu) = c;
cpu              1156 arch/s390/kernel/smp.c 	rc = register_cpu(c, cpu);
cpu              1213 arch/s390/kernel/smp.c 	int cpu, rc = 0;
cpu              1218 arch/s390/kernel/smp.c 	for_each_present_cpu(cpu) {
cpu              1219 arch/s390/kernel/smp.c 		rc = smp_add_present_cpu(cpu);
cpu               162 arch/s390/kernel/time.c 	int cpu;
cpu               167 arch/s390/kernel/time.c 	cpu = smp_processor_id();
cpu               168 arch/s390/kernel/time.c 	cd = &per_cpu(comparators, cpu);
cpu               178 arch/s390/kernel/time.c 	cd->cpumask		= cpumask_of(cpu);
cpu                68 arch/s390/kernel/topology.c static cpumask_t cpu_group_map(struct mask_info *info, unsigned int cpu)
cpu                72 arch/s390/kernel/topology.c 	cpumask_copy(&mask, cpumask_of(cpu));
cpu                76 arch/s390/kernel/topology.c 			if (cpumask_test_cpu(cpu, &info->mask)) {
cpu                83 arch/s390/kernel/topology.c 			cpumask_copy(&mask, cpumask_of(cpu));
cpu                91 arch/s390/kernel/topology.c 		cpumask_copy(&mask, cpumask_of(cpu));
cpu                97 arch/s390/kernel/topology.c static cpumask_t cpu_thread_map(unsigned int cpu)
cpu               102 arch/s390/kernel/topology.c 	cpumask_copy(&mask, cpumask_of(cpu));
cpu               105 arch/s390/kernel/topology.c 	cpu -= cpu % (smp_cpu_mtid + 1);
cpu               107 arch/s390/kernel/topology.c 		if (cpu_present(cpu + i))
cpu               108 arch/s390/kernel/topology.c 			cpumask_set_cpu(cpu + i, &mask);
cpu               200 arch/s390/kernel/topology.c 			add_cpus_to_mask(&tle->cpu, drawer, book, socket);
cpu               212 arch/s390/kernel/topology.c 	int cpu;
cpu               214 arch/s390/kernel/topology.c 	for_each_possible_cpu(cpu)
cpu               215 arch/s390/kernel/topology.c 		smp_cpu_set_polarization(cpu, POLARIZATION_HRZ);
cpu               233 arch/s390/kernel/topology.c 	int cpu, rc;
cpu               243 arch/s390/kernel/topology.c 	for_each_possible_cpu(cpu)
cpu               244 arch/s390/kernel/topology.c 		smp_cpu_set_polarization(cpu, POLARIZATION_UNKNOWN);
cpu               251 arch/s390/kernel/topology.c 	int cpu, id;
cpu               253 arch/s390/kernel/topology.c 	for_each_possible_cpu(cpu) {
cpu               254 arch/s390/kernel/topology.c 		topo = &cpu_topology[cpu];
cpu               255 arch/s390/kernel/topology.c 		topo->thread_mask = cpu_thread_map(cpu);
cpu               256 arch/s390/kernel/topology.c 		topo->core_mask = cpu_group_map(&socket_info, cpu);
cpu               257 arch/s390/kernel/topology.c 		topo->book_mask = cpu_group_map(&book_info, cpu);
cpu               258 arch/s390/kernel/topology.c 		topo->drawer_mask = cpu_group_map(&drawer_info, cpu);
cpu               260 arch/s390/kernel/topology.c 			id = topology_mode == TOPOLOGY_MODE_PACKAGE ? 0 : cpu;
cpu               261 arch/s390/kernel/topology.c 			topo->thread_id = cpu;
cpu               262 arch/s390/kernel/topology.c 			topo->core_id = cpu;
cpu               266 arch/s390/kernel/topology.c 			if (cpu_present(cpu))
cpu               267 arch/s390/kernel/topology.c 				cpumask_set_cpu(cpu, &cpus_with_topology);
cpu               308 arch/s390/kernel/topology.c 	int cpu, rc;
cpu               312 arch/s390/kernel/topology.c 	for_each_online_cpu(cpu) {
cpu               313 arch/s390/kernel/topology.c 		dev = get_cpu_device(cpu);
cpu               413 arch/s390/kernel/topology.c 	int cpu = dev->id;
cpu               417 arch/s390/kernel/topology.c 	switch (smp_cpu_get_polarization(cpu)) {
cpu               451 arch/s390/kernel/topology.c 	int cpu = dev->id;
cpu               455 arch/s390/kernel/topology.c 	count = sprintf(buf, "%d\n", topology_cpu_dedicated(cpu));
cpu               470 arch/s390/kernel/topology.c int topology_cpu_init(struct cpu *cpu)
cpu               474 arch/s390/kernel/topology.c 	rc = sysfs_create_group(&cpu->dev.kobj, &topology_cpu_attr_group);
cpu               477 arch/s390/kernel/topology.c 	rc = sysfs_create_group(&cpu->dev.kobj, &topology_extra_cpu_attr_group);
cpu               479 arch/s390/kernel/topology.c 		sysfs_remove_group(&cpu->dev.kobj, &topology_cpu_attr_group);
cpu               483 arch/s390/kernel/topology.c static const struct cpumask *cpu_thread_mask(int cpu)
cpu               485 arch/s390/kernel/topology.c 	return &cpu_topology[cpu].thread_mask;
cpu               489 arch/s390/kernel/topology.c const struct cpumask *cpu_coregroup_mask(int cpu)
cpu               491 arch/s390/kernel/topology.c 	return &cpu_topology[cpu].core_mask;
cpu               494 arch/s390/kernel/topology.c static const struct cpumask *cpu_book_mask(int cpu)
cpu               496 arch/s390/kernel/topology.c 	return &cpu_topology[cpu].book_mask;
cpu               499 arch/s390/kernel/topology.c static const struct cpumask *cpu_drawer_mask(int cpu)
cpu               501 arch/s390/kernel/topology.c 	return &cpu_topology[cpu].drawer_mask;
cpu                54 arch/s390/kvm/interrupt.c 			sca->cpu[vcpu->vcpu_id].sigp_ctrl;
cpu                61 arch/s390/kvm/interrupt.c 			sca->cpu[vcpu->vcpu_id].sigp_ctrl;
cpu                83 arch/s390/kvm/interrupt.c 			&(sca->cpu[vcpu->vcpu_id].sigp_ctrl);
cpu                95 arch/s390/kvm/interrupt.c 			&(sca->cpu[vcpu->vcpu_id].sigp_ctrl);
cpu               126 arch/s390/kvm/interrupt.c 			&(sca->cpu[vcpu->vcpu_id].sigp_ctrl);
cpu               134 arch/s390/kvm/interrupt.c 			&(sca->cpu[vcpu->vcpu_id].sigp_ctrl);
cpu              2595 arch/s390/kvm/kvm-s390.c 		sca->cpu[vcpu->vcpu_id].sda = 0;
cpu              2600 arch/s390/kvm/kvm-s390.c 		sca->cpu[vcpu->vcpu_id].sda = 0;
cpu              2619 arch/s390/kvm/kvm-s390.c 		sca->cpu[vcpu->vcpu_id].sda = (__u64) vcpu->arch.sie_block;
cpu              2627 arch/s390/kvm/kvm-s390.c 		sca->cpu[vcpu->vcpu_id].sda = (__u64) vcpu->arch.sie_block;
cpu              2650 arch/s390/kvm/kvm-s390.c 		sca_copy_entry(&d->cpu[i], &s->cpu[i]);
cpu              2822 arch/s390/kvm/kvm-s390.c 		WARN_ON_ONCE((seq & 1) && smp_processor_id() == vcpu->cpu);
cpu              2832 arch/s390/kvm/kvm-s390.c void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
cpu              2839 arch/s390/kvm/kvm-s390.c 	vcpu->cpu = cpu;
cpu              2844 arch/s390/kvm/kvm-s390.c 	vcpu->cpu = -1;
cpu                60 arch/s390/lib/spinlock.c void arch_spin_lock_setup(int cpu)
cpu                65 arch/s390/lib/spinlock.c 	node = per_cpu_ptr(&spin_wait[0], cpu);
cpu                68 arch/s390/lib/spinlock.c 		node->node_id = ((cpu + 1) << _Q_TAIL_CPU_OFFSET) +
cpu                99 arch/s390/lib/spinlock.c 	int ix, cpu;
cpu               102 arch/s390/lib/spinlock.c 	cpu = (lock & _Q_TAIL_CPU_MASK) >> _Q_TAIL_CPU_OFFSET;
cpu               103 arch/s390/lib/spinlock.c 	return per_cpu_ptr(&spin_wait[ix], cpu - 1);
cpu               255 arch/s390/lib/spinlock.c 	int cpu = SPINLOCK_LOCKVAL;
cpu               262 arch/s390/lib/spinlock.c 			if (__atomic_cmpxchg_bool(&lp->lock, 0, cpu))
cpu               316 arch/s390/lib/spinlock.c 	int cpu;
cpu               318 arch/s390/lib/spinlock.c 	cpu = READ_ONCE(lp->lock) & _Q_LOCK_CPU_MASK;
cpu               319 arch/s390/lib/spinlock.c 	if (!cpu)
cpu               321 arch/s390/lib/spinlock.c 	if (MACHINE_IS_LPAR && !arch_vcpu_is_preempted(cpu - 1))
cpu               323 arch/s390/lib/spinlock.c 	smp_yield_cpu(cpu - 1);
cpu               778 arch/s390/mm/fault.c static int pfault_cpu_dead(unsigned int cpu)
cpu               196 arch/s390/mm/maccess.c 	int cpu;
cpu               200 arch/s390/mm/maccess.c 	for_each_online_cpu(cpu) {
cpu               201 arch/s390/mm/maccess.c 		lc = (unsigned long) lowcore_ptr[cpu];
cpu               355 arch/s390/numa/mode_emu.c 	int cpu;
cpu               359 arch/s390/numa/mode_emu.c 	for_each_cpu(cpu, &cpus_with_topology) {
cpu               360 arch/s390/numa/mode_emu.c 		top = &cpu_topology[cpu];
cpu               365 arch/s390/numa/mode_emu.c 		core = toptree_get_child(mc, smp_get_base_cpu(cpu));
cpu               368 arch/s390/numa/mode_emu.c 		cpumask_set_cpu(cpu, &core->mask);
cpu               380 arch/s390/numa/mode_emu.c 	int cpu;
cpu               382 arch/s390/numa/mode_emu.c 	for_each_cpu(cpu, &core->mask) {
cpu               383 arch/s390/numa/mode_emu.c 		top = &cpu_topology[cpu];
cpu               388 arch/s390/numa/mode_emu.c 		cpumask_set_cpu(cpu, &node_to_cpumask_map[core_node(core)->id]);
cpu               432 arch/s390/numa/mode_emu.c 	int core_id, node_id, cpu;
cpu               439 arch/s390/numa/mode_emu.c 	for_each_possible_cpu(cpu) {
cpu               440 arch/s390/numa/mode_emu.c 		core_id = smp_get_base_cpu(cpu);
cpu               444 arch/s390/numa/mode_emu.c 		cpu_topology[cpu].node_id = node_id;
cpu               162 arch/s390/pci/pci_irq.c 	unsigned long cpu;
cpu               165 arch/s390/pci/pci_irq.c 	for (cpu = 0;;) {
cpu               166 arch/s390/pci/pci_irq.c 		cpu = airq_iv_scan(zpci_sbv, cpu, airq_iv_end(zpci_sbv));
cpu               167 arch/s390/pci/pci_irq.c 		if (cpu == -1UL) {
cpu               174 arch/s390/pci/pci_irq.c 			cpu = 0;
cpu               177 arch/s390/pci/pci_irq.c 		cpu_data = &per_cpu(irq_data, cpu);
cpu               184 arch/s390/pci/pci_irq.c 		smp_call_function_single_async(cpu, &cpu_data->csd);
cpu               237 arch/s390/pci/pci_irq.c 	unsigned int hwirq, msi_vecs, cpu;
cpu               293 arch/s390/pci/pci_irq.c 			for_each_possible_cpu(cpu) {
cpu               294 arch/s390/pci/pci_irq.c 				airq_iv_set_data(zpci_ibv[cpu], hwirq, irq);
cpu               380 arch/s390/pci/pci_irq.c 	unsigned int cpu;
cpu               396 arch/s390/pci/pci_irq.c 	for_each_possible_cpu(cpu) {
cpu               401 arch/s390/pci/pci_irq.c 		zpci_ibv[cpu] = airq_iv_create(cache_line_size() * BITS_PER_BYTE,
cpu               404 arch/s390/pci/pci_irq.c 					       (!cpu ? AIRQ_IV_ALLOC : 0));
cpu               405 arch/s390/pci/pci_irq.c 		if (!zpci_ibv[cpu])
cpu               476 arch/s390/pci/pci_irq.c 	unsigned int cpu;
cpu               479 arch/s390/pci/pci_irq.c 		for_each_possible_cpu(cpu) {
cpu               480 arch/s390/pci/pci_irq.c 			airq_iv_release(zpci_ibv[cpu]);
cpu                26 arch/sh/boards/of-generic.c static void dummy_start_cpu(unsigned int cpu, unsigned long entry_point)
cpu                35 arch/sh/boards/of-generic.c static void dummy_send_ipi(unsigned int cpu, unsigned int message)
cpu                52 arch/sh/include/asm/irq.h extern void irq_ctx_init(int cpu);
cpu                53 arch/sh/include/asm/irq.h extern void irq_ctx_exit(int cpu);
cpu                56 arch/sh/include/asm/irq.h # define irq_ctx_init(cpu) do { } while (0)
cpu                57 arch/sh/include/asm/irq.h # define irq_ctx_exit(cpu) do { } while (0)
cpu                38 arch/sh/include/asm/mmu_context.h #define asid_cache(cpu)		(cpu_data[cpu].asid_cache)
cpu                41 arch/sh/include/asm/mmu_context.h #define cpu_context(cpu, mm)	((mm)->context.id[cpu])
cpu                43 arch/sh/include/asm/mmu_context.h #define cpu_asid(cpu, mm)	\
cpu                44 arch/sh/include/asm/mmu_context.h 	(cpu_context((cpu), (mm)) & MMU_CONTEXT_ASID_MASK)
cpu                60 arch/sh/include/asm/mmu_context.h static inline void get_mmu_context(struct mm_struct *mm, unsigned int cpu)
cpu                62 arch/sh/include/asm/mmu_context.h 	unsigned long asid = asid_cache(cpu);
cpu                65 arch/sh/include/asm/mmu_context.h 	if (((cpu_context(cpu, mm) ^ asid) & MMU_CONTEXT_VERSION_MASK) == 0)
cpu                93 arch/sh/include/asm/mmu_context.h 	cpu_context(cpu, mm) = asid_cache(cpu) = asid;
cpu               115 arch/sh/include/asm/mmu_context.h static inline void activate_context(struct mm_struct *mm, unsigned int cpu)
cpu               117 arch/sh/include/asm/mmu_context.h 	get_mmu_context(mm, cpu);
cpu               118 arch/sh/include/asm/mmu_context.h 	set_asid(cpu_asid(cpu, mm));
cpu               125 arch/sh/include/asm/mmu_context.h 	unsigned int cpu = smp_processor_id();
cpu               128 arch/sh/include/asm/mmu_context.h 		cpumask_set_cpu(cpu, mm_cpumask(next));
cpu               130 arch/sh/include/asm/mmu_context.h 		activate_context(next, cpu);
cpu               132 arch/sh/include/asm/mmu_context.h 		if (!cpumask_test_and_set_cpu(cpu, mm_cpumask(next)))
cpu               133 arch/sh/include/asm/mmu_context.h 			activate_context(next, cpu);
cpu               144 arch/sh/include/asm/mmu_context.h #define cpu_asid(cpu, mm)		({ (void)cpu; NO_CONTEXT; })
cpu               161 arch/sh/include/asm/mmu_context.h 	unsigned int cpu = smp_processor_id();
cpu               167 arch/sh/include/asm/mmu_context.h 	if (asid_cache(cpu) == NO_CONTEXT)
cpu               168 arch/sh/include/asm/mmu_context.h 		asid_cache(cpu) = MMU_CONTEXT_FIRST_VERSION;
cpu               170 arch/sh/include/asm/mmu_context.h 	set_asid(asid_cache(cpu) & MMU_CONTEXT_ASID_MASK);
cpu                 9 arch/sh/include/asm/smp-ops.h 	void (*start_cpu)(unsigned int cpu, unsigned long entry_point);
cpu                10 arch/sh/include/asm/smp-ops.h 	void (*send_ipi)(unsigned int cpu, unsigned int message);
cpu                11 arch/sh/include/asm/smp-ops.h 	int (*cpu_disable)(unsigned int cpu);
cpu                12 arch/sh/include/asm/smp-ops.h 	void (*cpu_die)(unsigned int cpu);
cpu                16 arch/sh/include/asm/smp.h #define raw_smp_processor_id()	(current_thread_info()->cpu)
cpu                20 arch/sh/include/asm/smp.h #define cpu_number_map(cpu)  __cpu_number_map[cpu]
cpu                24 arch/sh/include/asm/smp.h #define cpu_logical_map(cpu)  __cpu_logical_map[cpu]
cpu                39 arch/sh/include/asm/smp.h void arch_send_call_function_single_ipi(int cpu);
cpu                43 arch/sh/include/asm/smp.h void native_cpu_die(unsigned int cpu);
cpu                44 arch/sh/include/asm/smp.h int native_cpu_disable(unsigned int cpu);
cpu                50 arch/sh/include/asm/smp.h static inline void __cpu_die(unsigned int cpu)
cpu                54 arch/sh/include/asm/smp.h 	mp_ops->cpu_die(cpu);
cpu                33 arch/sh/include/asm/thread_info.h 	__u32			cpu;
cpu                61 arch/sh/include/asm/thread_info.h 	.cpu		= 0,			\
cpu                 7 arch/sh/include/asm/topology.h #define cpu_to_node(cpu)	((void)(cpu),0)
cpu                20 arch/sh/include/asm/topology.h const struct cpumask *cpu_coregroup_mask(int cpu);
cpu                24 arch/sh/include/asm/topology.h #define topology_core_cpumask(cpu)	(&cpu_core_map[cpu])
cpu                26 arch/sh/kernel/asm-offsets.c 	DEFINE(TI_CPU,		offsetof(struct thread_info, cpu));
cpu               297 arch/sh/kernel/cpu/init.c 	current_thread_info()->cpu = hard_smp_processor_id();
cpu                83 arch/sh/kernel/cpu/proc.c 	unsigned int cpu = c - cpu_data;
cpu                85 arch/sh/kernel/cpu/proc.c 	if (!cpu_online(cpu))
cpu                88 arch/sh/kernel/cpu/proc.c 	if (cpu == 0)
cpu                93 arch/sh/kernel/cpu/proc.c 	seq_printf(m, "processor\t: %d\n", cpu);
cpu                44 arch/sh/kernel/cpu/sh2/probe.c 	unsigned cpu = hard_smp_processor_id();
cpu                46 arch/sh/kernel/cpu/sh2/probe.c 	unsigned cpu = 0;
cpu                48 arch/sh/kernel/cpu/sh2/probe.c 	if (cpu == 0) of_scan_flat_dt(scan_cache, NULL);
cpu                49 arch/sh/kernel/cpu/sh2/probe.c 	if (j2_ccr_base) __raw_writel(0x80000303, j2_ccr_base + 4*cpu);
cpu                50 arch/sh/kernel/cpu/sh2/probe.c 	if (cpu != 0) return;
cpu                23 arch/sh/kernel/cpu/sh2/smp-j2.c 	unsigned cpu = hard_smp_processor_id();
cpu                24 arch/sh/kernel/cpu/sh2/smp-j2.c 	volatile unsigned *pmsg = &per_cpu(j2_ipi_messages, cpu);
cpu                79 arch/sh/kernel/cpu/sh2/smp-j2.c static void j2_start_cpu(unsigned int cpu, unsigned long entry_point)
cpu                85 arch/sh/kernel/cpu/sh2/smp-j2.c 	if (!cpu) return;
cpu                87 arch/sh/kernel/cpu/sh2/smp-j2.c 	np = of_get_cpu_node(cpu, NULL);
cpu               100 arch/sh/kernel/cpu/sh2/smp-j2.c 	pr_info("J2 SMP: requested start of cpu %u\n", cpu);
cpu               108 arch/sh/kernel/cpu/sh2/smp-j2.c static void j2_send_ipi(unsigned int cpu, unsigned int message)
cpu               116 arch/sh/kernel/cpu/sh2/smp-j2.c 	pmsg = &per_cpu(j2_ipi_messages, cpu);
cpu               121 arch/sh/kernel/cpu/sh2/smp-j2.c 	val = __raw_readl(j2_ipi_trigger + cpu);
cpu               122 arch/sh/kernel/cpu/sh2/smp-j2.c 	__raw_writel(val | (1U<<28), j2_ipi_trigger + cpu);
cpu               339 arch/sh/kernel/cpu/sh4/sq.c 	unsigned int cpu = dev->id;
cpu               343 arch/sh/kernel/cpu/sh4/sq.c 	sq_kobject[cpu] = kzalloc(sizeof(struct kobject), GFP_KERNEL);
cpu               344 arch/sh/kernel/cpu/sh4/sq.c 	if (unlikely(!sq_kobject[cpu]))
cpu               347 arch/sh/kernel/cpu/sh4/sq.c 	kobj = sq_kobject[cpu];
cpu               357 arch/sh/kernel/cpu/sh4/sq.c 	unsigned int cpu = dev->id;
cpu               358 arch/sh/kernel/cpu/sh4/sq.c 	struct kobject *kobj = sq_kobject[cpu];
cpu                30 arch/sh/kernel/cpu/sh4a/smp-shx3.c 	unsigned int cpu = hard_smp_processor_id();
cpu                31 arch/sh/kernel/cpu/sh4a/smp-shx3.c 	unsigned int offs = 4 * cpu;
cpu                45 arch/sh/kernel/cpu/sh4a/smp-shx3.c 	unsigned int cpu = 0;
cpu                48 arch/sh/kernel/cpu/sh4a/smp-shx3.c 	init_cpu_possible(cpumask_of(cpu));
cpu                51 arch/sh/kernel/cpu/sh4a/smp-shx3.c 	__raw_writel(__raw_readl(STBCR_REG(cpu)) | STBCR_LTSLP, STBCR_REG(cpu));
cpu                83 arch/sh/kernel/cpu/sh4a/smp-shx3.c static void shx3_start_cpu(unsigned int cpu, unsigned long entry_point)
cpu                86 arch/sh/kernel/cpu/sh4a/smp-shx3.c 		__raw_writel(entry_point, RESET_REG(cpu));
cpu                88 arch/sh/kernel/cpu/sh4a/smp-shx3.c 		__raw_writel(virt_to_phys(entry_point), RESET_REG(cpu));
cpu                90 arch/sh/kernel/cpu/sh4a/smp-shx3.c 	if (!(__raw_readl(STBCR_REG(cpu)) & STBCR_MSTP))
cpu                91 arch/sh/kernel/cpu/sh4a/smp-shx3.c 		__raw_writel(STBCR_MSTP, STBCR_REG(cpu));
cpu                93 arch/sh/kernel/cpu/sh4a/smp-shx3.c 	while (!(__raw_readl(STBCR_REG(cpu)) & STBCR_MSTP))
cpu                97 arch/sh/kernel/cpu/sh4a/smp-shx3.c 	__raw_writel(STBCR_RESET | STBCR_LTSLP, STBCR_REG(cpu));
cpu               105 arch/sh/kernel/cpu/sh4a/smp-shx3.c static void shx3_send_ipi(unsigned int cpu, unsigned int message)
cpu               107 arch/sh/kernel/cpu/sh4a/smp-shx3.c 	unsigned long addr = 0xfe410070 + (cpu * 4);
cpu               109 arch/sh/kernel/cpu/sh4a/smp-shx3.c 	BUG_ON(cpu >= 4);
cpu               114 arch/sh/kernel/cpu/sh4a/smp-shx3.c static void shx3_update_boot_vector(unsigned int cpu)
cpu               116 arch/sh/kernel/cpu/sh4a/smp-shx3.c 	__raw_writel(STBCR_MSTP, STBCR_REG(cpu));
cpu               117 arch/sh/kernel/cpu/sh4a/smp-shx3.c 	while (!(__raw_readl(STBCR_REG(cpu)) & STBCR_MSTP))
cpu               119 arch/sh/kernel/cpu/sh4a/smp-shx3.c 	__raw_writel(STBCR_RESET, STBCR_REG(cpu));
cpu               122 arch/sh/kernel/cpu/sh4a/smp-shx3.c static int shx3_cpu_prepare(unsigned int cpu)
cpu               124 arch/sh/kernel/cpu/sh4a/smp-shx3.c 	shx3_update_boot_vector(cpu);
cpu               275 arch/sh/kernel/hw_breakpoint.c 	int cpu, i, rc = NOTIFY_STOP;
cpu               296 arch/sh/kernel/hw_breakpoint.c 	cpu = get_cpu();
cpu               311 arch/sh/kernel/hw_breakpoint.c 		bp = per_cpu(bp_per_reg[i], cpu);
cpu               119 arch/sh/kernel/irq.c void irq_ctx_init(int cpu)
cpu               123 arch/sh/kernel/irq.c 	if (hardirq_ctx[cpu])
cpu               126 arch/sh/kernel/irq.c 	irqctx = (union irq_ctx *)&hardirq_stack[cpu * THREAD_SIZE];
cpu               128 arch/sh/kernel/irq.c 	irqctx->tinfo.cpu		= cpu;
cpu               132 arch/sh/kernel/irq.c 	hardirq_ctx[cpu] = irqctx;
cpu               134 arch/sh/kernel/irq.c 	irqctx = (union irq_ctx *)&softirq_stack[cpu * THREAD_SIZE];
cpu               136 arch/sh/kernel/irq.c 	irqctx->tinfo.cpu		= cpu;
cpu               140 arch/sh/kernel/irq.c 	softirq_ctx[cpu] = irqctx;
cpu               143 arch/sh/kernel/irq.c 		cpu, hardirq_ctx[cpu], softirq_ctx[cpu]);
cpu               146 arch/sh/kernel/irq.c void irq_ctx_exit(int cpu)
cpu               148 arch/sh/kernel/irq.c 	hardirq_ctx[cpu] = NULL;
cpu               226 arch/sh/kernel/irq.c 	unsigned int irq, cpu = smp_processor_id();
cpu               231 arch/sh/kernel/irq.c 		if (irq_data_get_node(data) == cpu) {
cpu               237 arch/sh/kernel/irq.c 						    irq, cpu);
cpu               352 arch/sh/kernel/perf_event.c static int sh_pmu_prepare_cpu(unsigned int cpu)
cpu               354 arch/sh/kernel/perf_event.c 	struct cpu_hw_events *cpuhw = &per_cpu(cpu_hw_events, cpu);
cpu                47 arch/sh/kernel/smp.c static inline void smp_store_cpu_info(unsigned int cpu)
cpu                49 arch/sh/kernel/smp.c 	struct sh_cpuinfo *c = cpu_data + cpu;
cpu                58 arch/sh/kernel/smp.c 	unsigned int cpu = smp_processor_id();
cpu                61 arch/sh/kernel/smp.c 	current_thread_info()->cpu = cpu;
cpu                71 arch/sh/kernel/smp.c 	unsigned int cpu = smp_processor_id();
cpu                73 arch/sh/kernel/smp.c 	__cpu_number_map[0] = cpu;
cpu                74 arch/sh/kernel/smp.c 	__cpu_logical_map[0] = cpu;
cpu                76 arch/sh/kernel/smp.c 	set_cpu_online(cpu, true);
cpu                77 arch/sh/kernel/smp.c 	set_cpu_possible(cpu, true);
cpu                79 arch/sh/kernel/smp.c 	per_cpu(cpu_state, cpu) = CPU_ONLINE;
cpu                83 arch/sh/kernel/smp.c void native_cpu_die(unsigned int cpu)
cpu                89 arch/sh/kernel/smp.c 		if (per_cpu(cpu_state, cpu) == CPU_DEAD) {
cpu                91 arch/sh/kernel/smp.c 				pr_info("CPU %u is now offline\n", cpu);
cpu                99 arch/sh/kernel/smp.c 	pr_err("CPU %u didn't die...\n", cpu);
cpu               102 arch/sh/kernel/smp.c int native_cpu_disable(unsigned int cpu)
cpu               104 arch/sh/kernel/smp.c 	return cpu == 0 ? -EPERM : 0;
cpu               124 arch/sh/kernel/smp.c 	unsigned int cpu = smp_processor_id();
cpu               127 arch/sh/kernel/smp.c 	ret = mp_ops->cpu_disable(cpu);
cpu               135 arch/sh/kernel/smp.c 	set_cpu_online(cpu, false);
cpu               151 arch/sh/kernel/smp.c 	clear_tasks_mm_cpumask(cpu);
cpu               156 arch/sh/kernel/smp.c int native_cpu_disable(unsigned int cpu)
cpu               161 arch/sh/kernel/smp.c void native_cpu_die(unsigned int cpu)
cpu               175 arch/sh/kernel/smp.c 	unsigned int cpu = smp_processor_id();
cpu               191 arch/sh/kernel/smp.c 	notify_cpu_starting(cpu);
cpu               197 arch/sh/kernel/smp.c 	smp_store_cpu_info(cpu);
cpu               199 arch/sh/kernel/smp.c 	set_cpu_online(cpu, true);
cpu               200 arch/sh/kernel/smp.c 	per_cpu(cpu_state, cpu) = CPU_ONLINE;
cpu               214 arch/sh/kernel/smp.c int __cpu_up(unsigned int cpu, struct task_struct *tsk)
cpu               218 arch/sh/kernel/smp.c 	per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
cpu               230 arch/sh/kernel/smp.c 	mp_ops->start_cpu(cpu, (unsigned long)_stext);
cpu               234 arch/sh/kernel/smp.c 		if (cpu_online(cpu))
cpu               241 arch/sh/kernel/smp.c 	if (cpu_online(cpu))
cpu               250 arch/sh/kernel/smp.c 	int cpu;
cpu               252 arch/sh/kernel/smp.c 	for_each_online_cpu(cpu)
cpu               253 arch/sh/kernel/smp.c 		bogosum += cpu_data[cpu].loops_per_jiffy;
cpu               261 arch/sh/kernel/smp.c void smp_send_reschedule(int cpu)
cpu               263 arch/sh/kernel/smp.c 	mp_ops->send_ipi(cpu, SMP_MSG_RESCHEDULE);
cpu               273 arch/sh/kernel/smp.c 	int cpu;
cpu               275 arch/sh/kernel/smp.c 	for_each_cpu(cpu, mask)
cpu               276 arch/sh/kernel/smp.c 		mp_ops->send_ipi(cpu, SMP_MSG_FUNCTION);
cpu               279 arch/sh/kernel/smp.c void arch_send_call_function_single_ipi(int cpu)
cpu               281 arch/sh/kernel/smp.c 	mp_ops->send_ipi(cpu, SMP_MSG_FUNCTION_SINGLE);
cpu               287 arch/sh/kernel/smp.c 	int cpu;
cpu               289 arch/sh/kernel/smp.c 	for_each_cpu(cpu, mask)
cpu               290 arch/sh/kernel/smp.c 		mp_ops->send_ipi(cpu, SMP_MSG_TIMER);
cpu                16 arch/sh/kernel/topology.c static DEFINE_PER_CPU(struct cpu, cpu_devices);
cpu                21 arch/sh/kernel/topology.c static cpumask_t cpu_coregroup_map(int cpu)
cpu                30 arch/sh/kernel/topology.c const struct cpumask *cpu_coregroup_mask(int cpu)
cpu                32 arch/sh/kernel/topology.c 	return &cpu_core_map[cpu];
cpu                37 arch/sh/kernel/topology.c 	unsigned int cpu;
cpu                39 arch/sh/kernel/topology.c 	for_each_possible_cpu(cpu)
cpu                40 arch/sh/kernel/topology.c 		cpu_core_map[cpu] = cpu_coregroup_map(cpu);
cpu                55 arch/sh/kernel/topology.c 		struct cpu *c = &per_cpu(cpu_devices, i);
cpu               175 arch/sh/kernel/traps.c 	unsigned int cpu = smp_processor_id();
cpu               179 arch/sh/kernel/traps.c 	nmi_count(cpu)++;
cpu                29 arch/sh/mm/cache-j2.c 	unsigned cpu;
cpu                30 arch/sh/mm/cache-j2.c 	for_each_possible_cpu(cpu)
cpu                31 arch/sh/mm/cache-j2.c 		__raw_writel(CACHE_ENABLE | ICACHE_FLUSH, j2_ccr_base + cpu);
cpu                36 arch/sh/mm/cache-j2.c 	unsigned cpu;
cpu                37 arch/sh/mm/cache-j2.c 	for_each_possible_cpu(cpu)
cpu                38 arch/sh/mm/cache-j2.c 		__raw_writel(CACHE_ENABLE | DCACHE_FLUSH, j2_ccr_base + cpu);
cpu                43 arch/sh/mm/cache-j2.c 	unsigned cpu;
cpu                44 arch/sh/mm/cache-j2.c 	for_each_possible_cpu(cpu)
cpu                45 arch/sh/mm/cache-j2.c 		__raw_writel(CACHE_ENABLE | CACHE_FLUSH, j2_ccr_base + cpu);
cpu                91 arch/sh/mm/cache-sh5.c 	unsigned int cpu = smp_processor_id();
cpu               113 arch/sh/mm/cache-sh5.c 	vma_asid = cpu_asid(cpu, vma->vm_mm);
cpu                17 arch/sh/mm/tlbflush_32.c 	unsigned int cpu = smp_processor_id();
cpu                19 arch/sh/mm/tlbflush_32.c 	if (vma->vm_mm && cpu_context(cpu, vma->vm_mm) != NO_CONTEXT) {
cpu                24 arch/sh/mm/tlbflush_32.c 		asid = cpu_asid(cpu, vma->vm_mm);
cpu                43 arch/sh/mm/tlbflush_32.c 	unsigned int cpu = smp_processor_id();
cpu                45 arch/sh/mm/tlbflush_32.c 	if (cpu_context(cpu, mm) != NO_CONTEXT) {
cpu                52 arch/sh/mm/tlbflush_32.c 			cpu_context(cpu, mm) = NO_CONTEXT;
cpu                54 arch/sh/mm/tlbflush_32.c 				activate_context(mm, cpu);
cpu                59 arch/sh/mm/tlbflush_32.c 			asid = cpu_asid(cpu, mm);
cpu                80 arch/sh/mm/tlbflush_32.c 	unsigned int cpu = smp_processor_id();
cpu                92 arch/sh/mm/tlbflush_32.c 		asid = cpu_asid(cpu, &init_mm);
cpu               108 arch/sh/mm/tlbflush_32.c 	unsigned int cpu = smp_processor_id();
cpu               112 arch/sh/mm/tlbflush_32.c 	if (cpu_context(cpu, mm) != NO_CONTEXT) {
cpu               116 arch/sh/mm/tlbflush_32.c 		cpu_context(cpu, mm) = NO_CONTEXT;
cpu               118 arch/sh/mm/tlbflush_32.c 			activate_context(mm, cpu);
cpu                85 arch/sh/mm/tlbflush_64.c 	unsigned int cpu = smp_processor_id();
cpu                89 arch/sh/mm/tlbflush_64.c 	if (cpu_context(cpu, mm) == NO_CONTEXT)
cpu                97 arch/sh/mm/tlbflush_64.c 	match = (cpu_asid(cpu, mm) << PTEH_ASID_SHIFT) | PTEH_VALID;
cpu               131 arch/sh/mm/tlbflush_64.c 	unsigned int cpu = smp_processor_id();
cpu               133 arch/sh/mm/tlbflush_64.c 	if (cpu_context(cpu, mm) == NO_CONTEXT)
cpu               138 arch/sh/mm/tlbflush_64.c 	cpu_context(cpu, mm) = NO_CONTEXT;
cpu               140 arch/sh/mm/tlbflush_64.c 		activate_context(mm, cpu);
cpu                40 arch/sparc/include/asm/cacheflush_64.h void smp_flush_dcache_page_impl(struct page *page, int cpu);
cpu                43 arch/sparc/include/asm/cacheflush_64.h #define smp_flush_dcache_page_impl(page,cpu) flush_dcache_page_impl(page)
cpu                15 arch/sparc/include/asm/hvtramp.h 	__u32			cpu;
cpu               236 arch/sparc/include/asm/leon.h void leon_clear_profile_irq(int cpu);
cpu               241 arch/sparc/include/asm/leon.h void leon_enable_irq_cpu(unsigned int irq_nr, unsigned int cpu);
cpu                84 arch/sparc/include/asm/mmu_context_64.h 	int cpu = smp_processor_id();
cpu                86 arch/sparc/include/asm/mmu_context_64.h 	per_cpu(per_cpu_secondary_mm, cpu) = mm;
cpu               131 arch/sparc/include/asm/mmu_context_64.h 	if (!ctx_valid || !cpumask_test_cpu(cpu, mm_cpumask(mm))) {
cpu               132 arch/sparc/include/asm/mmu_context_64.h 		cpumask_set_cpu(cpu, mm_cpumask(mm));
cpu                27 arch/sparc/include/asm/obio.h #define CSR_BASE(cpu) (((CSR_BASE_ADDR >> CSR_CPU_SHIFT) + cpu) << CSR_CPU_SHIFT)
cpu                40 arch/sparc/include/asm/obio.h #define ECSR_BASE(cpu) ((cpu) << ECSR_CPU_SHIFT)
cpu               121 arch/sparc/include/asm/obio.h static inline unsigned int bw_get_prof_limit(int cpu)
cpu               127 arch/sparc/include/asm/obio.h 			      "r" (CSR_BASE(cpu) + BW_PTIMER_LIMIT),
cpu               132 arch/sparc/include/asm/obio.h static inline void bw_set_prof_limit(int cpu, unsigned int limit)
cpu               136 arch/sparc/include/asm/obio.h 			      "r" (CSR_BASE(cpu) + BW_PTIMER_LIMIT),
cpu               140 arch/sparc/include/asm/obio.h static inline unsigned int bw_get_ctrl(int cpu)
cpu               146 arch/sparc/include/asm/obio.h 			      "r" (CSR_BASE(cpu) + BW_CTRL),
cpu               151 arch/sparc/include/asm/obio.h static inline void bw_set_ctrl(int cpu, unsigned int ctrl)
cpu               155 arch/sparc/include/asm/obio.h 			      "r" (CSR_BASE(cpu) + BW_CTRL),
cpu                65 arch/sparc/include/asm/setup.h void sunhv_migrate_hvcons_irq(int cpu);
cpu                63 arch/sparc/include/asm/smp_32.h 	void (*resched)(int cpu);
cpu                64 arch/sparc/include/asm/smp_32.h 	void (*single)(int cpu);
cpu                65 arch/sparc/include/asm/smp_32.h 	void (*mask_one)(int cpu);
cpu                97 arch/sparc/include/asm/smp_32.h void arch_send_call_function_single_ipi(int cpu);
cpu               100 arch/sparc/include/asm/smp_32.h static inline int cpu_logical_map(int cpu)
cpu               102 arch/sparc/include/asm/smp_32.h 	return cpu;
cpu               107 arch/sparc/include/asm/smp_32.h #define raw_smp_processor_id()		(current_thread_info()->cpu)
cpu                40 arch/sparc/include/asm/smp_64.h void arch_send_call_function_single_ipi(int cpu);
cpu                48 arch/sparc/include/asm/smp_64.h #define raw_smp_processor_id() (current_thread_info()->cpu)
cpu                69 arch/sparc/include/asm/smp_64.h void __cpu_die(unsigned int cpu);
cpu                27 arch/sparc/include/asm/switch_to_64.h 	trap_block[current_thread_info()->cpu].thread =			\
cpu                32 arch/sparc/include/asm/thread_info_32.h 	int			cpu;		/* cpu we're on */
cpu                62 arch/sparc/include/asm/thread_info_32.h 	.cpu		=	0,			\
cpu                50 arch/sparc/include/asm/thread_info_64.h 	__u16			cpu;
cpu                42 arch/sparc/include/asm/timer_32.h void register_percpu_ce(int cpu);
cpu                34 arch/sparc/include/asm/timer_64.h unsigned long sparc64_get_clock_tick(unsigned int cpu);
cpu                 9 arch/sparc/include/asm/topology_64.h static inline int cpu_to_node(int cpu)
cpu                11 arch/sparc/include/asm/topology_64.h 	return numa_cpu_lookup_table[cpu];
cpu                46 arch/sparc/include/asm/topology_64.h #define topology_physical_package_id(cpu)	(cpu_data(cpu).proc_id)
cpu                47 arch/sparc/include/asm/topology_64.h #define topology_core_id(cpu)			(cpu_data(cpu).core_id)
cpu                48 arch/sparc/include/asm/topology_64.h #define topology_core_cpumask(cpu)		(&cpu_core_sib_map[cpu])
cpu                49 arch/sparc/include/asm/topology_64.h #define topology_core_cache_cpumask(cpu)	(&cpu_core_sib_cache_map[cpu])
cpu                50 arch/sparc/include/asm/topology_64.h #define topology_sibling_cpumask(cpu)		(&per_cpu(cpu_sibling_map, cpu))
cpu                60 arch/sparc/include/asm/topology_64.h static inline const struct cpumask *cpu_coregroup_mask(int cpu)
cpu                62 arch/sparc/include/asm/topology_64.h 	return &cpu_core_sib_cache_map[cpu];
cpu               282 arch/sparc/kernel/cpu.c 		const struct cpu_info *cpu;
cpu               285 arch/sparc/kernel/cpu.c 		cpu = &manuf->cpu_info[0];
cpu               286 arch/sparc/kernel/cpu.c 		while (cpu->psr_vers != -1)
cpu               288 arch/sparc/kernel/cpu.c 			if (cpu->psr_vers == psr_vers) {
cpu               289 arch/sparc/kernel/cpu.c 				sparc_cpu_type = cpu->name;
cpu               290 arch/sparc/kernel/cpu.c 				sparc_pmu_type = cpu->pmu_name;
cpu               294 arch/sparc/kernel/cpu.c 			cpu++;
cpu                96 arch/sparc/kernel/cpumap.c static int cpuinfo_id(int cpu, int level)
cpu               105 arch/sparc/kernel/cpumap.c 		id = cpu_to_node(cpu);
cpu               108 arch/sparc/kernel/cpumap.c 		id = cpu_data(cpu).core_id;
cpu               111 arch/sparc/kernel/cpumap.c 		id = cpu_data(cpu).proc_id;
cpu               193 arch/sparc/kernel/cpumap.c 	int n, id, cpu, prev_cpu, last_cpu, level;
cpu               204 arch/sparc/kernel/cpumap.c 	prev_cpu = cpu = cpumask_first(cpu_online_mask);
cpu               213 arch/sparc/kernel/cpumap.c 		id = cpuinfo_id(cpu, level);
cpu               227 arch/sparc/kernel/cpumap.c 		    ? cpu : new_tree->level[level + 1].start_index;
cpu               238 arch/sparc/kernel/cpumap.c 	while (++cpu <= last_cpu) {
cpu               239 arch/sparc/kernel/cpumap.c 		if (!cpu_online(cpu))
cpu               244 arch/sparc/kernel/cpumap.c 			id = cpuinfo_id(cpu, level);
cpu               250 arch/sparc/kernel/cpumap.c 			if ((id != prev_id[level]) || (cpu == last_cpu)) {
cpu               256 arch/sparc/kernel/cpumap.c 				if (cpu == last_cpu)
cpu               268 arch/sparc/kernel/cpumap.c 					    (cpu == last_cpu) ? cpu : prev_cpu;
cpu               285 arch/sparc/kernel/cpumap.c 					    ? cpu : level_rover[level + 1];
cpu               290 arch/sparc/kernel/cpumap.c 		prev_cpu = cpu;
cpu               406 arch/sparc/kernel/ds.c 	__u32				cpu;
cpu               466 arch/sparc/kernel/ds.c 		u32 cpu = list[i];
cpu               469 arch/sparc/kernel/ds.c 		if (cpu == CPU_SENTINEL)
cpu               473 arch/sparc/kernel/ds.c 			if (list[j] == cpu)
cpu               492 arch/sparc/kernel/ds.c 	int i, cpu;
cpu               505 arch/sparc/kernel/ds.c 	for_each_cpu(cpu, mask) {
cpu               506 arch/sparc/kernel/ds.c 		ent[i].cpu = cpu;
cpu               514 arch/sparc/kernel/ds.c static void dr_cpu_mark(struct ds_data *resp, int cpu, int ncpus,
cpu               525 arch/sparc/kernel/ds.c 		if (ent[i].cpu != cpu)
cpu               537 arch/sparc/kernel/ds.c 	int resp_len, ncpus, cpu;
cpu               553 arch/sparc/kernel/ds.c 	for_each_cpu(cpu, mask) {
cpu               557 arch/sparc/kernel/ds.c 		       dp->id, cpu);
cpu               558 arch/sparc/kernel/ds.c 		err = cpu_up(cpu);
cpu               563 arch/sparc/kernel/ds.c 			if (!cpu_present(cpu)) {
cpu               574 arch/sparc/kernel/ds.c 			dr_cpu_mark(resp, cpu, ncpus, res, stat);
cpu               596 arch/sparc/kernel/ds.c 	int resp_len, ncpus, cpu;
cpu               609 arch/sparc/kernel/ds.c 	for_each_cpu(cpu, mask) {
cpu               613 arch/sparc/kernel/ds.c 		       dp->id, cpu);
cpu               614 arch/sparc/kernel/ds.c 		err = cpu_down(cpu);
cpu               616 arch/sparc/kernel/ds.c 			dr_cpu_mark(resp, cpu, ncpus,
cpu               247 arch/sparc/kernel/iommu.c 				 void *cpu, dma_addr_t dvma,
cpu               260 arch/sparc/kernel/iommu.c 		free_pages((unsigned long)cpu, order);
cpu                75 arch/sparc/kernel/irq.h 	void (*load_profile_irq)(int cpu, unsigned int limit);
cpu               970 arch/sparc/kernel/irq_64.c 	int cpu = hard_smp_processor_id();
cpu               972 arch/sparc/kernel/irq_64.c 	trap_block[cpu].irq_worklist_pa = 0UL;
cpu              1063 arch/sparc/kernel/irq_64.c 	int cpu;
cpu              1065 arch/sparc/kernel/irq_64.c 	for_each_possible_cpu(cpu) {
cpu              1066 arch/sparc/kernel/irq_64.c 		struct trap_per_cpu *tb = &trap_block[cpu];
cpu              1080 arch/sparc/kernel/irq_64.c 	int cpu;
cpu              1082 arch/sparc/kernel/irq_64.c 	for_each_possible_cpu(cpu) {
cpu              1083 arch/sparc/kernel/irq_64.c 		struct trap_per_cpu *tb = &trap_block[cpu];
cpu                94 arch/sparc/kernel/kernel.h void sun4m_clear_profile_irq(int cpu);
cpu                23 arch/sparc/kernel/kstack.h 	if (hardirq_stack[tp->cpu]) {
cpu                24 arch/sparc/kernel/kstack.h 		base = (unsigned long) hardirq_stack[tp->cpu];
cpu                28 arch/sparc/kernel/kstack.h 		base = (unsigned long) softirq_stack[tp->cpu];
cpu                46 arch/sparc/kernel/kstack.h 	if (hardirq_stack[tp->cpu]) {
cpu                47 arch/sparc/kernel/kstack.h 		base = (unsigned long) hardirq_stack[tp->cpu];
cpu                51 arch/sparc/kernel/kstack.h 		base = (unsigned long) softirq_stack[tp->cpu];
cpu                44 arch/sparc/kernel/leon_kernel.c #define LEON_IMASK(cpu) (&leon3_irqctrl_regs->mask[cpu])
cpu                51 arch/sparc/kernel/leon_kernel.c static inline unsigned int leon_eirq_get(int cpu)
cpu                53 arch/sparc/kernel/leon_kernel.c 	return LEON3_BYPASS_LOAD_PA(&leon3_irqctrl_regs->intid[cpu]) & 0x1f;
cpu                61 arch/sparc/kernel/leon_kernel.c 	int cpu = sparc_leon3_cpuid();
cpu                63 arch/sparc/kernel/leon_kernel.c 	eirq = leon_eirq_get(cpu);
cpu               150 arch/sparc/kernel/leon_kernel.c 	int cpu;
cpu               153 arch/sparc/kernel/leon_kernel.c 	cpu = irq_choose_cpu(irq_data_get_affinity_mask(data));
cpu               155 arch/sparc/kernel/leon_kernel.c 	oldmask = LEON3_BYPASS_LOAD_PA(LEON_IMASK(cpu));
cpu               156 arch/sparc/kernel/leon_kernel.c 	LEON3_BYPASS_STORE_PA(LEON_IMASK(cpu), (oldmask | mask));
cpu               163 arch/sparc/kernel/leon_kernel.c 	int cpu;
cpu               166 arch/sparc/kernel/leon_kernel.c 	cpu = irq_choose_cpu(irq_data_get_affinity_mask(data));
cpu               168 arch/sparc/kernel/leon_kernel.c 	oldmask = LEON3_BYPASS_LOAD_PA(LEON_IMASK(cpu));
cpu               169 arch/sparc/kernel/leon_kernel.c 	LEON3_BYPASS_STORE_PA(LEON_IMASK(cpu), (oldmask & ~mask));
cpu               286 arch/sparc/kernel/leon_kernel.c 	int cpu = smp_processor_id();
cpu               288 arch/sparc/kernel/leon_kernel.c 	leon_clear_profile_irq(cpu);
cpu               290 arch/sparc/kernel/leon_kernel.c 	if (cpu == boot_cpu_id)
cpu               293 arch/sparc/kernel/leon_kernel.c 	ce = &per_cpu(sparc32_clockevent, cpu);
cpu               483 arch/sparc/kernel/leon_kernel.c static void leon_load_profile_irq(int cpu, unsigned int limit)
cpu               488 arch/sparc/kernel/leon_kernel.c void leon_clear_profile_irq(int cpu)
cpu               492 arch/sparc/kernel/leon_kernel.c void leon_enable_irq_cpu(unsigned int irq_nr, unsigned int cpu)
cpu               497 arch/sparc/kernel/leon_kernel.c 	addr = (unsigned long *)LEON_IMASK(cpu);
cpu               276 arch/sparc/kernel/leon_smp.c 	int cpu, len;
cpu               299 arch/sparc/kernel/leon_smp.c 	for_each_possible_cpu(cpu) {
cpu               300 arch/sparc/kernel/leon_smp.c 		work = &per_cpu(leon_ipi_work, cpu);
cpu               305 arch/sparc/kernel/leon_smp.c static void leon_send_ipi(int cpu, int level)
cpu               309 arch/sparc/kernel/leon_smp.c 	LEON3_BYPASS_STORE_PA(&leon3_irqctrl_regs->force[cpu], mask);
cpu               312 arch/sparc/kernel/leon_smp.c static void leon_ipi_single(int cpu)
cpu               314 arch/sparc/kernel/leon_smp.c 	struct leon_ipi_work *work = &per_cpu(leon_ipi_work, cpu);
cpu               320 arch/sparc/kernel/leon_smp.c 	leon_send_ipi(cpu, leon_ipi_irq);
cpu               323 arch/sparc/kernel/leon_smp.c static void leon_ipi_mask_one(int cpu)
cpu               325 arch/sparc/kernel/leon_smp.c 	struct leon_ipi_work *work = &per_cpu(leon_ipi_work, cpu);
cpu               331 arch/sparc/kernel/leon_smp.c 	leon_send_ipi(cpu, leon_ipi_irq);
cpu               334 arch/sparc/kernel/leon_smp.c static void leon_ipi_resched(int cpu)
cpu               336 arch/sparc/kernel/leon_smp.c 	struct leon_ipi_work *work = &per_cpu(leon_ipi_work, cpu);
cpu               342 arch/sparc/kernel/leon_smp.c 	leon_send_ipi(cpu, leon_ipi_irq);
cpu                58 arch/sparc/kernel/nmi.c 		int cpu;
cpu                60 arch/sparc/kernel/nmi.c 		for_each_present_cpu(cpu) {
cpu                61 arch/sparc/kernel/nmi.c 			if (per_cpu(nmi_touch, cpu) != 1)
cpu                62 arch/sparc/kernel/nmi.c 				per_cpu(nmi_touch, cpu) = 1;
cpu               125 arch/sparc/kernel/nmi.c static inline unsigned int get_nmi_count(int cpu)
cpu               127 arch/sparc/kernel/nmi.c 	return cpu_data(cpu).__nmi_count;
cpu               136 arch/sparc/kernel/nmi.c static void report_broken_nmi(int cpu, int *prev_nmi_count)
cpu               142 arch/sparc/kernel/nmi.c 			cpu, prev_nmi_count[cpu], get_nmi_count(cpu));
cpu               149 arch/sparc/kernel/nmi.c 	per_cpu(wd_enabled, cpu) = 0;
cpu               165 arch/sparc/kernel/nmi.c 	int cpu, err;
cpu               181 arch/sparc/kernel/nmi.c 	for_each_possible_cpu(cpu)
cpu               182 arch/sparc/kernel/nmi.c 		prev_nmi_count[cpu] = get_nmi_count(cpu);
cpu               186 arch/sparc/kernel/nmi.c 	for_each_online_cpu(cpu) {
cpu               187 arch/sparc/kernel/nmi.c 		if (!per_cpu(wd_enabled, cpu))
cpu               189 arch/sparc/kernel/nmi.c 		if (get_nmi_count(cpu) - prev_nmi_count[cpu] <= 5)
cpu               190 arch/sparc/kernel/nmi.c 			report_broken_nmi(cpu, prev_nmi_count);
cpu               285 arch/sparc/kernel/nmi.c int watchdog_nmi_enable(unsigned int cpu)
cpu               300 arch/sparc/kernel/nmi.c 	smp_call_function_single(cpu, start_nmi_watchdog, NULL, 1);
cpu               308 arch/sparc/kernel/nmi.c void watchdog_nmi_disable(unsigned int cpu)
cpu               313 arch/sparc/kernel/nmi.c 		smp_call_function_single(cpu, stop_nmi_watchdog, NULL, 1);
cpu               321 arch/sparc/kernel/pci_sun4v.c static void dma_4v_free_coherent(struct device *dev, size_t size, void *cpu,
cpu               350 arch/sparc/kernel/pci_sun4v.c 		free_pages((unsigned long)cpu, order);
cpu               829 arch/sparc/kernel/pcic.c static void pcic_load_profile_irq(int cpu, unsigned int limit)
cpu              1593 arch/sparc/kernel/perf_event.c 	int cpu, i;
cpu              1600 arch/sparc/kernel/perf_event.c 	cpu = smp_processor_id();
cpu              1605 arch/sparc/kernel/perf_event.c 			cpu, i, pcr_ops->read_pcr(i));
cpu              1608 arch/sparc/kernel/perf_event.c 			cpu, i, pcr_ops->read_pic(i));
cpu               257 arch/sparc/kernel/process_64.c 	int this_cpu, cpu;
cpu               273 arch/sparc/kernel/process_64.c 	for_each_cpu(cpu, mask) {
cpu               276 arch/sparc/kernel/process_64.c 		if (exclude_self && cpu == this_cpu)
cpu               279 arch/sparc/kernel/process_64.c 		gp = &global_cpu_snapshot[cpu].reg;
cpu               285 arch/sparc/kernel/process_64.c 		       (cpu == this_cpu ? '*' : ' '), cpu,
cpu               356 arch/sparc/kernel/process_64.c 	int this_cpu, cpu;
cpu               368 arch/sparc/kernel/process_64.c 	for_each_online_cpu(cpu) {
cpu               369 arch/sparc/kernel/process_64.c 		struct global_pmu_snapshot *pp = &global_cpu_snapshot[cpu].pmu;
cpu               374 arch/sparc/kernel/process_64.c 		       (cpu == this_cpu ? '*' : ' '), cpu,
cpu               382 arch/sparc/kernel/prom_64.c 				       int cpu, unsigned int *thread)
cpu               414 arch/sparc/kernel/prom_64.c 	if (this_cpu_id == cpu) {
cpu               416 arch/sparc/kernel/prom_64.c 			int proc_id = cpu_data(cpu).proc_id;
cpu               414 arch/sparc/kernel/setup_32.c 		struct cpu *p = kzalloc(sizeof(*p), GFP_KERNEL);
cpu               370 arch/sparc/kernel/setup_64.c 	int cpu;
cpu               377 arch/sparc/kernel/setup_64.c 	cpu = hard_smp_processor_id();
cpu               378 arch/sparc/kernel/setup_64.c 	if (cpu >= NR_CPUS) {
cpu               380 arch/sparc/kernel/setup_64.c 			    cpu, NR_CPUS);
cpu               383 arch/sparc/kernel/setup_64.c 	current_thread_info()->cpu = cpu;
cpu                81 arch/sparc/kernel/smp_32.c 	int cpu, num = 0;
cpu                83 arch/sparc/kernel/smp_32.c 	for_each_online_cpu(cpu) {
cpu                85 arch/sparc/kernel/smp_32.c 		bogosum += cpu_data(cpu).udelay_val;
cpu               125 arch/sparc/kernel/smp_32.c void smp_send_reschedule(int cpu)
cpu               132 arch/sparc/kernel/smp_32.c 	sparc32_ipi_ops->resched(cpu);
cpu               139 arch/sparc/kernel/smp_32.c void arch_send_call_function_single_ipi(int cpu)
cpu               142 arch/sparc/kernel/smp_32.c 	sparc32_ipi_ops->single(cpu);
cpu               147 arch/sparc/kernel/smp_32.c 	int cpu;
cpu               150 arch/sparc/kernel/smp_32.c 	for_each_cpu(cpu, mask)
cpu               151 arch/sparc/kernel/smp_32.c 		sparc32_ipi_ops->mask_one(cpu);
cpu               255 arch/sparc/kernel/smp_32.c 	current_thread_info()->cpu = cpuid;
cpu               260 arch/sparc/kernel/smp_32.c int __cpu_up(unsigned int cpu, struct task_struct *tidle)
cpu               266 arch/sparc/kernel/smp_32.c 		ret = smp4m_boot_one_cpu(cpu, tidle);
cpu               269 arch/sparc/kernel/smp_32.c 		ret = smp4d_boot_one_cpu(cpu, tidle);
cpu               272 arch/sparc/kernel/smp_32.c 		ret = leon_boot_one_cpu(cpu, tidle);
cpu               289 arch/sparc/kernel/smp_32.c 		cpumask_set_cpu(cpu, &smp_commenced_mask);
cpu               290 arch/sparc/kernel/smp_32.c 		while (!cpu_online(cpu))
cpu               345 arch/sparc/kernel/smp_32.c 	unsigned int cpu;
cpu               354 arch/sparc/kernel/smp_32.c 	cpu = smp_processor_id();
cpu               356 arch/sparc/kernel/smp_32.c 	notify_cpu_starting(cpu);
cpu               360 arch/sparc/kernel/smp_32.c 	set_cpu_online(cpu, true);
cpu               258 arch/sparc/kernel/smp_64.c static void smp_start_sync_tick_client(int cpu);
cpu               260 arch/sparc/kernel/smp_64.c static void smp_synchronize_one_tick(int cpu)
cpu               266 arch/sparc/kernel/smp_64.c 	smp_start_sync_tick_client(cpu);
cpu               291 arch/sparc/kernel/smp_64.c static void ldom_startcpu_cpuid(unsigned int cpu, unsigned long thread_reg,
cpu               314 arch/sparc/kernel/smp_64.c 	hdesc->cpu = cpu;
cpu               317 arch/sparc/kernel/smp_64.c 	tb = &trap_block[cpu];
cpu               336 arch/sparc/kernel/smp_64.c 	hv_err = sun4v_cpu_start(cpu, trampoline_ra,
cpu               353 arch/sparc/kernel/smp_64.c static int smp_boot_one_cpu(unsigned int cpu, struct task_struct *idle)
cpu               368 arch/sparc/kernel/smp_64.c 			ldom_startcpu_cpuid(cpu,
cpu               373 arch/sparc/kernel/smp_64.c 			prom_startcpu_cpuid(cpu, entry, cookie);
cpu               375 arch/sparc/kernel/smp_64.c 		struct device_node *dp = of_find_node_by_cpuid(cpu);
cpu               389 arch/sparc/kernel/smp_64.c 		printk("Processor %d is stuck.\n", cpu);
cpu               399 arch/sparc/kernel/smp_64.c static void spitfire_xcall_helper(u64 data0, u64 data1, u64 data2, u64 pstate, unsigned long cpu)
cpu               406 arch/sparc/kernel/smp_64.c 		cpu = (((cpu & 0x3c) << 1) |
cpu               407 arch/sparc/kernel/smp_64.c 			((cpu & 0x40) >> 4) |
cpu               408 arch/sparc/kernel/smp_64.c 			(cpu & 0x3));
cpu               411 arch/sparc/kernel/smp_64.c 	target = (cpu << 14) | 0x70;
cpu               658 arch/sparc/kernel/smp_64.c 	u16 cpu;
cpu               709 arch/sparc/kernel/smp_64.c 			cpu = cpu_list[i];
cpu               710 arch/sparc/kernel/smp_64.c 			if (likely(cpu == 0xffff)) {
cpu               713 arch/sparc/kernel/smp_64.c 				(sun4v_cpu_state(cpu) == HV_CPU_STATE_ERROR)) {
cpu               714 arch/sparc/kernel/smp_64.c 				ecpuerror_id = cpu + 1;
cpu               715 arch/sparc/kernel/smp_64.c 			} else if (status == HV_ENOCPU && !cpu_online(cpu)) {
cpu               716 arch/sparc/kernel/smp_64.c 				enocpu_id = cpu + 1;
cpu               718 arch/sparc/kernel/smp_64.c 				cpu_list[rem++] = cpu;
cpu               853 arch/sparc/kernel/smp_64.c static void smp_start_sync_tick_client(int cpu)
cpu               856 arch/sparc/kernel/smp_64.c 		      cpumask_of(cpu));
cpu               868 arch/sparc/kernel/smp_64.c void arch_send_call_function_single_ipi(int cpu)
cpu               871 arch/sparc/kernel/smp_64.c 		      cpumask_of(cpu));
cpu               940 arch/sparc/kernel/smp_64.c void smp_flush_dcache_page_impl(struct page *page, int cpu)
cpu               953 arch/sparc/kernel/smp_64.c 	if (cpu == this_cpu) {
cpu               955 arch/sparc/kernel/smp_64.c 	} else if (cpu_online(cpu)) {
cpu               970 arch/sparc/kernel/smp_64.c 				      (u64) pg_addr, cpumask_of(cpu));
cpu              1083 arch/sparc/kernel/smp_64.c 	int cpu = get_cpu();
cpu              1086 arch/sparc/kernel/smp_64.c 		cpumask_copy(mm_cpumask(mm), cpumask_of(cpu));
cpu              1117 arch/sparc/kernel/smp_64.c 	int cpu = get_cpu();
cpu              1124 arch/sparc/kernel/smp_64.c 		cpumask_copy(mm_cpumask(mm), cpumask_of(cpu));
cpu              1137 arch/sparc/kernel/smp_64.c 	int cpu = get_cpu();
cpu              1140 arch/sparc/kernel/smp_64.c 		cpumask_copy(mm_cpumask(mm), cpumask_of(cpu));
cpu              1317 arch/sparc/kernel/smp_64.c int __cpu_up(unsigned int cpu, struct task_struct *tidle)
cpu              1319 arch/sparc/kernel/smp_64.c 	int ret = smp_boot_one_cpu(cpu, tidle);
cpu              1322 arch/sparc/kernel/smp_64.c 		cpumask_set_cpu(cpu, &smp_commenced_mask);
cpu              1323 arch/sparc/kernel/smp_64.c 		while (!cpu_online(cpu))
cpu              1325 arch/sparc/kernel/smp_64.c 		if (!cpu_online(cpu)) {
cpu              1332 arch/sparc/kernel/smp_64.c 				smp_synchronize_one_tick(cpu);
cpu              1341 arch/sparc/kernel/smp_64.c 	int cpu = smp_processor_id();
cpu              1347 arch/sparc/kernel/smp_64.c 		struct trap_per_cpu *tb = &trap_block[cpu];
cpu              1359 arch/sparc/kernel/smp_64.c 	cpumask_clear_cpu(cpu, &smp_commenced_mask);
cpu              1376 arch/sparc/kernel/smp_64.c 	int cpu = smp_processor_id();
cpu              1380 arch/sparc/kernel/smp_64.c 	for_each_cpu(i, &cpu_core_map[cpu])
cpu              1381 arch/sparc/kernel/smp_64.c 		cpumask_clear_cpu(cpu, &cpu_core_map[i]);
cpu              1382 arch/sparc/kernel/smp_64.c 	cpumask_clear(&cpu_core_map[cpu]);
cpu              1384 arch/sparc/kernel/smp_64.c 	for_each_cpu(i, &per_cpu(cpu_sibling_map, cpu))
cpu              1385 arch/sparc/kernel/smp_64.c 		cpumask_clear_cpu(cpu, &per_cpu(cpu_sibling_map, i));
cpu              1386 arch/sparc/kernel/smp_64.c 	cpumask_clear(&per_cpu(cpu_sibling_map, cpu));
cpu              1388 arch/sparc/kernel/smp_64.c 	c = &cpu_data(cpu);
cpu              1402 arch/sparc/kernel/smp_64.c 	set_cpu_online(cpu, false);
cpu              1409 arch/sparc/kernel/smp_64.c void __cpu_die(unsigned int cpu)
cpu              1415 arch/sparc/kernel/smp_64.c 		if (!cpumask_test_cpu(cpu, &smp_commenced_mask))
cpu              1419 arch/sparc/kernel/smp_64.c 	if (cpumask_test_cpu(cpu, &smp_commenced_mask)) {
cpu              1420 arch/sparc/kernel/smp_64.c 		printk(KERN_ERR "CPU %u didn't die...\n", cpu);
cpu              1427 arch/sparc/kernel/smp_64.c 			hv_err = sun4v_cpu_stop(cpu);
cpu              1429 arch/sparc/kernel/smp_64.c 				set_cpu_present(cpu, false);
cpu              1446 arch/sparc/kernel/smp_64.c static void send_cpu_ipi(int cpu)
cpu              1449 arch/sparc/kernel/smp_64.c 			0, 0, cpumask_of(cpu));
cpu              1464 arch/sparc/kernel/smp_64.c static unsigned long send_cpu_poke(int cpu)
cpu              1468 arch/sparc/kernel/smp_64.c 	per_cpu(poke, cpu) = true;
cpu              1469 arch/sparc/kernel/smp_64.c 	hv_err = sun4v_cpu_poke(cpu);
cpu              1471 arch/sparc/kernel/smp_64.c 		per_cpu(poke, cpu) = false;
cpu              1479 arch/sparc/kernel/smp_64.c void smp_send_reschedule(int cpu)
cpu              1481 arch/sparc/kernel/smp_64.c 	if (cpu == smp_processor_id()) {
cpu              1488 arch/sparc/kernel/smp_64.c 	if (cpu_poke && idle_cpu(cpu)) {
cpu              1491 arch/sparc/kernel/smp_64.c 		ret = send_cpu_poke(cpu);
cpu              1501 arch/sparc/kernel/smp_64.c 	send_cpu_ipi(cpu);
cpu              1542 arch/sparc/kernel/smp_64.c 	int cpu;
cpu              1549 arch/sparc/kernel/smp_64.c 		for_each_online_cpu(cpu) {
cpu              1550 arch/sparc/kernel/smp_64.c 			if (cpu == this_cpu)
cpu              1553 arch/sparc/kernel/smp_64.c 			set_cpu_online(cpu, false);
cpu              1557 arch/sparc/kernel/smp_64.c 				hv_err = sun4v_cpu_stop(cpu);
cpu              1563 arch/sparc/kernel/smp_64.c 				prom_stopcpu_cpuid(cpu);
cpu              1582 arch/sparc/kernel/smp_64.c static void * __init pcpu_alloc_bootmem(unsigned int cpu, size_t size,
cpu              1587 arch/sparc/kernel/smp_64.c 	int node = cpu_to_node(cpu);
cpu              1593 arch/sparc/kernel/smp_64.c 			cpu, node);
cpu              1595 arch/sparc/kernel/smp_64.c 			 cpu, size, __pa(ptr));
cpu              1600 arch/sparc/kernel/smp_64.c 			 "%016lx\n", cpu, size, node, __pa(ptr));
cpu              1666 arch/sparc/kernel/smp_64.c 	unsigned int cpu;
cpu              1689 arch/sparc/kernel/smp_64.c 	for_each_possible_cpu(cpu)
cpu              1690 arch/sparc/kernel/smp_64.c 		__per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];
cpu               273 arch/sparc/kernel/sun4d_irq.c static void sun4d_load_profile_irq(int cpu, unsigned int limit)
cpu               276 arch/sparc/kernel/sun4d_irq.c 	bw_set_prof_limit(cpu, value);
cpu               281 arch/sparc/kernel/sun4d_irq.c 	int cpu = 0, mid;
cpu               283 arch/sparc/kernel/sun4d_irq.c 	while (!cpu_find_by_instance(cpu, NULL, &mid)) {
cpu               285 arch/sparc/kernel/sun4d_irq.c 		cpu++;
cpu                85 arch/sparc/kernel/sun4d_smp.c 	while (current_set[cpuid]->cpu != cpuid)
cpu               195 arch/sparc/kernel/sun4d_smp.c 	int cpu;
cpu               200 arch/sparc/kernel/sun4d_smp.c 	for_each_possible_cpu(cpu) {
cpu               201 arch/sparc/kernel/sun4d_smp.c 		work = &per_cpu(sun4d_ipi_work, cpu);
cpu               232 arch/sparc/kernel/sun4d_smp.c static void sun4d_send_ipi(int cpu, int level)
cpu               234 arch/sparc/kernel/sun4d_smp.c 	cc_set_igen(IGEN_MESSAGE(0, cpu << 3, 6 + ((level >> 1) & 7), 1 << (level - 1)));
cpu               237 arch/sparc/kernel/sun4d_smp.c static void sun4d_ipi_single(int cpu)
cpu               239 arch/sparc/kernel/sun4d_smp.c 	struct sun4d_ipi_work *work = &per_cpu(sun4d_ipi_work, cpu);
cpu               245 arch/sparc/kernel/sun4d_smp.c 	sun4d_send_ipi(cpu, SUN4D_IPI_IRQ);
cpu               248 arch/sparc/kernel/sun4d_smp.c static void sun4d_ipi_mask_one(int cpu)
cpu               250 arch/sparc/kernel/sun4d_smp.c 	struct sun4d_ipi_work *work = &per_cpu(sun4d_ipi_work, cpu);
cpu               256 arch/sparc/kernel/sun4d_smp.c 	sun4d_send_ipi(cpu, SUN4D_IPI_IRQ);
cpu               259 arch/sparc/kernel/sun4d_smp.c static void sun4d_ipi_resched(int cpu)
cpu               261 arch/sparc/kernel/sun4d_smp.c 	struct sun4d_ipi_work *work = &per_cpu(sun4d_ipi_work, cpu);
cpu               267 arch/sparc/kernel/sun4d_smp.c 	sun4d_send_ipi(cpu, SUN4D_IPI_IRQ);
cpu               367 arch/sparc/kernel/sun4d_smp.c 	int cpu = hard_smp_processor_id();
cpu               373 arch/sparc/kernel/sun4d_smp.c 	bw_get_prof_limit(cpu);
cpu               376 arch/sparc/kernel/sun4d_smp.c 	cpu_tick[cpu]++;
cpu               377 arch/sparc/kernel/sun4d_smp.c 	if (!(cpu_tick[cpu] & 15)) {
cpu               378 arch/sparc/kernel/sun4d_smp.c 		if (cpu_tick[cpu] == 0x60)
cpu               379 arch/sparc/kernel/sun4d_smp.c 			cpu_tick[cpu] = 0;
cpu               380 arch/sparc/kernel/sun4d_smp.c 		cpu_leds[cpu] = led_mask[cpu_tick[cpu] >> 4];
cpu               381 arch/sparc/kernel/sun4d_smp.c 		show_leds(cpu);
cpu               384 arch/sparc/kernel/sun4d_smp.c 	ce = &per_cpu(sparc32_clockevent, cpu);
cpu               194 arch/sparc/kernel/sun4m_irq.c 	int cpu = smp_processor_id();
cpu               202 arch/sparc/kernel/sun4m_irq.c 			sbus_writel(handler_data->mask, &sun4m_irq_percpu[cpu]->set);
cpu               213 arch/sparc/kernel/sun4m_irq.c 	int cpu = smp_processor_id();
cpu               221 arch/sparc/kernel/sun4m_irq.c 			sbus_writel(handler_data->mask, &sun4m_irq_percpu[cpu]->clear);
cpu               348 arch/sparc/kernel/sun4m_irq.c void sun4m_clear_profile_irq(int cpu)
cpu               350 arch/sparc/kernel/sun4m_irq.c 	sbus_readl(&timers_percpu[cpu]->l14_limit);
cpu               353 arch/sparc/kernel/sun4m_irq.c static void sun4m_load_profile_irq(int cpu, unsigned int limit)
cpu               356 arch/sparc/kernel/sun4m_irq.c 	sbus_writel(value, &timers_percpu[cpu]->l14_limit);
cpu               139 arch/sparc/kernel/sun4m_smp.c static void sun4m_send_ipi(int cpu, int level)
cpu               141 arch/sparc/kernel/sun4m_smp.c 	sbus_writel(SUN4M_SOFT_INT(level), &sun4m_irq_percpu[cpu]->set);
cpu               144 arch/sparc/kernel/sun4m_smp.c static void sun4m_ipi_resched(int cpu)
cpu               146 arch/sparc/kernel/sun4m_smp.c 	sun4m_send_ipi(cpu, IRQ_IPI_RESCHED);
cpu               149 arch/sparc/kernel/sun4m_smp.c static void sun4m_ipi_single(int cpu)
cpu               151 arch/sparc/kernel/sun4m_smp.c 	sun4m_send_ipi(cpu, IRQ_IPI_SINGLE);
cpu               154 arch/sparc/kernel/sun4m_smp.c static void sun4m_ipi_mask_one(int cpu)
cpu               156 arch/sparc/kernel/sun4m_smp.c 	sun4m_send_ipi(cpu, IRQ_IPI_MASK);
cpu               245 arch/sparc/kernel/sun4m_smp.c 	int cpu = smp_processor_id();
cpu               249 arch/sparc/kernel/sun4m_smp.c 	ce = &per_cpu(sparc32_clockevent, cpu);
cpu               252 arch/sparc/kernel/sun4m_smp.c 		sun4m_clear_profile_irq(cpu);
cpu               254 arch/sparc/kernel/sun4m_smp.c 		sparc_config.load_profile_irq(cpu, 0); /* Is this needless? */
cpu               206 arch/sparc/kernel/sysfs.c static DEFINE_PER_CPU(struct cpu, cpu_devices);
cpu               208 arch/sparc/kernel/sysfs.c static int register_cpu_online(unsigned int cpu)
cpu               210 arch/sparc/kernel/sysfs.c 	struct cpu *c = &per_cpu(cpu_devices, cpu);
cpu               221 arch/sparc/kernel/sysfs.c static int unregister_cpu_online(unsigned int cpu)
cpu               224 arch/sparc/kernel/sysfs.c 	struct cpu *c = &per_cpu(cpu_devices, cpu);
cpu               259 arch/sparc/kernel/sysfs.c 	int cpu, ret;
cpu               265 arch/sparc/kernel/sysfs.c 	for_each_possible_cpu(cpu) {
cpu               266 arch/sparc/kernel/sysfs.c 		struct cpu *c = &per_cpu(cpu_devices, cpu);
cpu               268 arch/sparc/kernel/sysfs.c 		register_cpu(c, cpu);
cpu               188 arch/sparc/kernel/time_32.c 	int cpu = cpumask_first(evt->cpumask);
cpu               190 arch/sparc/kernel/time_32.c 	sparc_config.load_profile_irq(cpu, 0);
cpu               196 arch/sparc/kernel/time_32.c 	int cpu = cpumask_first(evt->cpumask);
cpu               198 arch/sparc/kernel/time_32.c 	sparc_config.load_profile_irq(cpu, SBUS_CLOCK_RATE / HZ);
cpu               205 arch/sparc/kernel/time_32.c 	int cpu = cpumask_first(evt->cpumask);
cpu               208 arch/sparc/kernel/time_32.c 	sparc_config.load_profile_irq(cpu, next);
cpu               212 arch/sparc/kernel/time_32.c void register_percpu_ce(int cpu)
cpu               214 arch/sparc/kernel/time_32.c 	struct clock_event_device *ce = &per_cpu(sparc32_clockevent, cpu);
cpu               227 arch/sparc/kernel/time_32.c 	ce->cpumask        = cpumask_of(cpu);
cpu               640 arch/sparc/kernel/time_64.c unsigned long sparc64_get_clock_tick(unsigned int cpu)
cpu               642 arch/sparc/kernel/time_64.c 	struct freq_table *ft = &per_cpu(sparc64_freq_table, cpu);
cpu               646 arch/sparc/kernel/time_64.c 	return cpu_data(cpu).clock_tick;
cpu               656 arch/sparc/kernel/time_64.c 	unsigned int cpu;
cpu               659 arch/sparc/kernel/time_64.c 	for_each_cpu(cpu, freq->policy->cpus) {
cpu               660 arch/sparc/kernel/time_64.c 		ft = &per_cpu(sparc64_freq_table, cpu);
cpu               664 arch/sparc/kernel/time_64.c 			ft->clock_tick_ref = cpu_data(cpu).clock_tick;
cpu               669 arch/sparc/kernel/time_64.c 			cpu_data(cpu).clock_tick =
cpu               720 arch/sparc/kernel/time_64.c 	int cpu = smp_processor_id();
cpu               721 arch/sparc/kernel/time_64.c 	struct clock_event_device *evt = &per_cpu(sparc64_events, cpu);
cpu               732 arch/sparc/kernel/time_64.c 		       "Spurious SPARC64 timer interrupt on cpu %d\n", cpu);
cpu               376 arch/sparc/kernel/traps_32.c 	    TI_CPU         != offsetof(struct thread_info, cpu) ||
cpu               830 arch/sparc/kernel/traps_64.c 	int cpu = smp_processor_id();
cpu               835 arch/sparc/kernel/traps_64.c 	p = cheetah_error_log + (cpu * 2);
cpu              1967 arch/sparc/kernel/traps_64.c 			    int cpu, const char *pfx, atomic_t *ocnt)
cpu              1973 arch/sparc/kernel/traps_64.c 	printk("%s: Reporting on cpu %d\n", pfx, cpu);
cpu              2086 arch/sparc/kernel/traps_64.c 	int cpu;
cpu              2088 arch/sparc/kernel/traps_64.c 	cpu = get_cpu();
cpu              2090 arch/sparc/kernel/traps_64.c 	tb = &trap_block[cpu];
cpu              2122 arch/sparc/kernel/traps_64.c 	sun4v_log_error(regs, &local_copy, cpu,
cpu              2204 arch/sparc/kernel/traps_64.c 	int cpu;
cpu              2206 arch/sparc/kernel/traps_64.c 	cpu = get_cpu();
cpu              2208 arch/sparc/kernel/traps_64.c 	tb = &trap_block[cpu];
cpu              2228 arch/sparc/kernel/traps_64.c 	if (pci_poke_in_progress && pci_poke_cpu == cpu) {
cpu              2236 arch/sparc/kernel/traps_64.c 	sun4v_log_error(regs, &local_copy, cpu,
cpu              2836 arch/sparc/kernel/traps_64.c 	int cpu = hard_smp_processor_id();
cpu              2837 arch/sparc/kernel/traps_64.c 	struct trap_per_cpu *p = &trap_block[cpu];
cpu              2853 arch/sparc/kernel/traps_64.c 		     TI_CPU != offsetof(struct thread_info, cpu) ||
cpu               245 arch/sparc/mm/init_64.c static inline void clear_dcache_dirty_cpu(struct page *page, unsigned long cpu)
cpu               263 arch/sparc/mm/init_64.c 			     : "r" (cpu), "r" (mask), "r" (&page->flags),
cpu               291 arch/sparc/mm/init_64.c 			int cpu = ((pg_flags >> PG_dcache_cpu_shift) &
cpu               298 arch/sparc/mm/init_64.c 			if (cpu == this_cpu)
cpu               301 arch/sparc/mm/init_64.c 				smp_flush_dcache_page_impl(page, cpu);
cpu               303 arch/sparc/mm/init_64.c 			clear_dcache_dirty_cpu(page, cpu);
cpu               784 arch/sparc/mm/init_64.c 	int cpu;
cpu               806 arch/sparc/mm/init_64.c 	for_each_online_cpu(cpu) {
cpu               812 arch/sparc/mm/init_64.c 		mm = per_cpu(per_cpu_secondary_mm, cpu);
cpu              1459 arch/sparc/mm/init_64.c 	int cpu;
cpu              1463 arch/sparc/mm/init_64.c 	for_each_cpu(cpu, &mask)
cpu              1464 arch/sparc/mm/init_64.c 		numa_cpu_lookup_table[cpu] = index;
cpu              1469 arch/sparc/mm/init_64.c 		for_each_cpu(cpu, &mask)
cpu              1470 arch/sparc/mm/init_64.c 			printk("%d ", cpu);
cpu              1537 arch/sparc/mm/init_64.c 	unsigned long cpu, index;
cpu              1543 arch/sparc/mm/init_64.c 	for_each_present_cpu(cpu) {
cpu              1544 arch/sparc/mm/init_64.c 		numa_cpu_lookup_table[cpu] = index;
cpu              1545 arch/sparc/mm/init_64.c 		cpumask_copy(&numa_cpumask_lookup_table[index], cpumask_of(cpu));
cpu              1547 arch/sparc/mm/init_64.c 		node_masks[index].match = cpu << 36UL;
cpu              1050 arch/sparc/mm/srmmu.c 	int cpu = 0;
cpu              1077 arch/sparc/mm/srmmu.c 			cpu++;
cpu              1078 arch/sparc/mm/srmmu.c 			if (cpu >= nr_cpu_ids || !cpu_online(cpu))
cpu               221 arch/sparc/net/bpf_jit_comp_32.c 	emit_load32(G6, struct thread_info, cpu, REG)
cpu                63 arch/um/include/asm/mmu_context.h 	unsigned cpu = smp_processor_id();
cpu                66 arch/um/include/asm/mmu_context.h 		cpumask_clear_cpu(cpu, mm_cpumask(prev));
cpu                67 arch/um/include/asm/mmu_context.h 		cpumask_set_cpu(cpu, mm_cpumask(next));
cpu                22 arch/um/include/asm/thread_info.h 	__u32			cpu;		/* current CPU */
cpu                37 arch/um/include/asm/thread_info.h 	.cpu =		0,			\
cpu                55 arch/um/include/shared/kern_util.h extern int cpu(void);
cpu               569 arch/um/kernel/irq.c 		task = cpu_tasks[ti->cpu].task;
cpu                80 arch/um/kernel/process.c 	cpu_tasks[task_thread_info(task)->cpu] = ((struct cpu_task)
cpu               256 arch/um/kernel/process.c 	cpu_tasks[current_thread_info()->cpu].pid = os_getpid();
cpu               308 arch/um/kernel/process.c 	return current_thread_info()->cpu;
cpu               445 arch/um/kernel/process.c 	int cpu = current_thread_info()->cpu;
cpu               447 arch/um/kernel/process.c 	return save_i387_registers(userspace_pid[cpu], (unsigned long *) fpu);
cpu                49 arch/unicore32/include/asm/mmu_context.h 	unsigned int cpu = smp_processor_id();
cpu                51 arch/unicore32/include/asm/mmu_context.h 	if (!cpumask_test_and_set_cpu(cpu, mm_cpumask(next)) || prev != next)
cpu                70 arch/unicore32/include/asm/thread_info.h 	__u32			cpu;		/* cpu */
cpu                42 arch/unicore32/kernel/asm-offsets.c 	DEFINE(TI_CPU,		offsetof(struct thread_info, cpu));
cpu               104 arch/unicore32/kernel/setup.c 	unsigned int cpu = smp_processor_id();
cpu               105 arch/unicore32/kernel/setup.c 	struct stack *stk = &stacks[cpu];
cpu               280 arch/unicore32/kernel/setup.c static struct cpu cpuinfo_unicore;
cpu                10 arch/x86/boot/compressed/cpuflags.c 	return test_bit(flag, cpu.flags);
cpu                94 arch/x86/boot/cpucheck.c 		err_flags[i] = req_flags[i] & ~cpu.flags[i];
cpu               114 arch/x86/boot/cpucheck.c 	memset(&cpu.flags, 0, sizeof(cpu.flags));
cpu               115 arch/x86/boot/cpucheck.c 	cpu.level = 3;
cpu               118 arch/x86/boot/cpucheck.c 		cpu.level = 4;
cpu               123 arch/x86/boot/cpucheck.c 	if (test_bit(X86_FEATURE_LM, cpu.flags))
cpu               124 arch/x86/boot/cpucheck.c 		cpu.level = 64;
cpu               144 arch/x86/boot/cpucheck.c 		   is_centaur() && cpu.model >= 6) {
cpu               155 arch/x86/boot/cpucheck.c 		set_bit(X86_FEATURE_CX8, cpu.flags);
cpu               167 arch/x86/boot/cpucheck.c 		    : "+a" (level), "=d" (cpu.flags[0])
cpu               174 arch/x86/boot/cpucheck.c 		   is_intel() && cpu.level == 6 &&
cpu               175 arch/x86/boot/cpucheck.c 		   (cpu.model == 9 || cpu.model == 13)) {
cpu               179 arch/x86/boot/cpucheck.c 			set_bit(X86_FEATURE_PAE, cpu.flags);
cpu               192 arch/x86/boot/cpucheck.c 		*cpu_level_ptr = cpu.level;
cpu               196 arch/x86/boot/cpucheck.c 	return (cpu.level < req_level || err) ? -1 : 0;
cpu               205 arch/x86/boot/cpucheck.c 	    cpu.family != 6 ||
cpu               206 arch/x86/boot/cpucheck.c 	    cpu.model != INTEL_FAM6_XEON_PHI_KNL)
cpu                10 arch/x86/boot/cpuflags.c struct cpu_features cpu;
cpu                98 arch/x86/boot/cpuflags.c 		set_bit(X86_FEATURE_FPU, cpu.flags);
cpu               106 arch/x86/boot/cpuflags.c 			cpuid(0x1, &tfms, &ignored, &cpu.flags[4],
cpu               107 arch/x86/boot/cpuflags.c 			      &cpu.flags[0]);
cpu               108 arch/x86/boot/cpuflags.c 			cpu.level = (tfms >> 8) & 15;
cpu               109 arch/x86/boot/cpuflags.c 			cpu.family = cpu.level;
cpu               110 arch/x86/boot/cpuflags.c 			cpu.model = (tfms >> 4) & 15;
cpu               111 arch/x86/boot/cpuflags.c 			if (cpu.level >= 6)
cpu               112 arch/x86/boot/cpuflags.c 				cpu.model += ((tfms >> 16) & 0xf) << 4;
cpu               117 arch/x86/boot/cpuflags.c 					&cpu.flags[16], &ignored);
cpu               125 arch/x86/boot/cpuflags.c 			cpuid(0x80000001, &ignored, &ignored, &cpu.flags[6],
cpu               126 arch/x86/boot/cpuflags.c 			      &cpu.flags[1]);
cpu                15 arch/x86/boot/cpuflags.h extern struct cpu_features cpu;
cpu                86 arch/x86/boot/main.c 	if (cpu.level < 6)
cpu                14 arch/x86/entry/vdso/vgetcpu.c __vdso_getcpu(unsigned *cpu, unsigned *node, struct getcpu_cache *unused)
cpu                16 arch/x86/entry/vdso/vgetcpu.c 	vdso_read_cpunode(cpu, node);
cpu                21 arch/x86/entry/vdso/vgetcpu.c long getcpu(unsigned *cpu, unsigned *node, struct getcpu_cache *tcache)
cpu               485 arch/x86/events/amd/core.c static struct amd_nb *amd_alloc_nb(int cpu)
cpu               490 arch/x86/events/amd/core.c 	nb = kzalloc_node(sizeof(struct amd_nb), GFP_KERNEL, cpu_to_node(cpu));
cpu               506 arch/x86/events/amd/core.c static int amd_pmu_cpu_prepare(int cpu)
cpu               508 arch/x86/events/amd/core.c 	struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
cpu               515 arch/x86/events/amd/core.c 	cpuc->amd_nb = amd_alloc_nb(cpu);
cpu               522 arch/x86/events/amd/core.c static void amd_pmu_cpu_starting(int cpu)
cpu               524 arch/x86/events/amd/core.c 	struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
cpu               534 arch/x86/events/amd/core.c 	nb_id = amd_get_nb_id(cpu);
cpu               553 arch/x86/events/amd/core.c static void amd_pmu_cpu_dead(int cpu)
cpu               560 arch/x86/events/amd/core.c 	cpuhw = &per_cpu(cpu_hw_events, cpu);
cpu               951 arch/x86/events/amd/ibs.c static int x86_pmu_amd_ibs_starting_cpu(unsigned int cpu)
cpu               987 arch/x86/events/amd/ibs.c static int x86_pmu_amd_ibs_dying_cpu(unsigned int cpu)
cpu               223 arch/x86/events/amd/iommu.c 	if (event->cpu < 0)
cpu               222 arch/x86/events/amd/power.c static int power_cpu_exit(unsigned int cpu)
cpu               226 arch/x86/events/amd/power.c 	if (!cpumask_test_and_clear_cpu(cpu, &cpu_mask))
cpu               234 arch/x86/events/amd/power.c 	target = cpumask_any_but(topology_sibling_cpumask(cpu), cpu);
cpu               237 arch/x86/events/amd/power.c 		perf_pmu_migrate_context(&pmu_class, cpu, target);
cpu               242 arch/x86/events/amd/power.c static int power_cpu_init(unsigned int cpu)
cpu               255 arch/x86/events/amd/power.c 	target = cpumask_any_but(topology_sibling_cpumask(cpu), cpu);
cpu               257 arch/x86/events/amd/power.c 		cpumask_set_cpu(cpu, &cpu_mask);
cpu                43 arch/x86/events/amd/uncore.c 	int cpu;
cpu                75 arch/x86/events/amd/uncore.c 		return *per_cpu_ptr(amd_uncore_nb, event->cpu);
cpu                77 arch/x86/events/amd/uncore.c 		return *per_cpu_ptr(amd_uncore_llc, event->cpu);
cpu               202 arch/x86/events/amd/uncore.c 	if (event->cpu < 0)
cpu               210 arch/x86/events/amd/uncore.c 		int thread = 2 * (cpu_data(event->cpu).cpu_core_id % 4);
cpu               213 arch/x86/events/amd/uncore.c 			thread += cpu_data(event->cpu).apicid & 1;
cpu               227 arch/x86/events/amd/uncore.c 	event->cpu = uncore->cpu;
cpu               320 arch/x86/events/amd/uncore.c static struct amd_uncore *amd_uncore_alloc(unsigned int cpu)
cpu               323 arch/x86/events/amd/uncore.c 			cpu_to_node(cpu));
cpu               326 arch/x86/events/amd/uncore.c static int amd_uncore_cpu_up_prepare(unsigned int cpu)
cpu               331 arch/x86/events/amd/uncore.c 		uncore_nb = amd_uncore_alloc(cpu);
cpu               334 arch/x86/events/amd/uncore.c 		uncore_nb->cpu = cpu;
cpu               341 arch/x86/events/amd/uncore.c 		*per_cpu_ptr(amd_uncore_nb, cpu) = uncore_nb;
cpu               345 arch/x86/events/amd/uncore.c 		uncore_llc = amd_uncore_alloc(cpu);
cpu               348 arch/x86/events/amd/uncore.c 		uncore_llc->cpu = cpu;
cpu               355 arch/x86/events/amd/uncore.c 		*per_cpu_ptr(amd_uncore_llc, cpu) = uncore_llc;
cpu               362 arch/x86/events/amd/uncore.c 		*per_cpu_ptr(amd_uncore_nb, cpu) = NULL;
cpu               371 arch/x86/events/amd/uncore.c 	unsigned int cpu;
cpu               374 arch/x86/events/amd/uncore.c 	for_each_online_cpu(cpu) {
cpu               375 arch/x86/events/amd/uncore.c 		that = *per_cpu_ptr(uncores, cpu);
cpu               394 arch/x86/events/amd/uncore.c static int amd_uncore_cpu_starting(unsigned int cpu)
cpu               400 arch/x86/events/amd/uncore.c 		uncore = *per_cpu_ptr(amd_uncore_nb, cpu);
cpu               405 arch/x86/events/amd/uncore.c 		*per_cpu_ptr(amd_uncore_nb, cpu) = uncore;
cpu               409 arch/x86/events/amd/uncore.c 		uncore = *per_cpu_ptr(amd_uncore_llc, cpu);
cpu               410 arch/x86/events/amd/uncore.c 		uncore->id = per_cpu(cpu_llc_id, cpu);
cpu               413 arch/x86/events/amd/uncore.c 		*per_cpu_ptr(amd_uncore_llc, cpu) = uncore;
cpu               430 arch/x86/events/amd/uncore.c static void uncore_online(unsigned int cpu,
cpu               433 arch/x86/events/amd/uncore.c 	struct amd_uncore *uncore = *per_cpu_ptr(uncores, cpu);
cpu               437 arch/x86/events/amd/uncore.c 	if (cpu == uncore->cpu)
cpu               438 arch/x86/events/amd/uncore.c 		cpumask_set_cpu(cpu, uncore->active_mask);
cpu               441 arch/x86/events/amd/uncore.c static int amd_uncore_cpu_online(unsigned int cpu)
cpu               444 arch/x86/events/amd/uncore.c 		uncore_online(cpu, amd_uncore_nb);
cpu               447 arch/x86/events/amd/uncore.c 		uncore_online(cpu, amd_uncore_llc);
cpu               452 arch/x86/events/amd/uncore.c static void uncore_down_prepare(unsigned int cpu,
cpu               456 arch/x86/events/amd/uncore.c 	struct amd_uncore *this = *per_cpu_ptr(uncores, cpu);
cpu               458 arch/x86/events/amd/uncore.c 	if (this->cpu != cpu)
cpu               465 arch/x86/events/amd/uncore.c 		if (cpu == i)
cpu               469 arch/x86/events/amd/uncore.c 			perf_pmu_migrate_context(this->pmu, cpu, i);
cpu               470 arch/x86/events/amd/uncore.c 			cpumask_clear_cpu(cpu, that->active_mask);
cpu               472 arch/x86/events/amd/uncore.c 			that->cpu = i;
cpu               478 arch/x86/events/amd/uncore.c static int amd_uncore_cpu_down_prepare(unsigned int cpu)
cpu               481 arch/x86/events/amd/uncore.c 		uncore_down_prepare(cpu, amd_uncore_nb);
cpu               484 arch/x86/events/amd/uncore.c 		uncore_down_prepare(cpu, amd_uncore_llc);
cpu               489 arch/x86/events/amd/uncore.c static void uncore_dead(unsigned int cpu, struct amd_uncore * __percpu *uncores)
cpu               491 arch/x86/events/amd/uncore.c 	struct amd_uncore *uncore = *per_cpu_ptr(uncores, cpu);
cpu               493 arch/x86/events/amd/uncore.c 	if (cpu == uncore->cpu)
cpu               494 arch/x86/events/amd/uncore.c 		cpumask_clear_cpu(cpu, uncore->active_mask);
cpu               498 arch/x86/events/amd/uncore.c 	*per_cpu_ptr(uncores, cpu) = NULL;
cpu               501 arch/x86/events/amd/uncore.c static int amd_uncore_cpu_dead(unsigned int cpu)
cpu               504 arch/x86/events/amd/uncore.c 		uncore_dead(cpu, amd_uncore_nb);
cpu               507 arch/x86/events/amd/uncore.c 		uncore_dead(cpu, amd_uncore_llc);
cpu              1358 arch/x86/events/core.c 	int cpu, idx;
cpu              1365 arch/x86/events/core.c 	cpu = smp_processor_id();
cpu              1366 arch/x86/events/core.c 	cpuc = &per_cpu(cpu_hw_events, cpu);
cpu              1375 arch/x86/events/core.c 		pr_info("CPU#%d: ctrl:       %016llx\n", cpu, ctrl);
cpu              1376 arch/x86/events/core.c 		pr_info("CPU#%d: status:     %016llx\n", cpu, status);
cpu              1377 arch/x86/events/core.c 		pr_info("CPU#%d: overflow:   %016llx\n", cpu, overflow);
cpu              1378 arch/x86/events/core.c 		pr_info("CPU#%d: fixed:      %016llx\n", cpu, fixed);
cpu              1381 arch/x86/events/core.c 			pr_info("CPU#%d: pebs:       %016llx\n", cpu, pebs);
cpu              1385 arch/x86/events/core.c 			pr_info("CPU#%d: debugctl:   %016llx\n", cpu, debugctl);
cpu              1388 arch/x86/events/core.c 	pr_info("CPU#%d: active:     %016llx\n", cpu, *(u64 *)cpuc->active_mask);
cpu              1394 arch/x86/events/core.c 		prev_left = per_cpu(pmc_prev_left[idx], cpu);
cpu              1397 arch/x86/events/core.c 			cpu, idx, pmc_ctrl);
cpu              1399 arch/x86/events/core.c 			cpu, idx, pmc_count);
cpu              1401 arch/x86/events/core.c 			cpu, idx, prev_left);
cpu              1407 arch/x86/events/core.c 			cpu, idx, pmc_count);
cpu              1578 arch/x86/events/core.c static int x86_pmu_prepare_cpu(unsigned int cpu)
cpu              1580 arch/x86/events/core.c 	struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
cpu              1586 arch/x86/events/core.c 		return x86_pmu.cpu_prepare(cpu);
cpu              1590 arch/x86/events/core.c static int x86_pmu_dead_cpu(unsigned int cpu)
cpu              1593 arch/x86/events/core.c 		x86_pmu.cpu_dead(cpu);
cpu              1597 arch/x86/events/core.c static int x86_pmu_online_cpu(unsigned int cpu)
cpu              1599 arch/x86/events/core.c 	struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
cpu              1609 arch/x86/events/core.c static int x86_pmu_starting_cpu(unsigned int cpu)
cpu              1612 arch/x86/events/core.c 		x86_pmu.cpu_starting(cpu);
cpu              1616 arch/x86/events/core.c static int x86_pmu_dying_cpu(unsigned int cpu)
cpu              1619 arch/x86/events/core.c 		x86_pmu.cpu_dying(cpu);
cpu              1687 arch/x86/events/core.c EVENT_ATTR(cpu-cycles,			CPU_CYCLES		);
cpu              1991 arch/x86/events/core.c 	int cpu = raw_smp_processor_id();
cpu              1998 arch/x86/events/core.c 	if (intel_cpuc_prepare(cpuc, cpu))
cpu                85 arch/x86/events/intel/bts.c 	int cpu = event->cpu;
cpu                86 arch/x86/events/intel/bts.c 	int node = (cpu == -1) ? cpu : cpu_to_node(cpu);
cpu               146 arch/x86/events/intel/bts.c 	int cpu = raw_smp_processor_id();
cpu               147 arch/x86/events/intel/bts.c 	struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;
cpu               185 arch/x86/events/intel/bts.c 	int cpu = raw_smp_processor_id();
cpu               186 arch/x86/events/intel/bts.c 	struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;
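
The bts.c lines show the standard guard for unbound perf events: event->cpu == -1 means "not bound to a CPU", so cpu_to_node() must not be called and the allocation falls back to no NUMA preference. A small sketch of that idiom, assuming a hypothetical helper name:

#include <linux/perf_event.h>
#include <linux/slab.h>

/* Sketch: NUMA-aware allocation for a possibly-unbound event (name is illustrative). */
static void *my_alloc_event_buf(struct perf_event *event, size_t size)
{
	int node = (event->cpu == -1) ? NUMA_NO_NODE : cpu_to_node(event->cpu);

	return kzalloc_node(size, GFP_KERNEL, node);
}
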
cpu              3615 arch/x86/events/intel/core.c static struct intel_shared_regs *allocate_shared_regs(int cpu)
cpu              3621 arch/x86/events/intel/core.c 			    GFP_KERNEL, cpu_to_node(cpu));
cpu              3634 arch/x86/events/intel/core.c static struct intel_excl_cntrs *allocate_excl_cntrs(int cpu)
cpu              3639 arch/x86/events/intel/core.c 			 GFP_KERNEL, cpu_to_node(cpu));
cpu              3648 arch/x86/events/intel/core.c int intel_cpuc_prepare(struct cpu_hw_events *cpuc, int cpu)
cpu              3653 arch/x86/events/intel/core.c 		cpuc->shared_regs = allocate_shared_regs(cpu);
cpu              3661 arch/x86/events/intel/core.c 		cpuc->constraint_list = kzalloc_node(sz, GFP_KERNEL, cpu_to_node(cpu));
cpu              3667 arch/x86/events/intel/core.c 		cpuc->excl_cntrs = allocate_excl_cntrs(cpu);
cpu              3688 arch/x86/events/intel/core.c static int intel_pmu_cpu_prepare(int cpu)
cpu              3690 arch/x86/events/intel/core.c 	return intel_cpuc_prepare(&per_cpu(cpu_hw_events, cpu), cpu);
cpu              3706 arch/x86/events/intel/core.c static void intel_pmu_cpu_starting(int cpu)
cpu              3708 arch/x86/events/intel/core.c 	struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
cpu              3709 arch/x86/events/intel/core.c 	int core_id = topology_core_id(cpu);
cpu              3712 arch/x86/events/intel/core.c 	init_debug_store_on_cpu(cpu);
cpu              3736 arch/x86/events/intel/core.c 		for_each_cpu(i, topology_sibling_cpumask(cpu)) {
cpu              3754 arch/x86/events/intel/core.c 		for_each_cpu(i, topology_sibling_cpumask(cpu)) {
cpu              3788 arch/x86/events/intel/core.c static void intel_pmu_cpu_dying(int cpu)
cpu              3790 arch/x86/events/intel/core.c 	fini_debug_store_on_cpu(cpu);
cpu              3810 arch/x86/events/intel/core.c static void intel_pmu_cpu_dead(int cpu)
cpu              3812 arch/x86/events/intel/core.c 	intel_cpuc_finish(&per_cpu(cpu_hw_events, cpu));
cpu               307 arch/x86/events/intel/cstate.c 	int cpu;
cpu               316 arch/x86/events/intel/cstate.c 	if (event->cpu < 0)
cpu               326 arch/x86/events/intel/cstate.c 		cpu = cpumask_any_and(&cstate_core_cpu_mask,
cpu               327 arch/x86/events/intel/cstate.c 				      topology_sibling_cpumask(event->cpu));
cpu               335 arch/x86/events/intel/cstate.c 		cpu = cpumask_any_and(&cstate_pkg_cpu_mask,
cpu               336 arch/x86/events/intel/cstate.c 				      topology_die_cpumask(event->cpu));
cpu               341 arch/x86/events/intel/cstate.c 	if (cpu >= nr_cpu_ids)
cpu               344 arch/x86/events/intel/cstate.c 	event->cpu = cpu;
cpu               401 arch/x86/events/intel/cstate.c static int cstate_cpu_exit(unsigned int cpu)
cpu               406 arch/x86/events/intel/cstate.c 	    cpumask_test_and_clear_cpu(cpu, &cstate_core_cpu_mask)) {
cpu               408 arch/x86/events/intel/cstate.c 		target = cpumask_any_but(topology_sibling_cpumask(cpu), cpu);
cpu               412 arch/x86/events/intel/cstate.c 			perf_pmu_migrate_context(&cstate_core_pmu, cpu, target);
cpu               417 arch/x86/events/intel/cstate.c 	    cpumask_test_and_clear_cpu(cpu, &cstate_pkg_cpu_mask)) {
cpu               419 arch/x86/events/intel/cstate.c 		target = cpumask_any_but(topology_die_cpumask(cpu), cpu);
cpu               423 arch/x86/events/intel/cstate.c 			perf_pmu_migrate_context(&cstate_pkg_pmu, cpu, target);
cpu               429 arch/x86/events/intel/cstate.c static int cstate_cpu_init(unsigned int cpu)
cpu               438 arch/x86/events/intel/cstate.c 				 topology_sibling_cpumask(cpu));
cpu               441 arch/x86/events/intel/cstate.c 		cpumask_set_cpu(cpu, &cstate_core_cpu_mask);
cpu               448 arch/x86/events/intel/cstate.c 				 topology_die_cpumask(cpu));
cpu               450 arch/x86/events/intel/cstate.c 		cpumask_set_cpu(cpu, &cstate_pkg_cpu_mask);
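
The cstate.c entries implement the "one reader CPU per scope" scheme: at event init the event is redirected to whichever CPU already owns the core or package, and the hotplug callbacks keep exactly one online CPU per scope in the owner mask. A sketch of the init-time redirection, with an illustrative owner mask (my_pkg_owner_mask is an assumption, not the driver's variable):

#include <linux/cpumask.h>
#include <linux/perf_event.h>
#include <linux/topology.h>

static struct cpumask my_pkg_owner_mask;	/* illustrative */

static int my_event_init_redirect(struct perf_event *event)
{
	unsigned int cpu;

	if (event->cpu < 0)		/* per-task events are not supported */
		return -EINVAL;

	cpu = cpumask_any_and(&my_pkg_owner_mask,
			      topology_die_cpumask(event->cpu));
	if (cpu >= nr_cpu_ids)
		return -ENODEV;

	event->cpu = cpu;		/* count on the owner CPU of this package */
	return 0;
}
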
cpu               263 arch/x86/events/intel/ds.c void init_debug_store_on_cpu(int cpu)
cpu               265 arch/x86/events/intel/ds.c 	struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;
cpu               270 arch/x86/events/intel/ds.c 	wrmsr_on_cpu(cpu, MSR_IA32_DS_AREA,
cpu               275 arch/x86/events/intel/ds.c void fini_debug_store_on_cpu(int cpu)
cpu               277 arch/x86/events/intel/ds.c 	if (!per_cpu(cpu_hw_events, cpu).ds)
cpu               280 arch/x86/events/intel/ds.c 	wrmsr_on_cpu(cpu, MSR_IA32_DS_AREA, 0, 0);
cpu               318 arch/x86/events/intel/ds.c static void *dsalloc_pages(size_t size, gfp_t flags, int cpu)
cpu               321 arch/x86/events/intel/ds.c 	int node = cpu_to_node(cpu);
cpu               334 arch/x86/events/intel/ds.c static int alloc_pebs_buffer(int cpu)
cpu               336 arch/x86/events/intel/ds.c 	struct cpu_hw_events *hwev = per_cpu_ptr(&cpu_hw_events, cpu);
cpu               339 arch/x86/events/intel/ds.c 	int max, node = cpu_to_node(cpu);
cpu               345 arch/x86/events/intel/ds.c 	buffer = dsalloc_pages(bsiz, GFP_KERNEL, cpu);
cpu               359 arch/x86/events/intel/ds.c 		per_cpu(insn_buffer, cpu) = insn_buff;
cpu               363 arch/x86/events/intel/ds.c 	cea = &get_cpu_entry_area(cpu)->cpu_debug_buffers.pebs_buffer;
cpu               372 arch/x86/events/intel/ds.c static void release_pebs_buffer(int cpu)
cpu               374 arch/x86/events/intel/ds.c 	struct cpu_hw_events *hwev = per_cpu_ptr(&cpu_hw_events, cpu);
cpu               380 arch/x86/events/intel/ds.c 	kfree(per_cpu(insn_buffer, cpu));
cpu               381 arch/x86/events/intel/ds.c 	per_cpu(insn_buffer, cpu) = NULL;
cpu               384 arch/x86/events/intel/ds.c 	cea = &get_cpu_entry_area(cpu)->cpu_debug_buffers.pebs_buffer;
cpu               390 arch/x86/events/intel/ds.c static int alloc_bts_buffer(int cpu)
cpu               392 arch/x86/events/intel/ds.c 	struct cpu_hw_events *hwev = per_cpu_ptr(&cpu_hw_events, cpu);
cpu               400 arch/x86/events/intel/ds.c 	buffer = dsalloc_pages(BTS_BUFFER_SIZE, GFP_KERNEL | __GFP_NOWARN, cpu);
cpu               407 arch/x86/events/intel/ds.c 	cea = &get_cpu_entry_area(cpu)->cpu_debug_buffers.bts_buffer;
cpu               419 arch/x86/events/intel/ds.c static void release_bts_buffer(int cpu)
cpu               421 arch/x86/events/intel/ds.c 	struct cpu_hw_events *hwev = per_cpu_ptr(&cpu_hw_events, cpu);
cpu               428 arch/x86/events/intel/ds.c 	cea = &get_cpu_entry_area(cpu)->cpu_debug_buffers.bts_buffer;
cpu               434 arch/x86/events/intel/ds.c static int alloc_ds_buffer(int cpu)
cpu               436 arch/x86/events/intel/ds.c 	struct debug_store *ds = &get_cpu_entry_area(cpu)->cpu_debug_store;
cpu               439 arch/x86/events/intel/ds.c 	per_cpu(cpu_hw_events, cpu).ds = ds;
cpu               443 arch/x86/events/intel/ds.c static void release_ds_buffer(int cpu)
cpu               445 arch/x86/events/intel/ds.c 	per_cpu(cpu_hw_events, cpu).ds = NULL;
cpu               450 arch/x86/events/intel/ds.c 	int cpu;
cpu               455 arch/x86/events/intel/ds.c 	for_each_possible_cpu(cpu)
cpu               456 arch/x86/events/intel/ds.c 		release_ds_buffer(cpu);
cpu               458 arch/x86/events/intel/ds.c 	for_each_possible_cpu(cpu) {
cpu               464 arch/x86/events/intel/ds.c 		fini_debug_store_on_cpu(cpu);
cpu               467 arch/x86/events/intel/ds.c 	for_each_possible_cpu(cpu) {
cpu               468 arch/x86/events/intel/ds.c 		release_pebs_buffer(cpu);
cpu               469 arch/x86/events/intel/ds.c 		release_bts_buffer(cpu);
cpu               476 arch/x86/events/intel/ds.c 	int cpu;
cpu               490 arch/x86/events/intel/ds.c 	for_each_possible_cpu(cpu) {
cpu               491 arch/x86/events/intel/ds.c 		if (alloc_ds_buffer(cpu)) {
cpu               496 arch/x86/events/intel/ds.c 		if (!bts_err && alloc_bts_buffer(cpu))
cpu               499 arch/x86/events/intel/ds.c 		if (!pebs_err && alloc_pebs_buffer(cpu))
cpu               507 arch/x86/events/intel/ds.c 		for_each_possible_cpu(cpu)
cpu               508 arch/x86/events/intel/ds.c 			release_bts_buffer(cpu);
cpu               512 arch/x86/events/intel/ds.c 		for_each_possible_cpu(cpu)
cpu               513 arch/x86/events/intel/ds.c 			release_pebs_buffer(cpu);
cpu               517 arch/x86/events/intel/ds.c 		for_each_possible_cpu(cpu)
cpu               518 arch/x86/events/intel/ds.c 			release_ds_buffer(cpu);
cpu               526 arch/x86/events/intel/ds.c 		for_each_possible_cpu(cpu) {
cpu               531 arch/x86/events/intel/ds.c 			init_debug_store_on_cpu(cpu);
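
The ds.c entries use the classic allocate-for-every-possible-CPU pattern with staged unwinding: DS area first, then BTS and PEBS buffers, and on failure everything already allocated is released in a second pass. A reduced sketch of that shape; my_alloc_buf()/my_release_buf() are hypothetical stand-ins for the per-buffer helpers.

#include <linux/cpumask.h>

int my_alloc_buf(int cpu);		/* illustrative helpers */
void my_release_buf(int cpu);		/* must tolerate never-allocated CPUs */

static int my_alloc_all(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		if (my_alloc_buf(cpu))
			goto unwind;
	}
	return 0;

unwind:
	for_each_possible_cpu(cpu)
		my_release_buf(cpu);
	return -ENOMEM;
}
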
cpu               803 arch/x86/events/intel/p4.c 	int cpu = get_cpu();
cpu               813 arch/x86/events/intel/p4.c 	cccr = p4_default_cccr_conf(cpu);
cpu               814 arch/x86/events/intel/p4.c 	escr = p4_default_escr_conf(cpu, event->attr.exclude_kernel,
cpu               819 arch/x86/events/intel/p4.c 	if (p4_ht_active() && p4_ht_thread(cpu))
cpu              1066 arch/x86/events/intel/p4.c static void p4_pmu_swap_config_ts(struct hw_perf_event *hwc, int cpu)
cpu              1073 arch/x86/events/intel/p4.c 	if (!p4_should_swap_ts(hwc->config, cpu))
cpu              1084 arch/x86/events/intel/p4.c 	if (p4_ht_thread(cpu)) {
cpu              1210 arch/x86/events/intel/p4.c 	int cpu = smp_processor_id();
cpu              1224 arch/x86/events/intel/p4.c 		thread = p4_ht_thread(cpu);
cpu              1241 arch/x86/events/intel/p4.c 		if (hwc->idx != -1 && !p4_should_swap_ts(hwc->config, cpu)) {
cpu              1276 arch/x86/events/intel/p4.c 		if (p4_should_swap_ts(hwc->config, cpu))
cpu              1278 arch/x86/events/intel/p4.c 		p4_pmu_swap_config_ts(hwc, cpu);
cpu               612 arch/x86/events/intel/pt.c static struct topa *topa_alloc(int cpu, gfp_t gfp)
cpu               614 arch/x86/events/intel/pt.c 	int node = cpu_to_node(cpu);
cpu               701 arch/x86/events/intel/pt.c static int topa_insert_pages(struct pt_buffer *buf, int cpu, gfp_t gfp)
cpu               712 arch/x86/events/intel/pt.c 		topa = topa_alloc(cpu, gfp);
cpu              1149 arch/x86/events/intel/pt.c static int pt_buffer_init_topa(struct pt_buffer *buf, int cpu,
cpu              1155 arch/x86/events/intel/pt.c 	topa = topa_alloc(cpu, gfp);
cpu              1162 arch/x86/events/intel/pt.c 		err = topa_insert_pages(buf, cpu, gfp);
cpu              1196 arch/x86/events/intel/pt.c 	int node, ret, cpu = event->cpu;
cpu              1201 arch/x86/events/intel/pt.c 	if (cpu == -1)
cpu              1202 arch/x86/events/intel/pt.c 		cpu = raw_smp_processor_id();
cpu              1203 arch/x86/events/intel/pt.c 	node = cpu_to_node(cpu);
cpu              1216 arch/x86/events/intel/pt.c 	ret = pt_buffer_init_topa(buf, cpu, nr_pages, GFP_KERNEL);
cpu              1240 arch/x86/events/intel/pt.c 	int node = event->cpu == -1 ? -1 : cpu_to_node(event->cpu);
cpu              1574 arch/x86/events/intel/pt.c 	int ret, cpu, prior_warn = 0;
cpu              1582 arch/x86/events/intel/pt.c 	for_each_online_cpu(cpu) {
cpu              1585 arch/x86/events/intel/pt.c 		ret = rdmsrl_safe_on_cpu(cpu, MSR_IA32_RTIT_CTL, &ctl);
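
The pt.c probe loop reads MSR_IA32_RTIT_CTL on every online CPU with the _safe variant, so a fault on one CPU is skipped rather than fatal. A sketch of that probing idiom (warning handling simplified; callers normally hold the CPU hotplug lock):

#include <linux/cpumask.h>
#include <linux/printk.h>
#include <asm/msr.h>

static void my_probe_rtit_ctl(void)
{
	int cpu;
	u64 ctl;

	for_each_online_cpu(cpu) {
		if (rdmsrl_safe_on_cpu(cpu, MSR_IA32_RTIT_CTL, &ctl))
			continue;	/* MSR not readable on this CPU */
		if (ctl & RTIT_CTL_TRACEEN)
			pr_warn("CPU%d: PT tracing already enabled\n", cpu);
	}
}
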
cpu               117 arch/x86/events/intel/rapl.c 	int			cpu;
cpu               143 arch/x86/events/intel/rapl.c static inline struct rapl_pmu *cpu_to_rapl_pmu(unsigned int cpu)
cpu               145 arch/x86/events/intel/rapl.c 	unsigned int dieid = topology_logical_die_id(cpu);
cpu               344 arch/x86/events/intel/rapl.c 	if (event->cpu < 0)
cpu               364 arch/x86/events/intel/rapl.c 	pmu = cpu_to_rapl_pmu(event->cpu);
cpu               367 arch/x86/events/intel/rapl.c 	event->cpu = pmu->cpu;
cpu               524 arch/x86/events/intel/rapl.c static int rapl_cpu_offline(unsigned int cpu)
cpu               526 arch/x86/events/intel/rapl.c 	struct rapl_pmu *pmu = cpu_to_rapl_pmu(cpu);
cpu               530 arch/x86/events/intel/rapl.c 	if (!cpumask_test_and_clear_cpu(cpu, &rapl_cpu_mask))
cpu               533 arch/x86/events/intel/rapl.c 	pmu->cpu = -1;
cpu               535 arch/x86/events/intel/rapl.c 	target = cpumask_any_but(topology_die_cpumask(cpu), cpu);
cpu               540 arch/x86/events/intel/rapl.c 		pmu->cpu = target;
cpu               541 arch/x86/events/intel/rapl.c 		perf_pmu_migrate_context(pmu->pmu, cpu, target);
cpu               546 arch/x86/events/intel/rapl.c static int rapl_cpu_online(unsigned int cpu)
cpu               548 arch/x86/events/intel/rapl.c 	struct rapl_pmu *pmu = cpu_to_rapl_pmu(cpu);
cpu               552 arch/x86/events/intel/rapl.c 		pmu = kzalloc_node(sizeof(*pmu), GFP_KERNEL, cpu_to_node(cpu));
cpu               562 arch/x86/events/intel/rapl.c 		rapl_pmus->pmus[topology_logical_die_id(cpu)] = pmu;
cpu               569 arch/x86/events/intel/rapl.c 	target = cpumask_any_and(&rapl_cpu_mask, topology_die_cpumask(cpu));
cpu               573 arch/x86/events/intel/rapl.c 	cpumask_set_cpu(cpu, &rapl_cpu_mask);
cpu               574 arch/x86/events/intel/rapl.c 	pmu->cpu = cpu;
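
The rapl.c online callback allocates the per-die pmu lazily on the first CPU of a die (NUMA-local via kzalloc_node) and then claims the die's reader role only if nobody else holds it. A compressed sketch under illustrative names (struct my_pmu, my_lookup_die_pmu(), my_install_die_pmu() and my_reader_mask are assumptions):

#include <linux/cpumask.h>
#include <linux/slab.h>
#include <linux/topology.h>

static struct cpumask my_reader_mask;	/* illustrative */

static int my_cpu_online(unsigned int cpu)
{
	struct my_pmu *pmu = my_lookup_die_pmu(topology_logical_die_id(cpu));

	if (!pmu) {
		pmu = kzalloc_node(sizeof(*pmu), GFP_KERNEL, cpu_to_node(cpu));
		if (!pmu)
			return -ENOMEM;
		my_install_die_pmu(topology_logical_die_id(cpu), pmu);
	}

	/* become the die's designated reader only if the role is vacant */
	if (cpumask_any_and(&my_reader_mask, topology_die_cpumask(cpu)) >= nr_cpu_ids)
		cpumask_set_cpu(cpu, &my_reader_mask);

	return 0;
}
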
cpu               103 arch/x86/events/intel/uncore.c struct intel_uncore_box *uncore_pmu_to_box(struct intel_uncore_pmu *pmu, int cpu)
cpu               105 arch/x86/events/intel/uncore.c 	unsigned int dieid = topology_logical_die_id(cpu);
cpu               271 arch/x86/events/intel/uncore.c 	if (!box->n_active || box->cpu != smp_processor_id())
cpu               329 arch/x86/events/intel/uncore.c 	box->cpu = -1;
cpu               719 arch/x86/events/intel/uncore.c 	if (event->cpu < 0)
cpu               721 arch/x86/events/intel/uncore.c 	box = uncore_pmu_to_box(pmu, event->cpu);
cpu               722 arch/x86/events/intel/uncore.c 	if (!box || box->cpu < 0)
cpu               724 arch/x86/events/intel/uncore.c 	event->cpu = box->cpu;
cpu              1171 arch/x86/events/intel/uncore.c 			WARN_ON_ONCE(box->cpu != -1);
cpu              1172 arch/x86/events/intel/uncore.c 			box->cpu = new_cpu;
cpu              1176 arch/x86/events/intel/uncore.c 		WARN_ON_ONCE(box->cpu != old_cpu);
cpu              1177 arch/x86/events/intel/uncore.c 		box->cpu = -1;
cpu              1183 arch/x86/events/intel/uncore.c 		box->cpu = new_cpu;
cpu              1212 arch/x86/events/intel/uncore.c static int uncore_event_cpu_offline(unsigned int cpu)
cpu              1217 arch/x86/events/intel/uncore.c 	if (!cpumask_test_and_clear_cpu(cpu, &uncore_cpu_mask))
cpu              1220 arch/x86/events/intel/uncore.c 	target = cpumask_any_but(topology_die_cpumask(cpu), cpu);
cpu              1228 arch/x86/events/intel/uncore.c 	uncore_change_context(uncore_msr_uncores, cpu, target);
cpu              1229 arch/x86/events/intel/uncore.c 	uncore_change_context(uncore_mmio_uncores, cpu, target);
cpu              1230 arch/x86/events/intel/uncore.c 	uncore_change_context(uncore_pci_uncores, cpu, target);
cpu              1234 arch/x86/events/intel/uncore.c 	die = topology_logical_die_id(cpu);
cpu              1241 arch/x86/events/intel/uncore.c 			 unsigned int die, unsigned int cpu)
cpu              1256 arch/x86/events/intel/uncore.c 			box = uncore_alloc_box(type, cpu_to_node(cpu));
cpu              1280 arch/x86/events/intel/uncore.c 			  int id, unsigned int cpu)
cpu              1287 arch/x86/events/intel/uncore.c 	ret = allocate_boxes(types, id, cpu);
cpu              1303 arch/x86/events/intel/uncore.c static int uncore_event_cpu_online(unsigned int cpu)
cpu              1307 arch/x86/events/intel/uncore.c 	die = topology_logical_die_id(cpu);
cpu              1308 arch/x86/events/intel/uncore.c 	msr_ret = uncore_box_ref(uncore_msr_uncores, die, cpu);
cpu              1309 arch/x86/events/intel/uncore.c 	mmio_ret = uncore_box_ref(uncore_mmio_uncores, die, cpu);
cpu              1317 arch/x86/events/intel/uncore.c 	target = cpumask_any_and(&uncore_cpu_mask, topology_die_cpumask(cpu));
cpu              1321 arch/x86/events/intel/uncore.c 	cpumask_set_cpu(cpu, &uncore_cpu_mask);
cpu              1324 arch/x86/events/intel/uncore.c 		uncore_change_context(uncore_msr_uncores, -1, cpu);
cpu              1326 arch/x86/events/intel/uncore.c 		uncore_change_context(uncore_mmio_uncores, -1, cpu);
cpu              1327 arch/x86/events/intel/uncore.c 	uncore_change_context(uncore_pci_uncores, -1, cpu);
cpu               118 arch/x86/events/intel/uncore.h 	int cpu;	/* cpu to collect events */
cpu               493 arch/x86/events/intel/uncore.h struct intel_uncore_box *uncore_pmu_to_box(struct intel_uncore_pmu *pmu, int cpu);
cpu               476 arch/x86/events/intel/uncore_snb.c 	if (event->cpu < 0)
cpu               483 arch/x86/events/intel/uncore_snb.c 	box = uncore_pmu_to_box(pmu, event->cpu);
cpu               484 arch/x86/events/intel/uncore_snb.c 	if (!box || box->cpu < 0)
cpu               487 arch/x86/events/intel/uncore_snb.c 	event->cpu = box->cpu;
cpu               636 arch/x86/events/perf_event.h 	int		(*cpu_prepare)(int cpu);
cpu               637 arch/x86/events/perf_event.h 	void		(*cpu_starting)(int cpu);
cpu               638 arch/x86/events/perf_event.h 	void		(*cpu_dying)(int cpu);
cpu               639 arch/x86/events/perf_event.h 	void		(*cpu_dead)(int cpu);
cpu               952 arch/x86/events/perf_event.h extern int intel_cpuc_prepare(struct cpu_hw_events *cpuc, int cpu);
cpu               957 arch/x86/events/perf_event.h void init_debug_store_on_cpu(int cpu);
cpu               959 arch/x86/events/perf_event.h void fini_debug_store_on_cpu(int cpu);
cpu              1093 arch/x86/events/perf_event.h static inline int intel_cpuc_prepare(struct cpu_hw_events *cpuc, int cpu)
cpu               195 arch/x86/hyperv/hv_apic.c static bool __send_ipi_one(int cpu, int vector)
cpu               199 arch/x86/hyperv/hv_apic.c 	cpumask_set_cpu(cpu, &mask);
cpu               203 arch/x86/hyperv/hv_apic.c static void hv_send_ipi(int cpu, int vector)
cpu               205 arch/x86/hyperv/hv_apic.c 	if (!__send_ipi_one(cpu, vector))
cpu               206 arch/x86/hyperv/hv_apic.c 		orig_apic.send_IPI(cpu, vector);
cpu                55 arch/x86/hyperv/hv_init.c static int hv_cpu_init(unsigned int cpu)
cpu               192 arch/x86/hyperv/hv_init.c static int hv_cpu_die(unsigned int cpu)
cpu               207 arch/x86/hyperv/hv_init.c 	if (hv_vp_assist_page && hv_vp_assist_page[cpu])
cpu               214 arch/x86/hyperv/hv_init.c 	if (re_ctrl.target_vp == hv_vp_index[cpu]) {
cpu               216 arch/x86/hyperv/hv_init.c 		new_cpu = cpumask_any_but(cpu_online_mask, cpu);
cpu                21 arch/x86/hyperv/hv_spinlock.c static void hv_qlock_kick(int cpu)
cpu                23 arch/x86/hyperv/hv_spinlock.c 	apic->send_IPI(cpu, X86_PLATFORM_IPI_VECTOR);
cpu                58 arch/x86/hyperv/mmu.c 	int cpu, vcpu, gva_n, max_gvas;
cpu               115 arch/x86/hyperv/mmu.c 		for_each_cpu(cpu, cpus) {
cpu               116 arch/x86/hyperv/mmu.c 			vcpu = hv_cpu_number_to_vp_number(cpu);
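
__send_ipi_one() in hv_apic.c wraps a single target CPU in a one-bit cpumask so the multi-CPU hypercall path can be reused; if that path declines, hv_send_ipi() falls back to the original APIC send_IPI. A sketch of the wrapping step, with my_send_ipi_mask() as an illustrative stand-in for the mask-based sender:

#include <linux/cpumask.h>

static bool my_send_ipi_mask(const struct cpumask *mask, int vector);	/* illustrative */

static bool my_send_ipi_one(int cpu, int vector)
{
	struct cpumask mask;

	cpumask_clear(&mask);
	cpumask_set_cpu(cpu, &mask);	/* exactly one target bit */
	return my_send_ipi_mask(&mask, vector);
}
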
cpu                38 arch/x86/include/asm/amd_nb.h 	unsigned int	 cpu;			/* CPU which controls MCA bank */
cpu                32 arch/x86/include/asm/apb_timer.h extern int arch_setup_apbt_irqs(int irq, int trigger, int mask, int cpu);
cpu               301 arch/x86/include/asm/apic.h 	void	(*send_IPI)(int cpu, int vector);
cpu               314 arch/x86/include/asm/apic.h 	u32	(*calc_dest_apicid)(unsigned int cpu);
cpu               354 arch/x86/include/asm/apic.h 	int (*x86_32_early_logical_apicid)(int cpu);
cpu               504 arch/x86/include/asm/apic.h extern u32 apic_default_calc_apicid(unsigned int cpu);
cpu               505 arch/x86/include/asm/apic.h extern u32 apic_flat_calc_apicid(unsigned int cpu);
cpu                 5 arch/x86/include/asm/cacheinfo.h void cacheinfo_amd_init_llc_id(struct cpuinfo_x86 *c, int cpu, u8 node_id);
cpu                 6 arch/x86/include/asm/cacheinfo.h void cacheinfo_hygon_init_llc_id(struct cpuinfo_x86 *c, int cpu, u8 node_id);
cpu                19 arch/x86/include/asm/cpu.h #define cpu_physical_id(cpu)			boot_cpu_physical_apicid
cpu                20 arch/x86/include/asm/cpu.h #define cpu_acpi_id(cpu)			0
cpu                26 arch/x86/include/asm/cpu.h 	struct cpu cpu;
cpu                34 arch/x86/include/asm/cpu.h extern int _debug_hotplug_cpu(int cpu, int action);
cpu               134 arch/x86/include/asm/cpu_entry_area.h extern struct cpu_entry_area *get_cpu_entry_area(int cpu);
cpu               136 arch/x86/include/asm/cpu_entry_area.h static inline struct entry_stack *cpu_entry_stack(int cpu)
cpu               138 arch/x86/include/asm/cpu_entry_area.h 	return &get_cpu_entry_area(cpu)->entry_stack_page.stack;
cpu                 5 arch/x86/include/asm/cpuidle_haltpoll.h void arch_haltpoll_enable(unsigned int cpu);
cpu                 6 arch/x86/include/asm/cpuidle_haltpoll.h void arch_haltpoll_disable(unsigned int cpu);
cpu                55 arch/x86/include/asm/desc.h static inline struct desc_struct *get_cpu_gdt_rw(unsigned int cpu)
cpu                57 arch/x86/include/asm/desc.h 	return per_cpu(gdt_page, cpu).gdt;
cpu                67 arch/x86/include/asm/desc.h static inline struct desc_struct *get_cpu_gdt_ro(int cpu)
cpu                69 arch/x86/include/asm/desc.h 	return (struct desc_struct *)&get_cpu_entry_area(cpu)->gdt;
cpu                79 arch/x86/include/asm/desc.h static inline phys_addr_t get_cpu_gdt_paddr(unsigned int cpu)
cpu                81 arch/x86/include/asm/desc.h 	return per_cpu_ptr_to_phys(get_cpu_gdt_rw(cpu));
cpu               123 arch/x86/include/asm/desc.h #define load_TLS(t, cpu)			native_load_tls(t, cpu)
cpu               184 arch/x86/include/asm/desc.h static inline void __set_tss_desc(unsigned cpu, unsigned int entry, struct x86_hw_tss *addr)
cpu               186 arch/x86/include/asm/desc.h 	struct desc_struct *d = get_cpu_gdt_rw(cpu);
cpu               194 arch/x86/include/asm/desc.h #define set_tss_desc(cpu, addr) __set_tss_desc(cpu, GDT_ENTRY_TSS, addr)
cpu               201 arch/x86/include/asm/desc.h 		unsigned cpu = smp_processor_id();
cpu               206 arch/x86/include/asm/desc.h 		write_gdt_entry(get_cpu_gdt_rw(cpu), GDT_ENTRY_LDT,
cpu               241 arch/x86/include/asm/desc.h 	int cpu = raw_smp_processor_id();
cpu               246 arch/x86/include/asm/desc.h 	fixmap_gdt = get_cpu_gdt_ro(cpu);
cpu               253 arch/x86/include/asm/desc.h 		load_direct_gdt(cpu);
cpu               258 arch/x86/include/asm/desc.h 		load_fixmap_gdt(cpu);
cpu               276 arch/x86/include/asm/desc.h static inline void native_load_tls(struct thread_struct *t, unsigned int cpu)
cpu               278 arch/x86/include/asm/desc.h 	struct desc_struct *gdt = get_cpu_gdt_rw(cpu);
cpu                13 arch/x86/include/asm/espfix.h extern void init_espfix_ap(int cpu);
cpu                15 arch/x86/include/asm/espfix.h static inline void init_espfix_ap(int cpu) { }
cpu               510 arch/x86/include/asm/fpu/internal.h static inline int fpregs_state_valid(struct fpu *fpu, unsigned int cpu)
cpu               512 arch/x86/include/asm/fpu/internal.h 	return fpu == this_cpu_read(fpu_fpregs_owner_ctx) && cpu == fpu->last_cpu;
cpu               537 arch/x86/include/asm/fpu/internal.h 	int cpu = smp_processor_id();
cpu               542 arch/x86/include/asm/fpu/internal.h 	if (!fpregs_state_valid(fpu, cpu)) {
cpu               545 arch/x86/include/asm/fpu/internal.h 		fpu->last_cpu = cpu;
cpu               572 arch/x86/include/asm/fpu/internal.h static inline void switch_fpu_prepare(struct fpu *old_fpu, int cpu)
cpu               578 arch/x86/include/asm/fpu/internal.h 			old_fpu->last_cpu = cpu;
cpu                57 arch/x86/include/asm/hardirq.h extern u64 arch_irq_stat_cpu(unsigned int cpu);
cpu                19 arch/x86/include/asm/irq.h extern int irq_init_percpu_irqstack(unsigned int cpu);
cpu              1031 arch/x86/include/asm/kvm_host.h 	void (*vcpu_load)(struct kvm_vcpu *vcpu, int cpu);
cpu              1137 arch/x86/include/asm/kvm_host.h 	void (*sched_in)(struct kvm_vcpu *kvm, int cpu);
cpu               335 arch/x86/include/asm/mce.h extern int mce_threshold_create_device(unsigned int cpu);
cpu               336 arch/x86/include/asm/mce.h extern int mce_threshold_remove_device(unsigned int cpu);
cpu               343 arch/x86/include/asm/mce.h static inline int mce_threshold_create_device(unsigned int cpu)		{ return 0; };
cpu               344 arch/x86/include/asm/mce.h static inline int mce_threshold_remove_device(unsigned int cpu)		{ return 0; };
cpu                35 arch/x86/include/asm/microcode.h 	enum ucode_state (*request_microcode_user) (int cpu,
cpu                38 arch/x86/include/asm/microcode.h 	enum ucode_state (*request_microcode_fw) (int cpu, struct device *,
cpu                41 arch/x86/include/asm/microcode.h 	void (*microcode_fini_cpu) (int cpu);
cpu                49 arch/x86/include/asm/microcode.h 	enum ucode_state (*apply_microcode) (int cpu);
cpu                50 arch/x86/include/asm/microcode.h 	int (*collect_cpu_info) (int cpu, struct cpu_signature *csig);
cpu                65 arch/x86/include/asm/mmu.h void leave_mm(int cpu);
cpu               211 arch/x86/include/asm/mshyperv.h static inline struct hv_vp_assist_page *hv_get_vp_assist_page(unsigned int cpu)
cpu               216 arch/x86/include/asm/mshyperv.h 	return hv_vp_assist_page[cpu];
cpu               250 arch/x86/include/asm/mshyperv.h static inline struct hv_vp_assist_page *hv_get_vp_assist_page(unsigned int cpu)
cpu               339 arch/x86/include/asm/msr.h int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
cpu               340 arch/x86/include/asm/msr.h int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
cpu               341 arch/x86/include/asm/msr.h int rdmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 *q);
cpu               342 arch/x86/include/asm/msr.h int wrmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 q);
cpu               345 arch/x86/include/asm/msr.h int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
cpu               346 arch/x86/include/asm/msr.h int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
cpu               347 arch/x86/include/asm/msr.h int rdmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 *q);
cpu               348 arch/x86/include/asm/msr.h int wrmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 q);
cpu               349 arch/x86/include/asm/msr.h int rdmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8]);
cpu               350 arch/x86/include/asm/msr.h int wrmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8]);
cpu               352 arch/x86/include/asm/msr.h static inline int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
cpu               357 arch/x86/include/asm/msr.h static inline int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
cpu               362 arch/x86/include/asm/msr.h static inline int rdmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 *q)
cpu               367 arch/x86/include/asm/msr.h static inline int wrmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 q)
cpu               382 arch/x86/include/asm/msr.h static inline int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no,
cpu               387 arch/x86/include/asm/msr.h static inline int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
cpu               391 arch/x86/include/asm/msr.h static inline int rdmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 *q)
cpu               395 arch/x86/include/asm/msr.h static inline int wrmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 q)
cpu               399 arch/x86/include/asm/msr.h static inline int rdmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8])
cpu               403 arch/x86/include/asm/msr.h static inline int wrmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8])
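
The msr.h block declares the cross-CPU MSR helpers for SMP and, in the !SMP branch, folds them back onto the local rdmsr/wrmsr. Typical usage reads or writes a model-specific register on a specific CPU without migrating there; the _safe variants return an error instead of faulting. A short usage sketch (MSR_IA32_APERF is a real MSR; the helper name is illustrative):

#include <linux/printk.h>
#include <asm/msr.h>

static void my_show_aperf(unsigned int cpu)
{
	u64 aperf;

	/* the _safe variant reports failure instead of taking a #GP */
	if (!rdmsrl_safe_on_cpu(cpu, MSR_IA32_APERF, &aperf))
		pr_info("CPU%u APERF=%llu\n", cpu, aperf);
}
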
cpu                42 arch/x86/include/asm/numa.h extern int numa_cpu_node(int cpu);
cpu                49 arch/x86/include/asm/numa.h static inline int numa_cpu_node(int cpu)
cpu                60 arch/x86/include/asm/numa.h extern void numa_set_node(int cpu, int node);
cpu                61 arch/x86/include/asm/numa.h extern void numa_clear_node(int cpu);
cpu                63 arch/x86/include/asm/numa.h extern void numa_add_cpu(int cpu);
cpu                64 arch/x86/include/asm/numa.h extern void numa_remove_cpu(int cpu);
cpu                66 arch/x86/include/asm/numa.h static inline void numa_set_node(int cpu, int node)	{ }
cpu                67 arch/x86/include/asm/numa.h static inline void numa_clear_node(int cpu)		{ }
cpu                69 arch/x86/include/asm/numa.h static inline void numa_add_cpu(int cpu)		{ }
cpu                70 arch/x86/include/asm/numa.h static inline void numa_remove_cpu(int cpu)		{ }
cpu                74 arch/x86/include/asm/numa.h void debug_cpumask_set_cpu(int cpu, int node, bool enable);
cpu                31 arch/x86/include/asm/paravirt.h __visible bool __native_vcpu_is_preempted(long cpu);
cpu                34 arch/x86/include/asm/paravirt.h static inline u64 paravirt_steal_clock(int cpu)
cpu                36 arch/x86/include/asm/paravirt.h 	return PVOP_CALL1(u64, time.steal_clock, cpu);
cpu                42 arch/x86/include/asm/paravirt.h 	pv_ops.cpu.io_delay();
cpu                44 arch/x86/include/asm/paravirt.h 	pv_ops.cpu.io_delay();
cpu                45 arch/x86/include/asm/paravirt.h 	pv_ops.cpu.io_delay();
cpu                46 arch/x86/include/asm/paravirt.h 	pv_ops.cpu.io_delay();
cpu                84 arch/x86/include/asm/paravirt.h 	PVOP_VCALL1(cpu.load_sp0, sp0);
cpu                91 arch/x86/include/asm/paravirt.h 	PVOP_VCALL4(cpu.cpuid, eax, ebx, ecx, edx);
cpu                99 arch/x86/include/asm/paravirt.h 	return PVOP_CALL1(unsigned long, cpu.get_debugreg, reg);
cpu               104 arch/x86/include/asm/paravirt.h 	PVOP_VCALL2(cpu.set_debugreg, reg, val);
cpu               109 arch/x86/include/asm/paravirt.h 	return PVOP_CALL0(unsigned long, cpu.read_cr0);
cpu               114 arch/x86/include/asm/paravirt.h 	PVOP_VCALL1(cpu.write_cr0, x);
cpu               139 arch/x86/include/asm/paravirt.h 	PVOP_VCALL1(cpu.write_cr4, x);
cpu               154 arch/x86/include/asm/paravirt.h 	PVOP_VCALL0(cpu.wbinvd);
cpu               161 arch/x86/include/asm/paravirt.h 	return PVOP_CALL1(u64, cpu.read_msr, msr);
cpu               167 arch/x86/include/asm/paravirt.h 	PVOP_VCALL3(cpu.write_msr, msr, low, high);
cpu               172 arch/x86/include/asm/paravirt.h 	return PVOP_CALL2(u64, cpu.read_msr_safe, msr, err);
cpu               178 arch/x86/include/asm/paravirt.h 	return PVOP_CALL3(int, cpu.write_msr_safe, msr, low, high);
cpu               225 arch/x86/include/asm/paravirt.h 	return PVOP_CALL1(u64, cpu.read_pmc, counter);
cpu               239 arch/x86/include/asm/paravirt.h 	PVOP_VCALL2(cpu.alloc_ldt, ldt, entries);
cpu               244 arch/x86/include/asm/paravirt.h 	PVOP_VCALL2(cpu.free_ldt, ldt, entries);
cpu               249 arch/x86/include/asm/paravirt.h 	PVOP_VCALL0(cpu.load_tr_desc);
cpu               253 arch/x86/include/asm/paravirt.h 	PVOP_VCALL1(cpu.load_gdt, dtr);
cpu               257 arch/x86/include/asm/paravirt.h 	PVOP_VCALL1(cpu.load_idt, dtr);
cpu               261 arch/x86/include/asm/paravirt.h 	PVOP_VCALL2(cpu.set_ldt, addr, entries);
cpu               265 arch/x86/include/asm/paravirt.h 	return PVOP_CALL0(unsigned long, cpu.store_tr);
cpu               269 arch/x86/include/asm/paravirt.h static inline void load_TLS(struct thread_struct *t, unsigned cpu)
cpu               271 arch/x86/include/asm/paravirt.h 	PVOP_VCALL2(cpu.load_tls, t, cpu);
cpu               277 arch/x86/include/asm/paravirt.h 	PVOP_VCALL1(cpu.load_gs_index, gs);
cpu               284 arch/x86/include/asm/paravirt.h 	PVOP_VCALL3(cpu.write_ldt_entry, dt, entry, desc);
cpu               290 arch/x86/include/asm/paravirt.h 	PVOP_VCALL4(cpu.write_gdt_entry, dt, entry, desc, type);
cpu               295 arch/x86/include/asm/paravirt.h 	PVOP_VCALL3(cpu.write_idt_entry, dt, entry, g);
cpu               299 arch/x86/include/asm/paravirt.h 	PVOP_VCALL1(cpu.set_iopl_mask, mask);
cpu               606 arch/x86/include/asm/paravirt.h 	PVOP_VCALL1(cpu.start_context_switch, prev);
cpu               611 arch/x86/include/asm/paravirt.h 	PVOP_VCALL1(cpu.end_context_switch, next);
cpu               655 arch/x86/include/asm/paravirt.h static __always_inline void pv_kick(int cpu)
cpu               657 arch/x86/include/asm/paravirt.h 	PVOP_VCALL1(lock.kick, cpu);
cpu               660 arch/x86/include/asm/paravirt.h static __always_inline bool pv_vcpu_is_preempted(long cpu)
cpu               662 arch/x86/include/asm/paravirt.h 	return PVOP_CALLEE1(bool, lock.vcpu_is_preempted, cpu);
cpu               666 arch/x86/include/asm/paravirt.h bool __raw_callee_save___native_vcpu_is_preempted(long cpu);
cpu               106 arch/x86/include/asm/paravirt_types.h 	unsigned long long (*steal_clock)(int cpu);
cpu               128 arch/x86/include/asm/paravirt_types.h 	void (*load_tls)(struct thread_struct *t, unsigned int cpu);
cpu               318 arch/x86/include/asm/paravirt_types.h 	void (*kick)(int cpu);
cpu               329 arch/x86/include/asm/paravirt_types.h 	struct pv_cpu_ops	cpu;
cpu               189 arch/x86/include/asm/perf_event_p4.h static inline int p4_ht_thread(int cpu)
cpu               193 arch/x86/include/asm/perf_event_p4.h 		return cpu != cpumask_first(this_cpu_cpumask_var_ptr(cpu_sibling_map));
cpu               198 arch/x86/include/asm/perf_event_p4.h static inline int p4_should_swap_ts(u64 config, int cpu)
cpu               200 arch/x86/include/asm/perf_event_p4.h 	return p4_ht_config_thread(config) ^ p4_ht_thread(cpu);
cpu               203 arch/x86/include/asm/perf_event_p4.h static inline u32 p4_default_cccr_conf(int cpu)
cpu               212 arch/x86/include/asm/perf_event_p4.h 	if (!p4_ht_thread(cpu))
cpu               220 arch/x86/include/asm/perf_event_p4.h static inline u32 p4_default_escr_conf(int cpu, int exclude_os, int exclude_usr)
cpu               224 arch/x86/include/asm/perf_event_p4.h 	if (!p4_ht_thread(cpu)) {
cpu                45 arch/x86/include/asm/preempt.h #define init_idle_preempt_count(p, cpu) do { \
cpu                46 arch/x86/include/asm/preempt.h 	per_cpu(__preempt_count, (cpu)) = PREEMPT_ENABLED; \
cpu               166 arch/x86/include/asm/processor.h #define cpu_data(cpu)		per_cpu(cpu_info, cpu)
cpu               169 arch/x86/include/asm/processor.h #define cpu_data(cpu)		boot_cpu_data
cpu               401 arch/x86/include/asm/processor.h static inline unsigned long cpu_kernelmode_gs_base(int cpu)
cpu               403 arch/x86/include/asm/processor.h 	return (unsigned long)per_cpu(fixed_percpu_data.gs_base, cpu);
cpu               937 arch/x86/include/asm/processor.h extern u16 amd_get_nb_id(int cpu);
cpu               940 arch/x86/include/asm/processor.h static inline u16 amd_get_nb_id(int cpu)		{ return 0; }
cpu                59 arch/x86/include/asm/qspinlock.h static inline bool vcpu_is_preempted(long cpu)
cpu                61 arch/x86/include/asm/qspinlock.h 	return pv_vcpu_is_preempted(cpu);
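
qspinlock.h routes vcpu_is_preempted() through the paravirt hook so a spin-wait can notice that the vCPU it is waiting on has been scheduled out by the hypervisor and stop burning cycles. A minimal sketch of a waiter that uses it; the flag and owner_cpu bookkeeping are illustrative assumptions:

#include <linux/processor.h>
#include <linux/sched.h>

static bool my_spin_until(bool *done, int owner_cpu)
{
	while (!READ_ONCE(*done)) {
		if (vcpu_is_preempted(owner_cpu))
			return false;	/* owner vCPU scheduled out: caller should sleep */
		cpu_relax();
	}
	return true;
}
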
cpu               249 arch/x86/include/asm/segment.h static inline unsigned long vdso_encode_cpunode(int cpu, unsigned long node)
cpu               251 arch/x86/include/asm/segment.h 	return (node << VDSO_CPUNODE_BITS) | cpu;
cpu               254 arch/x86/include/asm/segment.h static inline void vdso_read_cpunode(unsigned *cpu, unsigned *node)
cpu               271 arch/x86/include/asm/segment.h 	if (cpu)
cpu               272 arch/x86/include/asm/segment.h 		*cpu = (p & VDSO_CPUNODE_MASK);
cpu                32 arch/x86/include/asm/smp.h static inline struct cpumask *cpu_llc_shared_mask(int cpu)
cpu                34 arch/x86/include/asm/smp.h 	return per_cpu(cpu_llc_shared_map, cpu);
cpu                53 arch/x86/include/asm/smp.h 	void (*smp_send_reschedule)(int cpu);
cpu                55 arch/x86/include/asm/smp.h 	int (*cpu_up)(unsigned cpu, struct task_struct *tidle);
cpu                57 arch/x86/include/asm/smp.h 	void (*cpu_die)(unsigned int cpu);
cpu                61 arch/x86/include/asm/smp.h 	void (*send_call_func_single_ipi)(int cpu);
cpu                65 arch/x86/include/asm/smp.h extern void set_cpu_sibling_map(int cpu);
cpu                95 arch/x86/include/asm/smp.h static inline int __cpu_up(unsigned int cpu, struct task_struct *tidle)
cpu                97 arch/x86/include/asm/smp.h 	return smp_ops.cpu_up(cpu, tidle);
cpu               105 arch/x86/include/asm/smp.h static inline void __cpu_die(unsigned int cpu)
cpu               107 arch/x86/include/asm/smp.h 	smp_ops.cpu_die(cpu);
cpu               115 arch/x86/include/asm/smp.h static inline void smp_send_reschedule(int cpu)
cpu               117 arch/x86/include/asm/smp.h 	smp_ops.smp_send_reschedule(cpu);
cpu               120 arch/x86/include/asm/smp.h static inline void arch_send_call_function_single_ipi(int cpu)
cpu               122 arch/x86/include/asm/smp.h 	smp_ops.send_call_func_single_ipi(cpu);
cpu               138 arch/x86/include/asm/smp.h int common_cpu_die(unsigned int cpu);
cpu               139 arch/x86/include/asm/smp.h void native_cpu_die(unsigned int cpu);
cpu               143 arch/x86/include/asm/smp.h void wbinvd_on_cpu(int cpu);
cpu               146 arch/x86/include/asm/smp.h void native_smp_send_reschedule(int cpu);
cpu               148 arch/x86/include/asm/smp.h void native_send_call_func_single_ipi(int cpu);
cpu               149 arch/x86/include/asm/smp.h void x86_idle_thread_init(unsigned int cpu, struct task_struct *idle);
cpu               159 arch/x86/include/asm/smp.h #define cpu_physical_id(cpu)	per_cpu(x86_cpu_to_apicid, cpu)
cpu               160 arch/x86/include/asm/smp.h #define cpu_acpi_id(cpu)	per_cpu(x86_cpu_to_acpiid, cpu)
cpu               177 arch/x86/include/asm/smp.h #define wbinvd_on_cpu(cpu)     wbinvd()
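
smp.h shows the smp_ops indirection: smp_send_reschedule(), __cpu_up() and __cpu_die() are thin wrappers around the ops table so bare metal, Xen and Hyper-V can each plug in their own implementations, while callers stick to the generic cross-CPU helpers. A sketch of the most common caller-side pattern, running a function on one specific CPU and waiting for it (helper names are illustrative):

#include <linux/smp.h>

static void my_read_local_state(void *info)
{
	*(unsigned int *)info = smp_processor_id();	/* runs on the target CPU */
}

static unsigned int my_query_cpu(int cpu)
{
	unsigned int where = 0;

	smp_call_function_single(cpu, my_read_local_state, &where, 1);
	return where;
}
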
cpu                93 arch/x86/include/asm/stackprotector.h static inline void setup_stack_canary_segment(int cpu)
cpu                96 arch/x86/include/asm/stackprotector.h 	unsigned long canary = (unsigned long)&per_cpu(stack_canary, cpu);
cpu                97 arch/x86/include/asm/stackprotector.h 	struct desc_struct *gdt_table = get_cpu_gdt_rw(cpu);
cpu               119 arch/x86/include/asm/stackprotector.h static inline void setup_stack_canary_segment(int cpu)
cpu                48 arch/x86/include/asm/topology.h extern int __cpu_to_node(int cpu);
cpu                51 arch/x86/include/asm/topology.h extern int early_cpu_to_node(int cpu);
cpu                56 arch/x86/include/asm/topology.h static inline int early_cpu_to_node(int cpu)
cpu                58 arch/x86/include/asm/topology.h 	return early_per_cpu(x86_cpu_to_node_map, cpu);
cpu                94 arch/x86/include/asm/topology.h static inline int early_cpu_to_node(int cpu)
cpu               105 arch/x86/include/asm/topology.h extern const struct cpumask *cpu_coregroup_mask(int cpu);
cpu               107 arch/x86/include/asm/topology.h #define topology_logical_package_id(cpu)	(cpu_data(cpu).logical_proc_id)
cpu               108 arch/x86/include/asm/topology.h #define topology_physical_package_id(cpu)	(cpu_data(cpu).phys_proc_id)
cpu               109 arch/x86/include/asm/topology.h #define topology_logical_die_id(cpu)		(cpu_data(cpu).logical_die_id)
cpu               110 arch/x86/include/asm/topology.h #define topology_die_id(cpu)			(cpu_data(cpu).cpu_die_id)
cpu               111 arch/x86/include/asm/topology.h #define topology_core_id(cpu)			(cpu_data(cpu).cpu_core_id)
cpu               114 arch/x86/include/asm/topology.h #define topology_die_cpumask(cpu)		(per_cpu(cpu_die_map, cpu))
cpu               115 arch/x86/include/asm/topology.h #define topology_core_cpumask(cpu)		(per_cpu(cpu_core_map, cpu))
cpu               116 arch/x86/include/asm/topology.h #define topology_sibling_cpumask(cpu)		(per_cpu(cpu_sibling_map, cpu))
cpu               135 arch/x86/include/asm/topology.h int topology_update_package_map(unsigned int apicid, unsigned int cpu);
cpu               136 arch/x86/include/asm/topology.h int topology_update_die_map(unsigned int dieid, unsigned int cpu);
cpu               138 arch/x86/include/asm/topology.h int topology_phys_to_logical_die(unsigned int die, unsigned int cpu);
cpu               139 arch/x86/include/asm/topology.h bool topology_is_primary_thread(unsigned int cpu);
cpu               144 arch/x86/include/asm/topology.h topology_update_package_map(unsigned int apicid, unsigned int cpu) { return 0; }
cpu               146 arch/x86/include/asm/topology.h topology_update_die_map(unsigned int dieid, unsigned int cpu) { return 0; }
cpu               149 arch/x86/include/asm/topology.h 		unsigned int cpu) { return 0; }
cpu               152 arch/x86/include/asm/topology.h static inline bool topology_is_primary_thread(unsigned int cpu) { return true; }
cpu               144 arch/x86/include/asm/trace/irq_vectors.h 		 unsigned int cpu, unsigned int apicdest),
cpu               146 arch/x86/include/asm/trace/irq_vectors.h 	TP_ARGS(irq, vector, cpu, apicdest),
cpu               151 arch/x86/include/asm/trace/irq_vectors.h 		__field(	unsigned int,	cpu		)
cpu               158 arch/x86/include/asm/trace/irq_vectors.h 		__entry->cpu		= cpu;
cpu               163 arch/x86/include/asm/trace/irq_vectors.h 		  __entry->irq, __entry->vector, __entry->cpu,
cpu               170 arch/x86/include/asm/trace/irq_vectors.h 		 unsigned int cpu, unsigned int prev_vector,
cpu               173 arch/x86/include/asm/trace/irq_vectors.h 	TP_ARGS(irq, vector, cpu, prev_vector, prev_cpu),
cpu               178 arch/x86/include/asm/trace/irq_vectors.h 		__field(	unsigned int,	cpu		)
cpu               186 arch/x86/include/asm/trace/irq_vectors.h 		__entry->cpu		= cpu;
cpu               193 arch/x86/include/asm/trace/irq_vectors.h 		  __entry->irq, __entry->vector, __entry->cpu,
cpu               200 arch/x86/include/asm/trace/irq_vectors.h 		 unsigned int cpu, unsigned int prev_vector,		\
cpu               202 arch/x86/include/asm/trace/irq_vectors.h 	TP_ARGS(irq, vector, cpu, prev_vector, prev_cpu), NULL, NULL);	\
cpu               364 arch/x86/include/asm/trace/irq_vectors.h 	TP_PROTO(unsigned int irq, unsigned int cpu, unsigned int vector,
cpu               367 arch/x86/include/asm/trace/irq_vectors.h 	TP_ARGS(irq, cpu, vector, is_managed),
cpu               371 arch/x86/include/asm/trace/irq_vectors.h 		__field(	unsigned int,	cpu		)
cpu               378 arch/x86/include/asm/trace/irq_vectors.h 		__entry->cpu		= cpu;
cpu               384 arch/x86/include/asm/trace/irq_vectors.h 		  __entry->irq, __entry->cpu, __entry->vector,
cpu                61 arch/x86/include/asm/tsc.h extern void check_tsc_sync_source(int cpu);
cpu                66 arch/x86/include/asm/tsc.h static inline void check_tsc_sync_source(int cpu) { }
cpu                57 arch/x86/include/asm/uv/uv_bau.h #define cpubit_isset(cpu, bau_local_cpumask) \
cpu                58 arch/x86/include/asm/uv/uv_bau.h 	test_bit((cpu), (bau_local_cpumask).bits)
cpu               630 arch/x86/include/asm/uv/uv_bau.h 	short			cpu;
cpu               198 arch/x86/include/asm/uv/uv_hub.h #define uv_cpu_info_per(cpu)	(&per_cpu(__uv_cpu_info, cpu))
cpu               201 arch/x86/include/asm/uv/uv_hub.h #define	uv_cpu_scir_info(cpu)	(&uv_cpu_info_per(cpu)->scir)
cpu               216 arch/x86/include/asm/uv/uv_hub.h static inline struct uv_hub_info_s *uv_cpu_hub_info(int cpu)
cpu               218 arch/x86/include/asm/uv/uv_hub.h 	return (struct uv_hub_info_s *)uv_cpu_info_per(cpu)->p_uv_hub_info;
cpu               712 arch/x86/include/asm/uv/uv_hub.h static inline int uv_cpu_blade_processor_id(int cpu)
cpu               714 arch/x86/include/asm/uv/uv_hub.h 	return uv_cpu_info_per(cpu)->blade_cpu_id;
cpu               741 arch/x86/include/asm/uv/uv_hub.h static inline int uv_cpu_to_blade_id(int cpu)
cpu               743 arch/x86/include/asm/uv/uv_hub.h 	return uv_node_to_blade_id(cpu_to_node(cpu));
cpu               771 arch/x86/include/asm/uv/uv_hub.h static inline int uv_cpu_to_pnode(int cpu)
cpu               773 arch/x86/include/asm/uv/uv_hub.h 	return uv_cpu_hub_info(cpu)->pnode;
cpu               847 arch/x86/include/asm/uv/uv_hub.h #define uv_cpu_nmi_per(cpu)		(per_cpu(uv_cpu_nmi, cpu))
cpu               848 arch/x86/include/asm/uv/uv_hub.h #define uv_hub_nmi_per(cpu)		(uv_cpu_nmi_per(cpu).hub)
cpu               870 arch/x86/include/asm/uv/uv_hub.h static inline void uv_set_cpu_scir_bits(int cpu, unsigned char value)
cpu               872 arch/x86/include/asm/uv/uv_hub.h 	if (uv_cpu_scir_info(cpu)->state != value) {
cpu               873 arch/x86/include/asm/uv/uv_hub.h 		uv_write_global_mmr8(uv_cpu_to_pnode(cpu),
cpu               874 arch/x86/include/asm/uv/uv_hub.h 				uv_cpu_scir_info(cpu)->offset, value);
cpu               875 arch/x86/include/asm/uv/uv_hub.h 		uv_cpu_scir_info(cpu)->state = value;
cpu               244 arch/x86/include/asm/x86_init.h 	void (*pin_vcpu)(int cpu);
cpu               309 arch/x86/include/asm/x86_init.h extern void x86_op_int_noop(int cpu);
cpu                28 arch/x86/include/uapi/asm/mce.h 	__u8  cpu;		/* CPU number; obsoleted by extcpu */
cpu               160 arch/x86/kernel/acpi/boot.c 	int cpu;
cpu               175 arch/x86/kernel/acpi/boot.c 	cpu = generic_processor_info(id, ver);
cpu               176 arch/x86/kernel/acpi/boot.c 	if (cpu >= 0)
cpu               177 arch/x86/kernel/acpi/boot.c 		early_per_cpu(x86_cpu_to_acpiid, cpu) = acpiid;
cpu               179 arch/x86/kernel/acpi/boot.c 	return cpu;
cpu               735 arch/x86/kernel/acpi/boot.c static int acpi_map_cpu2node(acpi_handle handle, int cpu, int physid)
cpu               743 arch/x86/kernel/acpi/boot.c 		numa_set_node(cpu, nid);
cpu               752 arch/x86/kernel/acpi/boot.c 	int cpu;
cpu               754 arch/x86/kernel/acpi/boot.c 	cpu = acpi_register_lapic(physid, acpi_id, ACPI_MADT_ENABLED);
cpu               755 arch/x86/kernel/acpi/boot.c 	if (cpu < 0) {
cpu               757 arch/x86/kernel/acpi/boot.c 		return cpu;
cpu               761 arch/x86/kernel/acpi/boot.c 	acpi_map_cpu2node(handle, cpu, physid);
cpu               763 arch/x86/kernel/acpi/boot.c 	*pcpu = cpu;
cpu               768 arch/x86/kernel/acpi/boot.c int acpi_unmap_cpu(int cpu)
cpu               771 arch/x86/kernel/acpi/boot.c 	set_apicid_to_node(per_cpu(x86_cpu_to_apicid, cpu), NUMA_NO_NODE);
cpu               774 arch/x86/kernel/acpi/boot.c 	per_cpu(x86_cpu_to_apicid, cpu) = -1;
cpu               775 arch/x86/kernel/acpi/boot.c 	set_cpu_present(cpu, false);
cpu                30 arch/x86/kernel/acpi/cstate.c 					unsigned int cpu)
cpu                32 arch/x86/kernel/acpi/cstate.c 	struct cpuinfo_x86 *c = &cpu_data(cpu);
cpu               145 arch/x86/kernel/acpi/cstate.c int acpi_processor_ffh_cstate_probe(unsigned int cpu,
cpu               149 arch/x86/kernel/acpi/cstate.c 	struct cpuinfo_x86 *c = &cpu_data(cpu);
cpu               158 arch/x86/kernel/acpi/cstate.c 	percpu_entry = per_cpu_ptr(cpu_cstate_entry, cpu);
cpu               164 arch/x86/kernel/acpi/cstate.c 	retval = call_on_cpu(cpu, acpi_processor_ffh_cstate_probe_cpu, cx,
cpu               186 arch/x86/kernel/acpi/cstate.c 	unsigned int cpu = smp_processor_id();
cpu               189 arch/x86/kernel/acpi/cstate.c 	percpu_entry = per_cpu_ptr(cpu_cstate_entry, cpu);
cpu               379 arch/x86/kernel/amd_nb.c int amd_get_subcaches(int cpu)
cpu               381 arch/x86/kernel/amd_nb.c 	struct pci_dev *link = node_to_amd_nb(amd_get_nb_id(cpu))->link;
cpu               389 arch/x86/kernel/amd_nb.c 	return (mask >> (4 * cpu_data(cpu).cpu_core_id)) & 0xf;
cpu               392 arch/x86/kernel/amd_nb.c int amd_set_subcaches(int cpu, unsigned long mask)
cpu               395 arch/x86/kernel/amd_nb.c 	struct amd_northbridge *nb = node_to_amd_nb(amd_get_nb_id(cpu));
cpu               415 arch/x86/kernel/amd_nb.c 	cuid = cpu_data(cpu).cpu_core_id;
cpu                60 arch/x86/kernel/apb_timer.c 	int					cpu;
cpu               171 arch/x86/kernel/apb_timer.c 	irq_set_affinity(adev->irq, cpumask_of(adev->cpu));
cpu               178 arch/x86/kernel/apb_timer.c 	int cpu;
cpu               181 arch/x86/kernel/apb_timer.c 	cpu = smp_processor_id();
cpu               182 arch/x86/kernel/apb_timer.c 	if (!cpu)
cpu               187 arch/x86/kernel/apb_timer.c 		adev->timer = dw_apb_clockevent_init(cpu, adev->name,
cpu               196 arch/x86/kernel/apb_timer.c 	       cpu, adev->name, adev->cpu);
cpu               214 arch/x86/kernel/apb_timer.c static int apbt_cpu_dead(unsigned int cpu)
cpu               216 arch/x86/kernel/apb_timer.c 	struct apbt_dev *adev = &per_cpu(cpu_apbt_dev, cpu);
cpu               220 arch/x86/kernel/apb_timer.c 		pr_debug("skipping APBT CPU %u offline\n", cpu);
cpu               222 arch/x86/kernel/apb_timer.c 		pr_debug("APBT clockevent for cpu %u offline\n", cpu);
cpu               331 arch/x86/kernel/apb_timer.c 		adev->cpu = i;
cpu              1600 arch/x86/kernel/apic/apic.c 	int cpu = smp_processor_id();
cpu              1647 arch/x86/kernel/apic/apic.c 		logical_apicid = early_per_cpu(x86_cpu_to_logical_apicid, cpu);
cpu              1652 arch/x86/kernel/apic/apic.c 		early_per_cpu(x86_cpu_to_logical_apicid, cpu) = ldr_apicid;
cpu              1727 arch/x86/kernel/apic/apic.c 	if (!cpu && (pic_mode || !value || skip_ioapic_setup)) {
cpu              1729 arch/x86/kernel/apic/apic.c 		apic_printk(APIC_VERBOSE, "enabled ExtINT on CPU#%d\n", cpu);
cpu              1732 arch/x86/kernel/apic/apic.c 		apic_printk(APIC_VERBOSE, "masked ExtINT on CPU#%d\n", cpu);
cpu              1740 arch/x86/kernel/apic/apic.c 	if ((!cpu && apic_extnmi != APIC_EXTNMI_NONE) ||
cpu              1753 arch/x86/kernel/apic/apic.c 	if (!cpu)
cpu              2399 arch/x86/kernel/apic/apic.c 	int cpu, max = nr_cpu_ids;
cpu              2471 arch/x86/kernel/apic/apic.c 		cpu = 0;
cpu              2476 arch/x86/kernel/apic/apic.c 		cpu = allocate_logical_cpuid(apicid);
cpu              2477 arch/x86/kernel/apic/apic.c 		if (cpu < 0) {
cpu              2488 arch/x86/kernel/apic/apic.c 			   cpu, apicid);
cpu              2494 arch/x86/kernel/apic/apic.c 			boot_cpu_apic_version, cpu, version);
cpu              2501 arch/x86/kernel/apic/apic.c 	early_per_cpu(x86_cpu_to_apicid, cpu) = apicid;
cpu              2502 arch/x86/kernel/apic/apic.c 	early_per_cpu(x86_bios_cpu_apicid, cpu) = apicid;
cpu              2505 arch/x86/kernel/apic/apic.c 	early_per_cpu(x86_cpu_to_logical_apicid, cpu) =
cpu              2506 arch/x86/kernel/apic/apic.c 		apic->x86_32_early_logical_apicid(cpu);
cpu              2508 arch/x86/kernel/apic/apic.c 	set_cpu_possible(cpu, true);
cpu              2510 arch/x86/kernel/apic/apic.c 	set_cpu_present(cpu, true);
cpu              2513 arch/x86/kernel/apic/apic.c 	return cpu;
cpu                 9 arch/x86/kernel/apic/apic_common.c u32 apic_default_calc_apicid(unsigned int cpu)
cpu                11 arch/x86/kernel/apic/apic_common.c 	return per_cpu(x86_cpu_to_apicid, cpu);
cpu                14 arch/x86/kernel/apic/apic_common.c u32 apic_flat_calc_apicid(unsigned int cpu)
cpu                16 arch/x86/kernel/apic/apic_common.c 	return 1U << cpu;
cpu                71 arch/x86/kernel/apic/apic_flat_64.c 	int cpu = smp_processor_id();
cpu                73 arch/x86/kernel/apic/apic_flat_64.c 	if (cpu < BITS_PER_LONG)
cpu                74 arch/x86/kernel/apic/apic_flat_64.c 		__clear_bit(cpu, &mask);
cpu                17 arch/x86/kernel/apic/apic_noop.c static void noop_send_IPI(int cpu, int vector) { }
cpu                83 arch/x86/kernel/apic/apic_noop.c static int noop_x86_32_early_logical_apicid(int cpu)
cpu                94 arch/x86/kernel/apic/apic_numachip.c static void numachip_send_IPI_one(int cpu, int vector)
cpu                96 arch/x86/kernel/apic/apic_numachip.c 	int local_apicid, apicid = per_cpu(x86_cpu_to_apicid, cpu);
cpu               121 arch/x86/kernel/apic/apic_numachip.c 	unsigned int cpu;
cpu               123 arch/x86/kernel/apic/apic_numachip.c 	for_each_cpu(cpu, mask)
cpu               124 arch/x86/kernel/apic/apic_numachip.c 		numachip_send_IPI_one(cpu, vector);
cpu               131 arch/x86/kernel/apic/apic_numachip.c 	unsigned int cpu;
cpu               133 arch/x86/kernel/apic/apic_numachip.c 	for_each_cpu(cpu, mask) {
cpu               134 arch/x86/kernel/apic/apic_numachip.c 		if (cpu != this_cpu)
cpu               135 arch/x86/kernel/apic/apic_numachip.c 			numachip_send_IPI_one(cpu, vector);
cpu               142 arch/x86/kernel/apic/apic_numachip.c 	unsigned int cpu;
cpu               144 arch/x86/kernel/apic/apic_numachip.c 	for_each_online_cpu(cpu) {
cpu               145 arch/x86/kernel/apic/apic_numachip.c 		if (cpu != this_cpu)
cpu               146 arch/x86/kernel/apic/apic_numachip.c 			numachip_send_IPI_one(cpu, vector);
cpu                30 arch/x86/kernel/apic/bigsmp_32.c static int bigsmp_early_logical_apicid(int cpu)
cpu                33 arch/x86/kernel/apic/bigsmp_32.c 	return early_per_cpu(x86_cpu_to_apicid, cpu);
cpu               173 arch/x86/kernel/apic/bigsmp_32.c 	unsigned int cpu;
cpu               180 arch/x86/kernel/apic/bigsmp_32.c 	for_each_possible_cpu(cpu) {
cpu               182 arch/x86/kernel/apic/bigsmp_32.c 				  cpu) == BAD_APICID)
cpu               184 arch/x86/kernel/apic/bigsmp_32.c 		early_per_cpu(x86_cpu_to_logical_apicid, cpu) =
cpu               185 arch/x86/kernel/apic/bigsmp_32.c 			bigsmp_early_logical_apicid(cpu);
cpu                64 arch/x86/kernel/apic/ipi.c void native_smp_send_reschedule(int cpu)
cpu                66 arch/x86/kernel/apic/ipi.c 	if (unlikely(cpu_is_offline(cpu))) {
cpu                67 arch/x86/kernel/apic/ipi.c 		WARN(1, "sched: Unexpected reschedule of offline CPU#%d!\n", cpu);
cpu                70 arch/x86/kernel/apic/ipi.c 	apic->send_IPI(cpu, RESCHEDULE_VECTOR);
cpu                73 arch/x86/kernel/apic/ipi.c void native_send_call_func_single_ipi(int cpu)
cpu                75 arch/x86/kernel/apic/ipi.c 	apic->send_IPI(cpu, CALL_FUNCTION_SINGLE_VECTOR);
cpu                81 arch/x86/kernel/apic/ipi.c 		unsigned int cpu = smp_processor_id();
cpu                83 arch/x86/kernel/apic/ipi.c 		if (!cpumask_or_equal(mask, cpumask_of(cpu), cpu_online_mask))
cpu                86 arch/x86/kernel/apic/ipi.c 		if (cpumask_test_cpu(cpu, mask))
cpu               174 arch/x86/kernel/apic/ipi.c void default_send_IPI_single_phys(int cpu, int vector)
cpu               179 arch/x86/kernel/apic/ipi.c 	__default_send_IPI_dest_field(per_cpu(x86_cpu_to_apicid, cpu),
cpu               224 arch/x86/kernel/apic/ipi.c void default_send_IPI_single(int cpu, int vector)
cpu               226 arch/x86/kernel/apic/ipi.c 	apic->send_IPI_mask(cpumask_of(cpu), vector);
cpu                56 arch/x86/kernel/apic/local.h void default_send_IPI_single(int cpu, int vector);
cpu                57 arch/x86/kernel/apic/local.h void default_send_IPI_single_phys(int cpu, int vector);
cpu                66 arch/x86/kernel/apic/msi.c 	unsigned int cpu;
cpu                70 arch/x86/kernel/apic/msi.c 	cpu = cpumask_first(irq_data_get_effective_affinity_mask(irqd));
cpu               103 arch/x86/kernel/apic/msi.c 	if (WARN_ON_ONCE(cpu != smp_processor_id())) {
cpu                18 arch/x86/kernel/apic/probe_32.c static int default_x86_32_early_logical_apicid(int cpu)
cpu                20 arch/x86/kernel/apic/probe_32.c 	return 1 << cpu;
cpu                30 arch/x86/kernel/apic/vector.c 	unsigned int		cpu;
cpu               118 arch/x86/kernel/apic/vector.c 				unsigned int cpu)
cpu               125 arch/x86/kernel/apic/vector.c 	apicd->hw_irq_cfg.dest_apicid = apic->calc_dest_apicid(cpu);
cpu               126 arch/x86/kernel/apic/vector.c 	irq_data_update_effective_affinity(irqd, cpumask_of(cpu));
cpu               127 arch/x86/kernel/apic/vector.c 	trace_vector_config(irqd->irq, vector, cpu,
cpu               141 arch/x86/kernel/apic/vector.c 			    apicd->cpu);
cpu               160 arch/x86/kernel/apic/vector.c 	if (cpu_online(apicd->cpu)) {
cpu               163 arch/x86/kernel/apic/vector.c 		apicd->prev_cpu = apicd->cpu;
cpu               165 arch/x86/kernel/apic/vector.c 		irq_matrix_free(vector_matrix, apicd->cpu, apicd->vector,
cpu               171 arch/x86/kernel/apic/vector.c 	apicd->cpu = newcpu;
cpu               178 arch/x86/kernel/apic/vector.c 	unsigned int cpu = cpumask_first(cpu_online_mask);
cpu               180 arch/x86/kernel/apic/vector.c 	apic_update_irq_cfg(irqd, MANAGED_IRQ_SHUTDOWN_VECTOR, cpu);
cpu               225 arch/x86/kernel/apic/vector.c 	unsigned int cpu = apicd->cpu;
cpu               235 arch/x86/kernel/apic/vector.c 	if (vector && cpu_online(cpu) && cpumask_test_cpu(cpu, dest))
cpu               247 arch/x86/kernel/apic/vector.c 	vector = irq_matrix_alloc(vector_matrix, dest, resvd, &cpu);
cpu               251 arch/x86/kernel/apic/vector.c 	apic_update_vector(irqd, vector, cpu);
cpu               252 arch/x86/kernel/apic/vector.c 	apic_update_irq_cfg(irqd, vector, cpu);
cpu               312 arch/x86/kernel/apic/vector.c 	int vector, cpu;
cpu               317 arch/x86/kernel/apic/vector.c 	if (apicd->vector && cpumask_test_cpu(apicd->cpu, vector_searchmask))
cpu               320 arch/x86/kernel/apic/vector.c 					  &cpu);
cpu               324 arch/x86/kernel/apic/vector.c 	apic_update_vector(irqd, vector, cpu);
cpu               325 arch/x86/kernel/apic/vector.c 	apic_update_irq_cfg(irqd, vector, cpu);
cpu               340 arch/x86/kernel/apic/vector.c 	trace_vector_clear(irqd->irq, vector, apicd->cpu, apicd->prev_vector,
cpu               343 arch/x86/kernel/apic/vector.c 	per_cpu(vector_irq, apicd->cpu)[vector] = VECTOR_SHUTDOWN;
cpu               344 arch/x86/kernel/apic/vector.c 	irq_matrix_free(vector_matrix, apicd->cpu, vector, managed);
cpu               507 arch/x86/kernel/apic/vector.c 	apicd->cpu = 0;
cpu               516 arch/x86/kernel/apic/vector.c 		apic_update_irq_cfg(irqd, apicd->vector, apicd->cpu);
cpu               617 arch/x86/kernel/apic/vector.c 	seq_printf(m, "%*sTarget: %5u\n", ind, "", apicd.cpu);
cpu               807 arch/x86/kernel/apic/vector.c 	apic->send_IPI(apicd->cpu, apicd->vector);
cpu               837 arch/x86/kernel/apic/vector.c 	unsigned int cpu = apicd->prev_cpu;
cpu               848 arch/x86/kernel/apic/vector.c 	trace_vector_free_moved(apicd->irq, cpu, vector, managed);
cpu               849 arch/x86/kernel/apic/vector.c 	irq_matrix_free(vector_matrix, cpu, vector, managed);
cpu               850 arch/x86/kernel/apic/vector.c 	per_cpu(vector_irq, cpu)[vector] = VECTOR_UNUSED;
cpu               892 arch/x86/kernel/apic/vector.c 	unsigned int cpu;
cpu               896 arch/x86/kernel/apic/vector.c 	cpu = apicd->prev_cpu;
cpu               897 arch/x86/kernel/apic/vector.c 	if (cpu_online(cpu)) {
cpu               898 arch/x86/kernel/apic/vector.c 		hlist_add_head(&apicd->clist, per_cpu_ptr(&cleanup_list, cpu));
cpu               899 arch/x86/kernel/apic/vector.c 		apic->send_IPI(cpu, IRQ_MOVE_CLEANUP_VECTOR);
cpu               923 arch/x86/kernel/apic/vector.c 	if (vector == apicd->vector && apicd->cpu == smp_processor_id())
cpu              1030 arch/x86/kernel/apic/vector.c 	unsigned int rsvd, avl, tomove, cpu = smp_processor_id();
cpu              1038 arch/x86/kernel/apic/vector.c 			cpu, tomove, avl);
cpu              1175 arch/x86/kernel/apic/vector.c 	int cpu;
cpu              1181 arch/x86/kernel/apic/vector.c 	for_each_online_cpu(cpu) {
cpu              1182 arch/x86/kernel/apic/vector.c 		if (cpu >= maxcpu)
cpu              1184 arch/x86/kernel/apic/vector.c 		smp_call_function_single(cpu, print_local_APIC, NULL, 1);
cpu                28 arch/x86/kernel/apic/x2apic_cluster.c static void x2apic_send_IPI(int cpu, int vector)
cpu                30 arch/x86/kernel/apic/x2apic_cluster.c 	u32 dest = per_cpu(x86_cpu_to_logical_apicid, cpu);
cpu                39 arch/x86/kernel/apic/x2apic_cluster.c 	unsigned int cpu, clustercpu;
cpu                54 arch/x86/kernel/apic/x2apic_cluster.c 	for_each_cpu(cpu, tmpmsk) {
cpu                55 arch/x86/kernel/apic/x2apic_cluster.c 		struct cluster_mask *cmsk = per_cpu(cluster_masks, cpu);
cpu                93 arch/x86/kernel/apic/x2apic_cluster.c static u32 x2apic_calc_apicid(unsigned int cpu)
cpu                95 arch/x86/kernel/apic/x2apic_cluster.c 	return per_cpu(x86_cpu_to_logical_apicid, cpu);
cpu               102 arch/x86/kernel/apic/x2apic_cluster.c 	unsigned int cpu;
cpu               110 arch/x86/kernel/apic/x2apic_cluster.c 	for_each_online_cpu(cpu) {
cpu               111 arch/x86/kernel/apic/x2apic_cluster.c 		cmsk = per_cpu(cluster_masks, cpu);
cpu               124 arch/x86/kernel/apic/x2apic_cluster.c static int alloc_clustermask(unsigned int cpu, int node)
cpu               126 arch/x86/kernel/apic/x2apic_cluster.c 	if (per_cpu(cluster_masks, cpu))
cpu               146 arch/x86/kernel/apic/x2apic_cluster.c static int x2apic_prepare_cpu(unsigned int cpu)
cpu               148 arch/x86/kernel/apic/x2apic_cluster.c 	if (alloc_clustermask(cpu, cpu_to_node(cpu)) < 0)
cpu               150 arch/x86/kernel/apic/x2apic_cluster.c 	if (!zalloc_cpumask_var(&per_cpu(ipi_mask, cpu), GFP_KERNEL))
cpu                36 arch/x86/kernel/apic/x2apic_phys.c static void x2apic_send_IPI(int cpu, int vector)
cpu                38 arch/x86/kernel/apic/x2apic_phys.c 	u32 dest = per_cpu(x86_cpu_to_apicid, cpu);
cpu               542 arch/x86/kernel/apic/x2apic_uv_x.c static void uv_send_IPI_one(int cpu, int vector)
cpu               547 arch/x86/kernel/apic/x2apic_uv_x.c 	apicid = per_cpu(x86_cpu_to_apicid, cpu);
cpu               554 arch/x86/kernel/apic/x2apic_uv_x.c 	unsigned int cpu;
cpu               556 arch/x86/kernel/apic/x2apic_uv_x.c 	for_each_cpu(cpu, mask)
cpu               557 arch/x86/kernel/apic/x2apic_uv_x.c 		uv_send_IPI_one(cpu, vector);
cpu               563 arch/x86/kernel/apic/x2apic_uv_x.c 	unsigned int cpu;
cpu               565 arch/x86/kernel/apic/x2apic_uv_x.c 	for_each_cpu(cpu, mask) {
cpu               566 arch/x86/kernel/apic/x2apic_uv_x.c 		if (cpu != this_cpu)
cpu               567 arch/x86/kernel/apic/x2apic_uv_x.c 			uv_send_IPI_one(cpu, vector);
cpu               574 arch/x86/kernel/apic/x2apic_uv_x.c 	unsigned int cpu;
cpu               576 arch/x86/kernel/apic/x2apic_uv_x.c 	for_each_online_cpu(cpu) {
cpu               577 arch/x86/kernel/apic/x2apic_uv_x.c 		if (cpu != this_cpu)
cpu               578 arch/x86/kernel/apic/x2apic_uv_x.c 			uv_send_IPI_one(cpu, vector);
cpu               601 arch/x86/kernel/apic/x2apic_uv_x.c static u32 apic_uv_calc_apicid(unsigned int cpu)
cpu               603 arch/x86/kernel/apic/x2apic_uv_x.c 	return apic_default_calc_apicid(cpu) | uv_apicid_hibits;
cpu              1006 arch/x86/kernel/apic/x2apic_uv_x.c static int uv_heartbeat_enable(unsigned int cpu)
cpu              1008 arch/x86/kernel/apic/x2apic_uv_x.c 	while (!uv_cpu_scir_info(cpu)->enabled) {
cpu              1009 arch/x86/kernel/apic/x2apic_uv_x.c 		struct timer_list *timer = &uv_cpu_scir_info(cpu)->timer;
cpu              1011 arch/x86/kernel/apic/x2apic_uv_x.c 		uv_set_cpu_scir_bits(cpu, SCIR_CPU_HEARTBEAT|SCIR_CPU_ACTIVITY);
cpu              1014 arch/x86/kernel/apic/x2apic_uv_x.c 		add_timer_on(timer, cpu);
cpu              1015 arch/x86/kernel/apic/x2apic_uv_x.c 		uv_cpu_scir_info(cpu)->enabled = 1;
cpu              1018 arch/x86/kernel/apic/x2apic_uv_x.c 		cpu = 0;
cpu              1024 arch/x86/kernel/apic/x2apic_uv_x.c static int uv_heartbeat_disable(unsigned int cpu)
cpu              1026 arch/x86/kernel/apic/x2apic_uv_x.c 	if (uv_cpu_scir_info(cpu)->enabled) {
cpu              1027 arch/x86/kernel/apic/x2apic_uv_x.c 		uv_cpu_scir_info(cpu)->enabled = 0;
cpu              1028 arch/x86/kernel/apic/x2apic_uv_x.c 		del_timer(&uv_cpu_scir_info(cpu)->timer);
cpu              1030 arch/x86/kernel/apic/x2apic_uv_x.c 	uv_set_cpu_scir_bits(cpu, 0xff);
cpu              1048 arch/x86/kernel/apic/x2apic_uv_x.c 	int cpu;
cpu              1051 arch/x86/kernel/apic/x2apic_uv_x.c 		for_each_online_cpu(cpu)
cpu              1052 arch/x86/kernel/apic/x2apic_uv_x.c 			uv_heartbeat_enable(cpu);
cpu              1320 arch/x86/kernel/apic/x2apic_uv_x.c 	int cpu, i, lnid;
cpu              1375 arch/x86/kernel/apic/x2apic_uv_x.c 	for_each_present_cpu(cpu) {
cpu              1376 arch/x86/kernel/apic/x2apic_uv_x.c 		int nid = cpu_to_node(cpu);
cpu              1382 arch/x86/kernel/apic/x2apic_uv_x.c 		apicid = per_cpu(x86_cpu_to_apicid, cpu);
cpu              1440 arch/x86/kernel/apic/x2apic_uv_x.c 	int bytes, cpu, nodeid;
cpu              1511 arch/x86/kernel/apic/x2apic_uv_x.c 	for_each_possible_cpu(cpu) {
cpu              1512 arch/x86/kernel/apic/x2apic_uv_x.c 		int apicid = per_cpu(x86_cpu_to_apicid, cpu);
cpu              1516 arch/x86/kernel/apic/x2apic_uv_x.c 		nodeid = cpu_to_node(cpu);
cpu              1517 arch/x86/kernel/apic/x2apic_uv_x.c 		numa_node_id = numa_cpu_node(cpu);
cpu              1520 arch/x86/kernel/apic/x2apic_uv_x.c 		uv_cpu_info_per(cpu)->p_uv_hub_info = uv_hub_info_list(nodeid);
cpu              1521 arch/x86/kernel/apic/x2apic_uv_x.c 		uv_cpu_info_per(cpu)->blade_cpu_id = uv_cpu_hub_info(cpu)->nr_possible_cpus++;
cpu              1522 arch/x86/kernel/apic/x2apic_uv_x.c 		if (uv_cpu_hub_info(cpu)->memory_nid == NUMA_NO_NODE)
cpu              1523 arch/x86/kernel/apic/x2apic_uv_x.c 			uv_cpu_hub_info(cpu)->memory_nid = cpu_to_node(cpu);
cpu              1529 arch/x86/kernel/apic/x2apic_uv_x.c 		else if (uv_cpu_hub_info(cpu)->pnode == 0xffff)
cpu              1530 arch/x86/kernel/apic/x2apic_uv_x.c 			uv_cpu_hub_info(cpu)->pnode = pnode;
cpu              1532 arch/x86/kernel/apic/x2apic_uv_x.c 		uv_cpu_scir_info(cpu)->offset = uv_scir_offset(apicid);
cpu               597 arch/x86/kernel/apm_32.c 	int			cpu;
cpu               602 arch/x86/kernel/apm_32.c 	cpu = get_cpu();
cpu               603 arch/x86/kernel/apm_32.c 	BUG_ON(cpu != 0);
cpu               604 arch/x86/kernel/apm_32.c 	gdt = get_cpu_gdt_rw(cpu);
cpu               675 arch/x86/kernel/apm_32.c 	int			cpu;
cpu               680 arch/x86/kernel/apm_32.c 	cpu = get_cpu();
cpu               681 arch/x86/kernel/apm_32.c 	BUG_ON(cpu != 0);
cpu               682 arch/x86/kernel/apm_32.c 	gdt = get_cpu_gdt_rw(cpu);
cpu                71 arch/x86/kernel/asm-offsets.c 	OFFSET(PV_CPU_iret, paravirt_patch_template, cpu.iret);
cpu                41 arch/x86/kernel/asm-offsets_64.c 	       cpu.usergs_sysret64);
cpu                42 arch/x86/kernel/asm-offsets_64.c 	OFFSET(PV_CPU_swapgs, paravirt_patch_template, cpu.swapgs);
cpu                32 arch/x86/kernel/cpu/amd.c static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum);
cpu               339 arch/x86/kernel/cpu/amd.c 	int cpu = smp_processor_id();
cpu               368 arch/x86/kernel/cpu/amd.c 		cacheinfo_amd_init_llc_id(c, cpu, node_id);
cpu               376 arch/x86/kernel/cpu/amd.c 		per_cpu(cpu_llc_id, cpu) = node_id;
cpu               393 arch/x86/kernel/cpu/amd.c 	int cpu = smp_processor_id();
cpu               401 arch/x86/kernel/cpu/amd.c 	per_cpu(cpu_llc_id, cpu) = c->phys_proc_id;
cpu               404 arch/x86/kernel/cpu/amd.c u16 amd_get_nb_id(int cpu)
cpu               406 arch/x86/kernel/cpu/amd.c 	return per_cpu(cpu_llc_id, cpu);
cpu               419 arch/x86/kernel/cpu/amd.c 	int cpu = smp_processor_id();
cpu               423 arch/x86/kernel/cpu/amd.c 	node = numa_cpu_node(cpu);
cpu               425 arch/x86/kernel/cpu/amd.c 		node = per_cpu(cpu_llc_id, cpu);
cpu               463 arch/x86/kernel/cpu/amd.c 	numa_set_node(cpu, node);
cpu              1122 arch/x86/kernel/cpu/amd.c static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum)
cpu              1129 arch/x86/kernel/cpu/amd.c 	    cpu_has(cpu, X86_FEATURE_OSVW)) {
cpu              1143 arch/x86/kernel/cpu/amd.c 	ms = (cpu->x86_model << 4) | cpu->x86_stepping;
cpu              1145 arch/x86/kernel/cpu/amd.c 		if ((cpu->x86 == AMD_MODEL_RANGE_FAMILY(range)) &&
cpu                67 arch/x86/kernel/cpu/aperfmperf.c static bool aperfmperf_snapshot_cpu(int cpu, ktime_t now, bool wait)
cpu                69 arch/x86/kernel/cpu/aperfmperf.c 	s64 time_delta = ktime_ms_delta(now, per_cpu(samples.time, cpu));
cpu                75 arch/x86/kernel/cpu/aperfmperf.c 	smp_call_function_single(cpu, aperfmperf_snapshot_khz, NULL, wait);
cpu                81 arch/x86/kernel/cpu/aperfmperf.c unsigned int aperfmperf_get_khz(int cpu)
cpu                89 arch/x86/kernel/cpu/aperfmperf.c 	if (!housekeeping_cpu(cpu, HK_FLAG_MISC))
cpu                92 arch/x86/kernel/cpu/aperfmperf.c 	aperfmperf_snapshot_cpu(cpu, ktime_get(), true);
cpu                93 arch/x86/kernel/cpu/aperfmperf.c 	return per_cpu(samples.khz, cpu);
cpu               100 arch/x86/kernel/cpu/aperfmperf.c 	int cpu;
cpu               108 arch/x86/kernel/cpu/aperfmperf.c 	for_each_online_cpu(cpu) {
cpu               109 arch/x86/kernel/cpu/aperfmperf.c 		if (!housekeeping_cpu(cpu, HK_FLAG_MISC))
cpu               111 arch/x86/kernel/cpu/aperfmperf.c 		if (!aperfmperf_snapshot_cpu(cpu, now, false))
cpu               119 arch/x86/kernel/cpu/aperfmperf.c unsigned int arch_freq_get_on_cpu(int cpu)
cpu               127 arch/x86/kernel/cpu/aperfmperf.c 	if (!housekeeping_cpu(cpu, HK_FLAG_MISC))
cpu               130 arch/x86/kernel/cpu/aperfmperf.c 	if (aperfmperf_snapshot_cpu(cpu, ktime_get(), true))
cpu               131 arch/x86/kernel/cpu/aperfmperf.c 		return per_cpu(samples.khz, cpu);
cpu               134 arch/x86/kernel/cpu/aperfmperf.c 	smp_call_function_single(cpu, aperfmperf_snapshot_khz, NULL, 1);
cpu               136 arch/x86/kernel/cpu/aperfmperf.c 	return per_cpu(samples.khz, cpu);
cpu               373 arch/x86/kernel/cpu/cacheinfo.c static void amd_l3_disable_index(struct amd_northbridge *nb, int cpu,
cpu               396 arch/x86/kernel/cpu/cacheinfo.c 		wbinvd_on_cpu(cpu);
cpu               413 arch/x86/kernel/cpu/cacheinfo.c static int amd_set_l3_disable_slot(struct amd_northbridge *nb, int cpu,
cpu               430 arch/x86/kernel/cpu/cacheinfo.c 	amd_l3_disable_index(nb, cpu, slot, index);
cpu               440 arch/x86/kernel/cpu/cacheinfo.c 	int cpu, err = 0;
cpu               446 arch/x86/kernel/cpu/cacheinfo.c 	cpu = cpumask_first(&this_leaf->shared_cpu_map);
cpu               451 arch/x86/kernel/cpu/cacheinfo.c 	err = amd_set_l3_disable_slot(nb, cpu, slot, val);
cpu               477 arch/x86/kernel/cpu/cacheinfo.c 	int cpu = cpumask_first(&this_leaf->shared_cpu_map);
cpu               479 arch/x86/kernel/cpu/cacheinfo.c 	return sprintf(buf, "%x\n", amd_get_subcaches(cpu));
cpu               487 arch/x86/kernel/cpu/cacheinfo.c 	int cpu = cpumask_first(&this_leaf->shared_cpu_map);
cpu               496 arch/x86/kernel/cpu/cacheinfo.c 	if (amd_set_subcaches(cpu, val))
cpu               649 arch/x86/kernel/cpu/cacheinfo.c void cacheinfo_amd_init_llc_id(struct cpuinfo_x86 *c, int cpu, u8 node_id)
cpu               660 arch/x86/kernel/cpu/cacheinfo.c 		per_cpu(cpu_llc_id, cpu) = node_id;
cpu               666 arch/x86/kernel/cpu/cacheinfo.c 		per_cpu(cpu_llc_id, cpu) = c->apicid >> 3;
cpu               682 arch/x86/kernel/cpu/cacheinfo.c 			per_cpu(cpu_llc_id, cpu) = c->apicid >> bits;
cpu               687 arch/x86/kernel/cpu/cacheinfo.c void cacheinfo_hygon_init_llc_id(struct cpuinfo_x86 *c, int cpu, u8 node_id)
cpu               700 arch/x86/kernel/cpu/cacheinfo.c 	per_cpu(cpu_llc_id, cpu) = c->apicid >> 3;
cpu               729 arch/x86/kernel/cpu/cacheinfo.c 	unsigned int cpu = c->cpu_index;
cpu               848 arch/x86/kernel/cpu/cacheinfo.c 		per_cpu(cpu_llc_id, cpu) = l2_id;
cpu               855 arch/x86/kernel/cpu/cacheinfo.c 		per_cpu(cpu_llc_id, cpu) = l3_id;
cpu               867 arch/x86/kernel/cpu/cacheinfo.c 	if (per_cpu(cpu_llc_id, cpu) == BAD_APICID)
cpu               868 arch/x86/kernel/cpu/cacheinfo.c 		per_cpu(cpu_llc_id, cpu) = c->phys_proc_id;
cpu               877 arch/x86/kernel/cpu/cacheinfo.c static int __cache_amd_cpumap_setup(unsigned int cpu, int index,
cpu               880 arch/x86/kernel/cpu/cacheinfo.c 	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
cpu               889 arch/x86/kernel/cpu/cacheinfo.c 		for_each_cpu(i, cpu_llc_shared_mask(cpu)) {
cpu               894 arch/x86/kernel/cpu/cacheinfo.c 			for_each_cpu(sibling, cpu_llc_shared_mask(cpu)) {
cpu               905 arch/x86/kernel/cpu/cacheinfo.c 		apicid = cpu_data(cpu).apicid;
cpu               934 arch/x86/kernel/cpu/cacheinfo.c static void __cache_cpumap_setup(unsigned int cpu, int index,
cpu               937 arch/x86/kernel/cpu/cacheinfo.c 	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
cpu               941 arch/x86/kernel/cpu/cacheinfo.c 	struct cpuinfo_x86 *c = &cpu_data(cpu);
cpu               945 arch/x86/kernel/cpu/cacheinfo.c 		if (__cache_amd_cpumap_setup(cpu, index, base))
cpu               952 arch/x86/kernel/cpu/cacheinfo.c 	cpumask_set_cpu(cpu, &this_leaf->shared_cpu_map);
cpu               962 arch/x86/kernel/cpu/cacheinfo.c 			if (i == cpu || !sib_cpu_ci->info_list)
cpu               966 arch/x86/kernel/cpu/cacheinfo.c 			cpumask_set_cpu(cpu, &sibling_leaf->shared_cpu_map);
cpu               988 arch/x86/kernel/cpu/cacheinfo.c static int __init_cache_level(unsigned int cpu)
cpu               990 arch/x86/kernel/cpu/cacheinfo.c 	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
cpu              1006 arch/x86/kernel/cpu/cacheinfo.c static void get_cache_id(int cpu, struct _cpuid4_info_regs *id4_regs)
cpu              1008 arch/x86/kernel/cpu/cacheinfo.c 	struct cpuinfo_x86 *c = &cpu_data(cpu);
cpu              1017 arch/x86/kernel/cpu/cacheinfo.c static int __populate_cache_leaves(unsigned int cpu)
cpu              1020 arch/x86/kernel/cpu/cacheinfo.c 	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
cpu              1028 arch/x86/kernel/cpu/cacheinfo.c 		get_cache_id(cpu, &id4_regs);
cpu              1030 arch/x86/kernel/cpu/cacheinfo.c 		__cache_cpumap_setup(cpu, idx, &id4_regs);
cpu               571 arch/x86/kernel/cpu/common.c void load_percpu_segment(int cpu)
cpu               577 arch/x86/kernel/cpu/common.c 	wrmsrl(MSR_GS_BASE, cpu_kernelmode_gs_base(cpu));
cpu               588 arch/x86/kernel/cpu/common.c void load_direct_gdt(int cpu)
cpu               592 arch/x86/kernel/cpu/common.c 	gdt_descr.address = (long)get_cpu_gdt_rw(cpu);
cpu               599 arch/x86/kernel/cpu/common.c void load_fixmap_gdt(int cpu)
cpu               603 arch/x86/kernel/cpu/common.c 	gdt_descr.address = (long)get_cpu_gdt_ro(cpu);
cpu               613 arch/x86/kernel/cpu/common.c void switch_to_new_gdt(int cpu)
cpu               616 arch/x86/kernel/cpu/common.c 	load_direct_gdt(cpu);
cpu               618 arch/x86/kernel/cpu/common.c 	load_percpu_segment(cpu);
cpu              1420 arch/x86/kernel/cpu/common.c 		if (pv_ops.cpu.iret == native_iret)
cpu              1450 arch/x86/kernel/cpu/common.c 	unsigned int apicid, cpu = smp_processor_id();
cpu              1452 arch/x86/kernel/cpu/common.c 	apicid = apic->cpu_present_to_apicid(cpu);
cpu              1456 arch/x86/kernel/cpu/common.c 		       cpu, apicid, c->initial_apicid);
cpu              1458 arch/x86/kernel/cpu/common.c 	BUG_ON(topology_update_package_map(c->phys_proc_id, cpu));
cpu              1459 arch/x86/kernel/cpu/common.c 	BUG_ON(topology_update_die_map(c->cpu_die_id, cpu));
cpu              1595 arch/x86/kernel/cpu/common.c 	int cpu;
cpu              1600 arch/x86/kernel/cpu/common.c 	cpu = get_cpu();
cpu              1601 arch/x86/kernel/cpu/common.c 	tss = &per_cpu(cpu_tss_rw, cpu);
cpu              1610 arch/x86/kernel/cpu/common.c 	wrmsr(MSR_IA32_SYSENTER_ESP, (unsigned long)(cpu_entry_stack(cpu) + 1), 0);
cpu              1810 arch/x86/kernel/cpu/common.c static void wait_for_master_cpu(int cpu)
cpu              1817 arch/x86/kernel/cpu/common.c 	WARN_ON(cpumask_test_and_set_cpu(cpu, cpu_initialized_mask));
cpu              1818 arch/x86/kernel/cpu/common.c 	while (!cpumask_test_cpu(cpu, cpu_callout_mask))
cpu              1824 arch/x86/kernel/cpu/common.c static void setup_getcpu(int cpu)
cpu              1826 arch/x86/kernel/cpu/common.c 	unsigned long cpudata = vdso_encode_cpunode(cpu, early_cpu_to_node(cpu));
cpu              1842 arch/x86/kernel/cpu/common.c 	write_gdt_entry(get_cpu_gdt_rw(cpu), GDT_ENTRY_CPUNODE, &d, DESCTYPE_S);
cpu              1856 arch/x86/kernel/cpu/common.c 	int cpu = raw_smp_processor_id();
cpu              1861 arch/x86/kernel/cpu/common.c 	wait_for_master_cpu(cpu);
cpu              1863 arch/x86/kernel/cpu/common.c 	if (cpu)
cpu              1866 arch/x86/kernel/cpu/common.c 	t = &per_cpu(cpu_tss_rw, cpu);
cpu              1870 arch/x86/kernel/cpu/common.c 	    early_cpu_to_node(cpu) != NUMA_NO_NODE)
cpu              1871 arch/x86/kernel/cpu/common.c 		set_numa_node(early_cpu_to_node(cpu));
cpu              1873 arch/x86/kernel/cpu/common.c 	setup_getcpu(cpu);
cpu              1877 arch/x86/kernel/cpu/common.c 	pr_debug("Initializing CPU#%d\n", cpu);
cpu              1886 arch/x86/kernel/cpu/common.c 	switch_to_new_gdt(cpu);
cpu              1930 arch/x86/kernel/cpu/common.c 	set_tss_desc(cpu, &get_cpu_entry_area(cpu)->tss.x86_tss);
cpu              1932 arch/x86/kernel/cpu/common.c 	load_sp0((unsigned long)(cpu_entry_stack(cpu) + 1));
cpu              1944 arch/x86/kernel/cpu/common.c 	load_fixmap_gdt(cpu);
cpu              1951 arch/x86/kernel/cpu/common.c 	int cpu = smp_processor_id();
cpu              1953 arch/x86/kernel/cpu/common.c 	struct tss_struct *t = &per_cpu(cpu_tss_rw, cpu);
cpu              1955 arch/x86/kernel/cpu/common.c 	wait_for_master_cpu(cpu);
cpu              1959 arch/x86/kernel/cpu/common.c 	pr_info("Initializing CPU#%d\n", cpu);
cpu              1967 arch/x86/kernel/cpu/common.c 	switch_to_new_gdt(cpu);
cpu              1982 arch/x86/kernel/cpu/common.c 	set_tss_desc(cpu, &get_cpu_entry_area(cpu)->tss.x86_tss);
cpu              1984 arch/x86/kernel/cpu/common.c 	load_sp0((unsigned long)(cpu_entry_stack(cpu) + 1));
cpu              1992 arch/x86/kernel/cpu/common.c 	__set_tss_desc(cpu, GDT_ENTRY_DOUBLEFAULT_TSS, &doublefault_tss);
cpu              2000 arch/x86/kernel/cpu/common.c 	load_fixmap_gdt(cpu);
cpu                77 arch/x86/kernel/cpu/cpu.h unsigned int aperfmperf_get_khz(int cpu);
cpu                68 arch/x86/kernel/cpu/hygon.c 	int cpu = smp_processor_id();
cpu                95 arch/x86/kernel/cpu/hygon.c 		cacheinfo_hygon_init_llc_id(c, cpu, node_id);
cpu               102 arch/x86/kernel/cpu/hygon.c 		per_cpu(cpu_llc_id, cpu) = node_id;
cpu               117 arch/x86/kernel/cpu/hygon.c 	int cpu = smp_processor_id();
cpu               125 arch/x86/kernel/cpu/hygon.c 	per_cpu(cpu_llc_id, cpu) = c->phys_proc_id;
cpu               131 arch/x86/kernel/cpu/hygon.c 	int cpu = smp_processor_id();
cpu               135 arch/x86/kernel/cpu/hygon.c 	node = numa_cpu_node(cpu);
cpu               137 arch/x86/kernel/cpu/hygon.c 		node = per_cpu(cpu_llc_id, cpu);
cpu               174 arch/x86/kernel/cpu/hygon.c 	numa_set_node(cpu, node);
cpu               484 arch/x86/kernel/cpu/intel.c 	int cpu = smp_processor_id();
cpu               488 arch/x86/kernel/cpu/intel.c 	node = numa_cpu_node(cpu);
cpu               491 arch/x86/kernel/cpu/intel.c 		node = cpu_to_node(cpu);
cpu               493 arch/x86/kernel/cpu/intel.c 	numa_set_node(cpu, node);
cpu               124 arch/x86/kernel/cpu/intel_epb.c 	unsigned int cpu = dev->id;
cpu               128 arch/x86/kernel/cpu/intel_epb.c 	ret = rdmsrl_on_cpu(cpu, MSR_IA32_ENERGY_PERF_BIAS, &epb);
cpu               139 arch/x86/kernel/cpu/intel_epb.c 	unsigned int cpu = dev->id;
cpu               150 arch/x86/kernel/cpu/intel_epb.c 	ret = rdmsrl_on_cpu(cpu, MSR_IA32_ENERGY_PERF_BIAS, &epb);
cpu               154 arch/x86/kernel/cpu/intel_epb.c 	ret = wrmsrl_on_cpu(cpu, MSR_IA32_ENERGY_PERF_BIAS,
cpu               174 arch/x86/kernel/cpu/intel_epb.c static int intel_epb_online(unsigned int cpu)
cpu               176 arch/x86/kernel/cpu/intel_epb.c 	struct device *cpu_dev = get_cpu_device(cpu);
cpu               185 arch/x86/kernel/cpu/intel_epb.c static int intel_epb_offline(unsigned int cpu)
cpu               187 arch/x86/kernel/cpu/intel_epb.c 	struct device *cpu_dev = get_cpu_device(cpu);
cpu               207 arch/x86/kernel/cpu/mce/amd.c static void smca_set_misc_banks_map(unsigned int bank, unsigned int cpu)
cpu               225 arch/x86/kernel/cpu/mce/amd.c 		per_cpu(smca_misc_banks_map, cpu) |= BIT(bank);
cpu               229 arch/x86/kernel/cpu/mce/amd.c static void smca_configure(unsigned int bank, unsigned int cpu)
cpu               266 arch/x86/kernel/cpu/mce/amd.c 	smca_set_misc_banks_map(bank, cpu);
cpu               353 arch/x86/kernel/cpu/mce/amd.c 		       "for bank %d, block %d (MSR%08X=0x%x%08x)\n", b->cpu,
cpu               369 arch/x86/kernel/cpu/mce/amd.c 		       b->cpu, apic, b->bank, b->block, b->address, hi, lo);
cpu               479 arch/x86/kernel/cpu/mce/amd.c 				  unsigned int cpu)
cpu               484 arch/x86/kernel/cpu/mce/amd.c 	if (!(per_cpu(smca_misc_banks_map, cpu) & BIT(bank)))
cpu               492 arch/x86/kernel/cpu/mce/amd.c 			     unsigned int cpu)
cpu               496 arch/x86/kernel/cpu/mce/amd.c 	if ((bank >= per_cpu(mce_num_banks, cpu)) || (block >= NR_BLOCKS))
cpu               500 arch/x86/kernel/cpu/mce/amd.c 		return smca_get_block_address(bank, block, cpu);
cpu               522 arch/x86/kernel/cpu/mce/amd.c 	unsigned int cpu = smp_processor_id();
cpu               528 arch/x86/kernel/cpu/mce/amd.c 		per_cpu(bank_map, cpu) |= (1 << bank);
cpu               531 arch/x86/kernel/cpu/mce/amd.c 	b.cpu			= cpu;
cpu               628 arch/x86/kernel/cpu/mce/amd.c 	unsigned int bank, block, cpu = smp_processor_id();
cpu               635 arch/x86/kernel/cpu/mce/amd.c 			smca_configure(bank, cpu);
cpu               640 arch/x86/kernel/cpu/mce/amd.c 			address = get_block_address(address, low, high, bank, block, cpu);
cpu              1017 arch/x86/kernel/cpu/mce/amd.c 	unsigned int bank, cpu = smp_processor_id();
cpu              1020 arch/x86/kernel/cpu/mce/amd.c 		if (!(per_cpu(bank_map, cpu) & (1 << bank)))
cpu              1023 arch/x86/kernel/cpu/mce/amd.c 		first_block = per_cpu(threshold_banks, cpu)[bank]->blocks;
cpu              1072 arch/x86/kernel/cpu/mce/amd.c 	smp_call_function_single(b->cpu, threshold_restart_bank, &tr, 1);
cpu              1096 arch/x86/kernel/cpu/mce/amd.c 	smp_call_function_single(b->cpu, threshold_restart_bank, &tr, 1);
cpu              1105 arch/x86/kernel/cpu/mce/amd.c 	rdmsr_on_cpu(b->cpu, b->address, &lo, &hi);
cpu              1202 arch/x86/kernel/cpu/mce/amd.c static int allocate_threshold_blocks(unsigned int cpu, struct threshold_bank *tb,
cpu              1210 arch/x86/kernel/cpu/mce/amd.c 	if ((bank >= per_cpu(mce_num_banks, cpu)) || (block >= NR_BLOCKS))
cpu              1213 arch/x86/kernel/cpu/mce/amd.c 	if (rdmsr_safe_on_cpu(cpu, address, &low, &high))
cpu              1233 arch/x86/kernel/cpu/mce/amd.c 	b->cpu			= cpu;
cpu              1257 arch/x86/kernel/cpu/mce/amd.c 	address = get_block_address(address, low, high, bank, ++block, cpu);
cpu              1261 arch/x86/kernel/cpu/mce/amd.c 	err = allocate_threshold_blocks(cpu, tb, bank, block, address);
cpu              1303 arch/x86/kernel/cpu/mce/amd.c static int threshold_create_bank(unsigned int cpu, unsigned int bank)
cpu              1305 arch/x86/kernel/cpu/mce/amd.c 	struct device *dev = per_cpu(mce_device, cpu);
cpu              1315 arch/x86/kernel/cpu/mce/amd.c 		nb = node_to_amd_nb(amd_get_nb_id(cpu));
cpu              1325 arch/x86/kernel/cpu/mce/amd.c 			per_cpu(threshold_banks, cpu)[bank] = b;
cpu              1356 arch/x86/kernel/cpu/mce/amd.c 	err = allocate_threshold_blocks(cpu, b, bank, 0, msr_ops.misc(bank));
cpu              1360 arch/x86/kernel/cpu/mce/amd.c 	per_cpu(threshold_banks, cpu)[bank] = b;
cpu              1376 arch/x86/kernel/cpu/mce/amd.c static void deallocate_threshold_block(unsigned int cpu, unsigned int bank)
cpu              1380 arch/x86/kernel/cpu/mce/amd.c 	struct threshold_bank *head = per_cpu(threshold_banks, cpu)[bank];
cpu              1404 arch/x86/kernel/cpu/mce/amd.c static void threshold_remove_bank(unsigned int cpu, int bank)
cpu              1409 arch/x86/kernel/cpu/mce/amd.c 	b = per_cpu(threshold_banks, cpu)[bank];
cpu              1419 arch/x86/kernel/cpu/mce/amd.c 			per_cpu(threshold_banks, cpu)[bank] = NULL;
cpu              1426 arch/x86/kernel/cpu/mce/amd.c 			nb = node_to_amd_nb(amd_get_nb_id(cpu));
cpu              1431 arch/x86/kernel/cpu/mce/amd.c 	deallocate_threshold_block(cpu, bank);
cpu              1437 arch/x86/kernel/cpu/mce/amd.c 	per_cpu(threshold_banks, cpu)[bank] = NULL;
cpu              1440 arch/x86/kernel/cpu/mce/amd.c int mce_threshold_remove_device(unsigned int cpu)
cpu              1444 arch/x86/kernel/cpu/mce/amd.c 	for (bank = 0; bank < per_cpu(mce_num_banks, cpu); ++bank) {
cpu              1445 arch/x86/kernel/cpu/mce/amd.c 		if (!(per_cpu(bank_map, cpu) & (1 << bank)))
cpu              1447 arch/x86/kernel/cpu/mce/amd.c 		threshold_remove_bank(cpu, bank);
cpu              1449 arch/x86/kernel/cpu/mce/amd.c 	kfree(per_cpu(threshold_banks, cpu));
cpu              1450 arch/x86/kernel/cpu/mce/amd.c 	per_cpu(threshold_banks, cpu) = NULL;
cpu              1455 arch/x86/kernel/cpu/mce/amd.c int mce_threshold_create_device(unsigned int cpu)
cpu              1461 arch/x86/kernel/cpu/mce/amd.c 	bp = per_cpu(threshold_banks, cpu);
cpu              1465 arch/x86/kernel/cpu/mce/amd.c 	bp = kcalloc(per_cpu(mce_num_banks, cpu), sizeof(struct threshold_bank *),
cpu              1470 arch/x86/kernel/cpu/mce/amd.c 	per_cpu(threshold_banks, cpu) = bp;
cpu              1472 arch/x86/kernel/cpu/mce/amd.c 	for (bank = 0; bank < per_cpu(mce_num_banks, cpu); ++bank) {
cpu              1473 arch/x86/kernel/cpu/mce/amd.c 		if (!(per_cpu(bank_map, cpu) & (1 << bank)))
cpu              1475 arch/x86/kernel/cpu/mce/amd.c 		err = threshold_create_bank(cpu, bank);
cpu              1481 arch/x86/kernel/cpu/mce/amd.c 	mce_threshold_remove_device(cpu);
cpu               136 arch/x86/kernel/cpu/mce/core.c 	m->cpu = m->extcpu = smp_processor_id();
cpu               899 arch/x86/kernel/cpu/mce/core.c 	int cpu;
cpu               910 arch/x86/kernel/cpu/mce/core.c 	for_each_possible_cpu(cpu) {
cpu               911 arch/x86/kernel/cpu/mce/core.c 		int severity = mce_severity(&per_cpu(mces_seen, cpu),
cpu               917 arch/x86/kernel/cpu/mce/core.c 			m = &per_cpu(mces_seen, cpu);
cpu               946 arch/x86/kernel/cpu/mce/core.c 	for_each_possible_cpu(cpu)
cpu               947 arch/x86/kernel/cpu/mce/core.c 		memset(&per_cpu(mces_seen, cpu), 0, sizeof(struct mce));
cpu              1130 arch/x86/kernel/cpu/mce/core.c static bool __mc_check_crashing_cpu(int cpu)
cpu              1132 arch/x86/kernel/cpu/mce/core.c 	if (cpu_is_offline(cpu) ||
cpu              1133 arch/x86/kernel/cpu/mce/core.c 	    (crashing_cpu != -1 && crashing_cpu != cpu)) {
cpu              1230 arch/x86/kernel/cpu/mce/core.c 	int cpu = smp_processor_id();
cpu              1259 arch/x86/kernel/cpu/mce/core.c 	if (__mc_check_crashing_cpu(cpu))
cpu              1463 arch/x86/kernel/cpu/mce/core.c 	int cpu;
cpu              1465 arch/x86/kernel/cpu/mce/core.c 	for_each_online_cpu(cpu)
cpu              1466 arch/x86/kernel/cpu/mce/core.c 		del_timer_sync(&per_cpu(mce_timer, cpu));
cpu              2268 arch/x86/kernel/cpu/mce/core.c static int mce_device_create(unsigned int cpu)
cpu              2277 arch/x86/kernel/cpu/mce/core.c 	dev = per_cpu(mce_device, cpu);
cpu              2284 arch/x86/kernel/cpu/mce/core.c 	dev->id  = cpu;
cpu              2299 arch/x86/kernel/cpu/mce/core.c 	for (j = 0; j < per_cpu(mce_num_banks, cpu); j++) {
cpu              2304 arch/x86/kernel/cpu/mce/core.c 	cpumask_set_cpu(cpu, mce_device_initialized);
cpu              2305 arch/x86/kernel/cpu/mce/core.c 	per_cpu(mce_device, cpu) = dev;
cpu              2320 arch/x86/kernel/cpu/mce/core.c static void mce_device_remove(unsigned int cpu)
cpu              2322 arch/x86/kernel/cpu/mce/core.c 	struct device *dev = per_cpu(mce_device, cpu);
cpu              2325 arch/x86/kernel/cpu/mce/core.c 	if (!cpumask_test_cpu(cpu, mce_device_initialized))
cpu              2331 arch/x86/kernel/cpu/mce/core.c 	for (i = 0; i < per_cpu(mce_num_banks, cpu); i++)
cpu              2335 arch/x86/kernel/cpu/mce/core.c 	cpumask_clear_cpu(cpu, mce_device_initialized);
cpu              2336 arch/x86/kernel/cpu/mce/core.c 	per_cpu(mce_device, cpu) = NULL;
cpu              2369 arch/x86/kernel/cpu/mce/core.c static int mce_cpu_dead(unsigned int cpu)
cpu              2371 arch/x86/kernel/cpu/mce/core.c 	mce_intel_hcpu_update(cpu);
cpu              2379 arch/x86/kernel/cpu/mce/core.c static int mce_cpu_online(unsigned int cpu)
cpu              2384 arch/x86/kernel/cpu/mce/core.c 	mce_device_create(cpu);
cpu              2386 arch/x86/kernel/cpu/mce/core.c 	ret = mce_threshold_create_device(cpu);
cpu              2388 arch/x86/kernel/cpu/mce/core.c 		mce_device_remove(cpu);
cpu              2396 arch/x86/kernel/cpu/mce/core.c static int mce_cpu_pre_down(unsigned int cpu)
cpu              2402 arch/x86/kernel/cpu/mce/core.c 	mce_threshold_remove_device(cpu);
cpu              2403 arch/x86/kernel/cpu/mce/core.c 	mce_device_remove(cpu);
cpu               161 arch/x86/kernel/cpu/mce/inject.c 	int cpu = smp_processor_id();
cpu               163 arch/x86/kernel/cpu/mce/inject.c 	if (!cpumask_test_cpu(cpu, mce_inject_cpumask))
cpu               165 arch/x86/kernel/cpu/mce/inject.c 	cpumask_clear_cpu(cpu, mce_inject_cpumask);
cpu               175 arch/x86/kernel/cpu/mce/inject.c 	int cpu = smp_processor_id();
cpu               178 arch/x86/kernel/cpu/mce/inject.c 	if (cpumask_test_cpu(cpu, mce_inject_cpumask) &&
cpu               180 arch/x86/kernel/cpu/mce/inject.c 		cpumask_clear_cpu(cpu, mce_inject_cpumask);
cpu               191 arch/x86/kernel/cpu/mce/inject.c 	int cpu = m->extcpu;
cpu               194 arch/x86/kernel/cpu/mce/inject.c 		pr_info("Triggering MCE exception on CPU %d\n", cpu);
cpu               210 arch/x86/kernel/cpu/mce/inject.c 		pr_info("MCE exception done on CPU %d\n", cpu);
cpu               212 arch/x86/kernel/cpu/mce/inject.c 		pr_info("Starting machine check poll CPU %d\n", cpu);
cpu               215 arch/x86/kernel/cpu/mce/inject.c 		pr_info("Machine check poll done on CPU %d\n", cpu);
cpu               233 arch/x86/kernel/cpu/mce/inject.c 		int cpu;
cpu               238 arch/x86/kernel/cpu/mce/inject.c 		for_each_online_cpu(cpu) {
cpu               239 arch/x86/kernel/cpu/mce/inject.c 			struct mce *mcpu = &per_cpu(injectm, cpu);
cpu               242 arch/x86/kernel/cpu/mce/inject.c 				cpumask_clear_cpu(cpu, mce_inject_cpumask);
cpu               300 arch/x86/kernel/cpu/mce/inject.c static int toggle_hw_mce_inject(unsigned int cpu, bool enable)
cpu               305 arch/x86/kernel/cpu/mce/inject.c 	err = rdmsr_on_cpu(cpu, MSR_K7_HWCR, &l, &h);
cpu               313 arch/x86/kernel/cpu/mce/inject.c 	err = wrmsr_on_cpu(cpu, MSR_K7_HWCR, l, h);
cpu               485 arch/x86/kernel/cpu/mce/inject.c 	unsigned int cpu = i_mce.extcpu;
cpu               525 arch/x86/kernel/cpu/mce/inject.c 		toggle_nb_mca_mst_cpu(amd_get_nb_id(cpu));
cpu               526 arch/x86/kernel/cpu/mce/inject.c 		cpu = get_nbc_for_node(amd_get_nb_id(cpu));
cpu               530 arch/x86/kernel/cpu/mce/inject.c 	if (!cpu_online(cpu))
cpu               533 arch/x86/kernel/cpu/mce/inject.c 	toggle_hw_mce_inject(cpu, true);
cpu               537 arch/x86/kernel/cpu/mce/inject.c 	smp_call_function_single(cpu, prepare_msrs, &i_mce, 0);
cpu               539 arch/x86/kernel/cpu/mce/inject.c 	toggle_hw_mce_inject(cpu, false);
cpu               543 arch/x86/kernel/cpu/mce/inject.c 		smp_call_function_single(cpu, trigger_dfr_int, NULL, 0);
cpu               546 arch/x86/kernel/cpu/mce/inject.c 		smp_call_function_single(cpu, trigger_thr_int, NULL, 0);
cpu               549 arch/x86/kernel/cpu/mce/inject.c 		smp_call_function_single(cpu, trigger_mce, NULL, 0);
cpu               144 arch/x86/kernel/cpu/mce/intel.c void mce_intel_hcpu_update(unsigned long cpu)
cpu               146 arch/x86/kernel/cpu/mce/intel.c 	if (per_cpu(cmci_storm_state, cpu) == CMCI_STORM_ACTIVE)
cpu               149 arch/x86/kernel/cpu/mce/intel.c 	per_cpu(cmci_storm_state, cpu) = CMCI_STORM_NONE;
cpu                46 arch/x86/kernel/cpu/mce/internal.h void mce_intel_hcpu_update(unsigned long cpu);
cpu                51 arch/x86/kernel/cpu/mce/internal.h static inline void mce_intel_hcpu_update(unsigned long cpu) { }
cpu                98 arch/x86/kernel/cpu/mce/therm_throt.c 	unsigned int cpu = dev->id;					\
cpu               102 arch/x86/kernel/cpu/mce/therm_throt.c 	if (cpu_online(cpu)) {						\
cpu               104 arch/x86/kernel/cpu/mce/therm_throt.c 			      per_cpu(thermal_state, cpu).event.name);	\
cpu               238 arch/x86/kernel/cpu/mce/therm_throt.c static int thermal_throttle_add_dev(struct device *dev, unsigned int cpu)
cpu               241 arch/x86/kernel/cpu/mce/therm_throt.c 	struct cpuinfo_x86 *c = &cpu_data(cpu);
cpu               270 arch/x86/kernel/cpu/mce/therm_throt.c static int thermal_throttle_online(unsigned int cpu)
cpu               272 arch/x86/kernel/cpu/mce/therm_throt.c 	struct device *dev = get_cpu_device(cpu);
cpu               274 arch/x86/kernel/cpu/mce/therm_throt.c 	return thermal_throttle_add_dev(dev, cpu);
cpu               277 arch/x86/kernel/cpu/mce/therm_throt.c static int thermal_throttle_offline(unsigned int cpu)
cpu               279 arch/x86/kernel/cpu/mce/therm_throt.c 	struct device *dev = get_cpu_device(cpu);
cpu               430 arch/x86/kernel/cpu/mce/therm_throt.c 	unsigned int cpu = smp_processor_id();
cpu               461 arch/x86/kernel/cpu/mce/therm_throt.c 			pr_debug("CPU%d: Thermal monitoring handled by SMI\n", cpu);
cpu               583 arch/x86/kernel/cpu/microcode/amd.c static u16 __find_equiv_id(unsigned int cpu)
cpu               585 arch/x86/kernel/cpu/microcode/amd.c 	struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
cpu               636 arch/x86/kernel/cpu/microcode/amd.c static struct ucode_patch *find_patch(unsigned int cpu)
cpu               640 arch/x86/kernel/cpu/microcode/amd.c 	equiv_id = __find_equiv_id(cpu);
cpu               647 arch/x86/kernel/cpu/microcode/amd.c static int collect_cpu_info_amd(int cpu, struct cpu_signature *csig)
cpu               649 arch/x86/kernel/cpu/microcode/amd.c 	struct cpuinfo_x86 *c = &cpu_data(cpu);
cpu               650 arch/x86/kernel/cpu/microcode/amd.c 	struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
cpu               660 arch/x86/kernel/cpu/microcode/amd.c 	p = find_patch(cpu);
cpu               664 arch/x86/kernel/cpu/microcode/amd.c 	pr_info("CPU%d: patch_level=0x%08x\n", cpu, csig->rev);
cpu               669 arch/x86/kernel/cpu/microcode/amd.c static enum ucode_state apply_microcode_amd(int cpu)
cpu               671 arch/x86/kernel/cpu/microcode/amd.c 	struct cpuinfo_x86 *c = &cpu_data(cpu);
cpu               678 arch/x86/kernel/cpu/microcode/amd.c 	BUG_ON(raw_smp_processor_id() != cpu);
cpu               680 arch/x86/kernel/cpu/microcode/amd.c 	uci = ucode_cpu_info + cpu;
cpu               682 arch/x86/kernel/cpu/microcode/amd.c 	p = find_patch(cpu);
cpu               699 arch/x86/kernel/cpu/microcode/amd.c 			cpu, mc_amd->hdr.patch_id);
cpu               706 arch/x86/kernel/cpu/microcode/amd.c 	pr_info("CPU%d: new patch_level=0x%08x\n", cpu, rev);
cpu               888 arch/x86/kernel/cpu/microcode/amd.c static enum ucode_state request_microcode_amd(int cpu, struct device *device,
cpu               892 arch/x86/kernel/cpu/microcode/amd.c 	struct cpuinfo_x86 *c = &cpu_data(cpu);
cpu               923 arch/x86/kernel/cpu/microcode/amd.c request_microcode_user(int cpu, const void __user *buf, size_t size)
cpu               928 arch/x86/kernel/cpu/microcode/amd.c static void microcode_fini_cpu_amd(int cpu)
cpu               930 arch/x86/kernel/cpu/microcode/amd.c 	struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
cpu               354 arch/x86/kernel/cpu/microcode/core.c static int collect_cpu_info_on_target(int cpu, struct cpu_signature *cpu_sig)
cpu               359 arch/x86/kernel/cpu/microcode/core.c 	ret = smp_call_function_single(cpu, collect_cpu_info_local, &ctx, 1);
cpu               366 arch/x86/kernel/cpu/microcode/core.c static int collect_cpu_info(int cpu)
cpu               368 arch/x86/kernel/cpu/microcode/core.c 	struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
cpu               373 arch/x86/kernel/cpu/microcode/core.c 	ret = collect_cpu_info_on_target(cpu, &uci->cpu_sig);
cpu               387 arch/x86/kernel/cpu/microcode/core.c static int apply_microcode_on_target(int cpu)
cpu               392 arch/x86/kernel/cpu/microcode/core.c 	ret = smp_call_function_single(cpu, apply_microcode_local, &err, 1);
cpu               404 arch/x86/kernel/cpu/microcode/core.c 	int cpu;
cpu               406 arch/x86/kernel/cpu/microcode/core.c 	for_each_online_cpu(cpu) {
cpu               407 arch/x86/kernel/cpu/microcode/core.c 		struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
cpu               413 arch/x86/kernel/cpu/microcode/core.c 		ustate = microcode_ops->request_microcode_user(cpu, buf, size);
cpu               418 arch/x86/kernel/cpu/microcode/core.c 			apply_microcode_on_target(cpu);
cpu               510 arch/x86/kernel/cpu/microcode/core.c 	unsigned int cpu;
cpu               516 arch/x86/kernel/cpu/microcode/core.c 	for_each_present_cpu(cpu) {
cpu               517 arch/x86/kernel/cpu/microcode/core.c 		if (topology_is_primary_thread(cpu) && !cpu_online(cpu)) {
cpu               558 arch/x86/kernel/cpu/microcode/core.c 	int cpu = smp_processor_id();
cpu               575 arch/x86/kernel/cpu/microcode/core.c 		pr_warn("Error reloading microcode on CPU %d\n", cpu);
cpu               683 arch/x86/kernel/cpu/microcode/core.c static void microcode_fini_cpu(int cpu)
cpu               686 arch/x86/kernel/cpu/microcode/core.c 		microcode_ops->microcode_fini_cpu(cpu);
cpu               689 arch/x86/kernel/cpu/microcode/core.c static enum ucode_state microcode_resume_cpu(int cpu)
cpu               691 arch/x86/kernel/cpu/microcode/core.c 	if (apply_microcode_on_target(cpu))
cpu               694 arch/x86/kernel/cpu/microcode/core.c 	pr_debug("CPU%d updated upon resume\n", cpu);
cpu               699 arch/x86/kernel/cpu/microcode/core.c static enum ucode_state microcode_init_cpu(int cpu, bool refresh_fw)
cpu               702 arch/x86/kernel/cpu/microcode/core.c 	struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
cpu               707 arch/x86/kernel/cpu/microcode/core.c 	if (collect_cpu_info(cpu))
cpu               714 arch/x86/kernel/cpu/microcode/core.c 	ustate = microcode_ops->request_microcode_fw(cpu, &microcode_pdev->dev, refresh_fw);
cpu               716 arch/x86/kernel/cpu/microcode/core.c 		pr_debug("CPU%d updated upon init\n", cpu);
cpu               717 arch/x86/kernel/cpu/microcode/core.c 		apply_microcode_on_target(cpu);
cpu               723 arch/x86/kernel/cpu/microcode/core.c static enum ucode_state microcode_update_cpu(int cpu)
cpu               725 arch/x86/kernel/cpu/microcode/core.c 	struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
cpu               728 arch/x86/kernel/cpu/microcode/core.c 	collect_cpu_info(cpu);
cpu               731 arch/x86/kernel/cpu/microcode/core.c 		return microcode_resume_cpu(cpu);
cpu               733 arch/x86/kernel/cpu/microcode/core.c 	return microcode_init_cpu(cpu, false);
cpu               738 arch/x86/kernel/cpu/microcode/core.c 	int err, cpu = dev->id;
cpu               740 arch/x86/kernel/cpu/microcode/core.c 	if (!cpu_online(cpu))
cpu               743 arch/x86/kernel/cpu/microcode/core.c 	pr_debug("CPU%d added\n", cpu);
cpu               749 arch/x86/kernel/cpu/microcode/core.c 	if (microcode_init_cpu(cpu, true) == UCODE_ERROR)
cpu               757 arch/x86/kernel/cpu/microcode/core.c 	int cpu = dev->id;
cpu               759 arch/x86/kernel/cpu/microcode/core.c 	if (!cpu_online(cpu))
cpu               762 arch/x86/kernel/cpu/microcode/core.c 	pr_debug("CPU%d removed\n", cpu);
cpu               763 arch/x86/kernel/cpu/microcode/core.c 	microcode_fini_cpu(cpu);
cpu               779 arch/x86/kernel/cpu/microcode/core.c 	int cpu = smp_processor_id();
cpu               780 arch/x86/kernel/cpu/microcode/core.c 	struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
cpu               783 arch/x86/kernel/cpu/microcode/core.c 		microcode_ops->apply_microcode(cpu);
cpu               792 arch/x86/kernel/cpu/microcode/core.c static int mc_cpu_starting(unsigned int cpu)
cpu               794 arch/x86/kernel/cpu/microcode/core.c 	microcode_update_cpu(cpu);
cpu               795 arch/x86/kernel/cpu/microcode/core.c 	pr_debug("CPU%d added\n", cpu);
cpu               799 arch/x86/kernel/cpu/microcode/core.c static int mc_cpu_online(unsigned int cpu)
cpu               801 arch/x86/kernel/cpu/microcode/core.c 	struct device *dev = get_cpu_device(cpu);
cpu               804 arch/x86/kernel/cpu/microcode/core.c 		pr_err("Failed to create group for CPU%d\n", cpu);
cpu               808 arch/x86/kernel/cpu/microcode/core.c static int mc_cpu_down_prep(unsigned int cpu)
cpu               812 arch/x86/kernel/cpu/microcode/core.c 	dev = get_cpu_device(cpu);
cpu               815 arch/x86/kernel/cpu/microcode/core.c 	pr_debug("CPU%d removed\n", cpu);
cpu               790 arch/x86/kernel/cpu/microcode/intel.c static enum ucode_state apply_microcode_intel(int cpu)
cpu               792 arch/x86/kernel/cpu/microcode/intel.c 	struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
cpu               793 arch/x86/kernel/cpu/microcode/intel.c 	struct cpuinfo_x86 *c = &cpu_data(cpu);
cpu               800 arch/x86/kernel/cpu/microcode/intel.c 	if (WARN_ON(raw_smp_processor_id() != cpu))
cpu               835 arch/x86/kernel/cpu/microcode/intel.c 		       cpu, mc->hdr.rev);
cpu               861 arch/x86/kernel/cpu/microcode/intel.c static enum ucode_state generic_load_microcode(int cpu, struct iov_iter *iter)
cpu               863 arch/x86/kernel/cpu/microcode/intel.c 	struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
cpu               940 arch/x86/kernel/cpu/microcode/intel.c 		 cpu, new_rev, uci->cpu_sig.rev);
cpu               945 arch/x86/kernel/cpu/microcode/intel.c static bool is_blacklisted(unsigned int cpu)
cpu               947 arch/x86/kernel/cpu/microcode/intel.c 	struct cpuinfo_x86 *c = &cpu_data(cpu);
cpu               968 arch/x86/kernel/cpu/microcode/intel.c static enum ucode_state request_microcode_fw(int cpu, struct device *device,
cpu               971 arch/x86/kernel/cpu/microcode/intel.c 	struct cpuinfo_x86 *c = &cpu_data(cpu);
cpu               978 arch/x86/kernel/cpu/microcode/intel.c 	if (is_blacklisted(cpu))
cpu               992 arch/x86/kernel/cpu/microcode/intel.c 	ret = generic_load_microcode(cpu, &iter);
cpu              1000 arch/x86/kernel/cpu/microcode/intel.c request_microcode_user(int cpu, const void __user *buf, size_t size)
cpu              1005 arch/x86/kernel/cpu/microcode/intel.c 	if (is_blacklisted(cpu))
cpu              1012 arch/x86/kernel/cpu/microcode/intel.c 	return generic_load_microcode(cpu, &iter);
cpu                14 arch/x86/kernel/cpu/proc.c 			      unsigned int cpu)
cpu                19 arch/x86/kernel/cpu/proc.c 		   cpumask_weight(topology_core_cpumask(cpu)));
cpu                60 arch/x86/kernel/cpu/proc.c 	unsigned int cpu;
cpu                63 arch/x86/kernel/cpu/proc.c 	cpu = c->cpu_index;
cpu                69 arch/x86/kernel/cpu/proc.c 		   cpu,
cpu                83 arch/x86/kernel/cpu/proc.c 		unsigned int freq = aperfmperf_get_khz(cpu);
cpu                86 arch/x86/kernel/cpu/proc.c 			freq = cpufreq_quick_get(cpu);
cpu                97 arch/x86/kernel/cpu/proc.c 	show_cpuinfo_core(m, c, cpu);
cpu               349 arch/x86/kernel/cpu/resctrl/core.c static int get_cache_id(int cpu, int level)
cpu               351 arch/x86/kernel/cpu/resctrl/core.c 	struct cpu_cacheinfo *ci = get_cpu_cacheinfo(cpu);
cpu               405 arch/x86/kernel/cpu/resctrl/core.c struct rdt_domain *get_domain_from_cpu(int cpu, struct rdt_resource *r)
cpu               411 arch/x86/kernel/cpu/resctrl/core.c 		if (cpumask_test_cpu(cpu, &d->cpu_mask))
cpu               422 arch/x86/kernel/cpu/resctrl/core.c 	int cpu = smp_processor_id();
cpu               425 arch/x86/kernel/cpu/resctrl/core.c 	d = get_domain_from_cpu(cpu, r);
cpu               431 arch/x86/kernel/cpu/resctrl/core.c 		     cpu, r->name);
cpu               557 arch/x86/kernel/cpu/resctrl/core.c static void domain_add_cpu(int cpu, struct rdt_resource *r)
cpu               559 arch/x86/kernel/cpu/resctrl/core.c 	int id = get_cache_id(cpu, r->cache_level);
cpu               565 arch/x86/kernel/cpu/resctrl/core.c 		pr_warn("Couldn't find cache id for cpu %d\n", cpu);
cpu               570 arch/x86/kernel/cpu/resctrl/core.c 		cpumask_set_cpu(cpu, &d->cpu_mask);
cpu               574 arch/x86/kernel/cpu/resctrl/core.c 	d = kzalloc_node(sizeof(*d), GFP_KERNEL, cpu_to_node(cpu));
cpu               579 arch/x86/kernel/cpu/resctrl/core.c 	cpumask_set_cpu(cpu, &d->cpu_mask);
cpu               603 arch/x86/kernel/cpu/resctrl/core.c static void domain_remove_cpu(int cpu, struct rdt_resource *r)
cpu               605 arch/x86/kernel/cpu/resctrl/core.c 	int id = get_cache_id(cpu, r->cache_level);
cpu               610 arch/x86/kernel/cpu/resctrl/core.c 		pr_warn("Couldn't find cache id for cpu %d\n", cpu);
cpu               614 arch/x86/kernel/cpu/resctrl/core.c 	cpumask_clear_cpu(cpu, &d->cpu_mask);
cpu               655 arch/x86/kernel/cpu/resctrl/core.c 		if (is_mbm_enabled() && cpu == d->mbm_work_cpu) {
cpu               659 arch/x86/kernel/cpu/resctrl/core.c 		if (is_llc_occupancy_enabled() && cpu == d->cqm_work_cpu &&
cpu               667 arch/x86/kernel/cpu/resctrl/core.c static void clear_closid_rmid(int cpu)
cpu               678 arch/x86/kernel/cpu/resctrl/core.c static int resctrl_online_cpu(unsigned int cpu)
cpu               684 arch/x86/kernel/cpu/resctrl/core.c 		domain_add_cpu(cpu, r);
cpu               686 arch/x86/kernel/cpu/resctrl/core.c 	cpumask_set_cpu(cpu, &rdtgroup_default.cpu_mask);
cpu               687 arch/x86/kernel/cpu/resctrl/core.c 	clear_closid_rmid(cpu);
cpu               693 arch/x86/kernel/cpu/resctrl/core.c static void clear_childcpus(struct rdtgroup *r, unsigned int cpu)
cpu               698 arch/x86/kernel/cpu/resctrl/core.c 		if (cpumask_test_and_clear_cpu(cpu, &cr->cpu_mask)) {
cpu               704 arch/x86/kernel/cpu/resctrl/core.c static int resctrl_offline_cpu(unsigned int cpu)
cpu               711 arch/x86/kernel/cpu/resctrl/core.c 		domain_remove_cpu(cpu, r);
cpu               713 arch/x86/kernel/cpu/resctrl/core.c 		if (cpumask_test_and_clear_cpu(cpu, &rdtgrp->cpu_mask)) {
cpu               714 arch/x86/kernel/cpu/resctrl/core.c 			clear_childcpus(rdtgrp, cpu);
cpu               718 arch/x86/kernel/cpu/resctrl/core.c 	clear_closid_rmid(cpu);
cpu               312 arch/x86/kernel/cpu/resctrl/ctrlmondata.c 	int cpu;
cpu               336 arch/x86/kernel/cpu/resctrl/ctrlmondata.c 	cpu = get_cpu();
cpu               338 arch/x86/kernel/cpu/resctrl/ctrlmondata.c 	if (cpumask_test_cpu(cpu, cpu_mask))
cpu               177 arch/x86/kernel/cpu/resctrl/internal.h 	int			cpu;
cpu               577 arch/x86/kernel/cpu/resctrl/internal.h struct rdt_domain *get_domain_from_cpu(int cpu, struct rdt_resource *r);
cpu               169 arch/x86/kernel/cpu/resctrl/monitor.c 	int cpu;
cpu               175 arch/x86/kernel/cpu/resctrl/monitor.c 	cpu = get_cpu();
cpu               177 arch/x86/kernel/cpu/resctrl/monitor.c 		if (cpumask_test_cpu(cpu, &d->cpu_mask)) {
cpu               473 arch/x86/kernel/cpu/resctrl/monitor.c 	int cpu = smp_processor_id();
cpu               480 arch/x86/kernel/cpu/resctrl/monitor.c 	d = get_domain_from_cpu(cpu, r);
cpu               490 arch/x86/kernel/cpu/resctrl/monitor.c 		schedule_delayed_work_on(cpu, &d->cqm_limbo, delay);
cpu               499 arch/x86/kernel/cpu/resctrl/monitor.c 	int cpu;
cpu               501 arch/x86/kernel/cpu/resctrl/monitor.c 	cpu = cpumask_any(&dom->cpu_mask);
cpu               502 arch/x86/kernel/cpu/resctrl/monitor.c 	dom->cqm_work_cpu = cpu;
cpu               504 arch/x86/kernel/cpu/resctrl/monitor.c 	schedule_delayed_work_on(cpu, &dom->cqm_limbo, delay);
cpu               511 arch/x86/kernel/cpu/resctrl/monitor.c 	int cpu = smp_processor_id();
cpu               520 arch/x86/kernel/cpu/resctrl/monitor.c 	d = get_domain_from_cpu(cpu, &rdt_resources_all[RDT_RESOURCE_L3]);
cpu               535 arch/x86/kernel/cpu/resctrl/monitor.c 	schedule_delayed_work_on(cpu, &d->mbm_over, delay);
cpu               544 arch/x86/kernel/cpu/resctrl/monitor.c 	int cpu;
cpu               548 arch/x86/kernel/cpu/resctrl/monitor.c 	cpu = cpumask_any(&dom->cpu_mask);
cpu               549 arch/x86/kernel/cpu/resctrl/monitor.c 	dom->mbm_work_cpu = cpu;
cpu               550 arch/x86/kernel/cpu/resctrl/monitor.c 	schedule_delayed_work_on(cpu, &dom->mbm_over, delay);
cpu               203 arch/x86/kernel/cpu/resctrl/pseudo_lock.c 	int cpu;
cpu               206 arch/x86/kernel/cpu/resctrl/pseudo_lock.c 	for_each_cpu(cpu, &plr->d->cpu_mask) {
cpu               213 arch/x86/kernel/cpu/resctrl/pseudo_lock.c 		ret = dev_pm_qos_add_request(get_cpu_device(cpu),
cpu               219 arch/x86/kernel/cpu/resctrl/pseudo_lock.c 					    cpu);
cpu               282 arch/x86/kernel/cpu/resctrl/pseudo_lock.c 	plr->cpu = cpumask_first(&plr->d->cpu_mask);
cpu               284 arch/x86/kernel/cpu/resctrl/pseudo_lock.c 	if (!cpu_online(plr->cpu)) {
cpu               286 arch/x86/kernel/cpu/resctrl/pseudo_lock.c 				    plr->cpu);
cpu               291 arch/x86/kernel/cpu/resctrl/pseudo_lock.c 	ci = get_cpu_cacheinfo(plr->cpu);
cpu               945 arch/x86/kernel/cpu/resctrl/pseudo_lock.c 	miss_event = perf_event_create_kernel_counter(miss_attr, plr->cpu,
cpu               950 arch/x86/kernel/cpu/resctrl/pseudo_lock.c 	hit_event = perf_event_create_kernel_counter(hit_attr, plr->cpu,
cpu              1155 arch/x86/kernel/cpu/resctrl/pseudo_lock.c 	unsigned int cpu;
cpu              1172 arch/x86/kernel/cpu/resctrl/pseudo_lock.c 	cpu = cpumask_first(&plr->d->cpu_mask);
cpu              1173 arch/x86/kernel/cpu/resctrl/pseudo_lock.c 	if (!cpu_online(cpu)) {
cpu              1178 arch/x86/kernel/cpu/resctrl/pseudo_lock.c 	plr->cpu = cpu;
cpu              1182 arch/x86/kernel/cpu/resctrl/pseudo_lock.c 						cpu_to_node(cpu),
cpu              1184 arch/x86/kernel/cpu/resctrl/pseudo_lock.c 						cpu);
cpu              1187 arch/x86/kernel/cpu/resctrl/pseudo_lock.c 						cpu_to_node(cpu),
cpu              1189 arch/x86/kernel/cpu/resctrl/pseudo_lock.c 						cpu);
cpu              1192 arch/x86/kernel/cpu/resctrl/pseudo_lock.c 						cpu_to_node(cpu),
cpu              1194 arch/x86/kernel/cpu/resctrl/pseudo_lock.c 						cpu);
cpu              1202 arch/x86/kernel/cpu/resctrl/pseudo_lock.c 	kthread_bind(thread, cpu);
cpu              1292 arch/x86/kernel/cpu/resctrl/pseudo_lock.c 					cpu_to_node(plr->cpu),
cpu              1293 arch/x86/kernel/cpu/resctrl/pseudo_lock.c 					"pseudo_lock/%u", plr->cpu);
cpu              1300 arch/x86/kernel/cpu/resctrl/pseudo_lock.c 	kthread_bind(thread, plr->cpu);
cpu               325 arch/x86/kernel/cpu/resctrl/rdtgroup.c 	int cpu = get_cpu();
cpu               327 arch/x86/kernel/cpu/resctrl/rdtgroup.c 	if (cpumask_test_cpu(cpu, cpu_mask))
cpu              1742 arch/x86/kernel/cpu/resctrl/rdtgroup.c 	int cpu;
cpu              1759 arch/x86/kernel/cpu/resctrl/rdtgroup.c 	cpu = get_cpu();
cpu              1761 arch/x86/kernel/cpu/resctrl/rdtgroup.c 	if (cpumask_test_cpu(cpu, cpu_mask))
cpu              2129 arch/x86/kernel/cpu/resctrl/rdtgroup.c 	int i, cpu;
cpu              2149 arch/x86/kernel/cpu/resctrl/rdtgroup.c 	cpu = get_cpu();
cpu              2151 arch/x86/kernel/cpu/resctrl/rdtgroup.c 	if (cpumask_test_cpu(cpu, cpu_mask))
cpu              2899 arch/x86/kernel/cpu/resctrl/rdtgroup.c 	int cpu;
cpu              2905 arch/x86/kernel/cpu/resctrl/rdtgroup.c 	for_each_cpu(cpu, &rdtgrp->cpu_mask)
cpu              2906 arch/x86/kernel/cpu/resctrl/rdtgroup.c 		per_cpu(pqr_state.default_rmid, cpu) = prdtgrp->mon.rmid;
cpu              2951 arch/x86/kernel/cpu/resctrl/rdtgroup.c 	int cpu;
cpu              2961 arch/x86/kernel/cpu/resctrl/rdtgroup.c 	for_each_cpu(cpu, &rdtgrp->cpu_mask) {
cpu              2962 arch/x86/kernel/cpu/resctrl/rdtgroup.c 		per_cpu(pqr_state.default_closid, cpu) = rdtgroup_default.closid;
cpu              2963 arch/x86/kernel/cpu/resctrl/rdtgroup.c 		per_cpu(pqr_state.default_rmid, cpu) = rdtgroup_default.mon.rmid;
cpu                59 arch/x86/kernel/cpu/umwait.c static int umwait_cpu_online(unsigned int cpu)
cpu                71 arch/x86/kernel/cpu/umwait.c static int umwait_cpu_offline(unsigned int cpu)
cpu               142 arch/x86/kernel/cpu/vmware.c 	pv_ops.cpu.io_delay = paravirt_nop;
cpu                67 arch/x86/kernel/cpuid.c 	int cpu = iminor(file_inode(file));
cpu                85 arch/x86/kernel/cpuid.c 		err = smp_call_function_single_async(cpu, &csd);
cpu               104 arch/x86/kernel/cpuid.c 	unsigned int cpu;
cpu               107 arch/x86/kernel/cpuid.c 	cpu = iminor(file_inode(file));
cpu               108 arch/x86/kernel/cpuid.c 	if (cpu >= nr_cpu_ids || !cpu_online(cpu))
cpu               111 arch/x86/kernel/cpuid.c 	c = &cpu_data(cpu);
cpu               128 arch/x86/kernel/cpuid.c static int cpuid_device_create(unsigned int cpu)
cpu               132 arch/x86/kernel/cpuid.c 	dev = device_create(cpuid_class, NULL, MKDEV(CPUID_MAJOR, cpu), NULL,
cpu               133 arch/x86/kernel/cpuid.c 			    "cpu%d", cpu);
cpu               137 arch/x86/kernel/cpuid.c static int cpuid_device_destroy(unsigned int cpu)
cpu               139 arch/x86/kernel/cpuid.c 	device_destroy(cpuid_class, MKDEV(CPUID_MAJOR, cpu));
cpu                73 arch/x86/kernel/crash.c static void kdump_nmi_callback(int cpu, struct pt_regs *regs)
cpu                75 arch/x86/kernel/crash.c 	crash_save_cpu(regs, cpu);
cpu               307 arch/x86/kernel/dumpstack.c 	int cpu;
cpu               314 arch/x86/kernel/dumpstack.c 	cpu = smp_processor_id();
cpu               316 arch/x86/kernel/dumpstack.c 		if (cpu == die_owner)
cpu               322 arch/x86/kernel/dumpstack.c 	die_owner = cpu;
cpu                75 arch/x86/kernel/espfix_64.c static inline unsigned long espfix_base_addr(unsigned int cpu)
cpu                80 arch/x86/kernel/espfix_64.c 	page = (cpu / ESPFIX_STACKS_PER_PAGE) ^ page_random;
cpu                81 arch/x86/kernel/espfix_64.c 	slot = (cpu + slot_random) % ESPFIX_STACKS_PER_PAGE;
cpu               131 arch/x86/kernel/espfix_64.c void init_espfix_ap(int cpu)
cpu               143 arch/x86/kernel/espfix_64.c 	if (likely(per_cpu(espfix_stack, cpu)))
cpu               146 arch/x86/kernel/espfix_64.c 	addr = espfix_base_addr(cpu);
cpu               147 arch/x86/kernel/espfix_64.c 	page = cpu/ESPFIX_STACKS_PER_PAGE;
cpu               161 arch/x86/kernel/espfix_64.c 	node = cpu_to_node(cpu);
cpu               204 arch/x86/kernel/espfix_64.c 	per_cpu(espfix_stack, cpu) = addr;
cpu               205 arch/x86/kernel/espfix_64.c 	per_cpu(espfix_waddr, cpu) = (unsigned long)stack_page
cpu                26 arch/x86/kernel/hpet.c 	unsigned int			cpu;
cpu               403 arch/x86/kernel/hpet.c 	evt->cpumask		= cpumask_of(hc->cpu);
cpu               421 arch/x86/kernel/hpet.c 	hc->cpu = boot_cpu_data.cpu_index;
cpu               533 arch/x86/kernel/hpet.c 	irq_set_affinity(hc->irq, cpumask_of(hc->cpu));
cpu               542 arch/x86/kernel/hpet.c static void init_one_hpet_msi_clockevent(struct hpet_channel *hc, int cpu)
cpu               546 arch/x86/kernel/hpet.c 	hc->cpu = cpu;
cpu               547 arch/x86/kernel/hpet.c 	per_cpu(cpu_hpet_channel, cpu) = hc;
cpu               572 arch/x86/kernel/hpet.c static int hpet_cpuhp_online(unsigned int cpu)
cpu               577 arch/x86/kernel/hpet.c 		init_one_hpet_msi_clockevent(hc, cpu);
cpu               581 arch/x86/kernel/hpet.c static int hpet_cpuhp_dead(unsigned int cpu)
cpu               583 arch/x86/kernel/hpet.c 	struct hpet_channel *hc = per_cpu(cpu_hpet_channel, cpu);
cpu               589 arch/x86/kernel/hpet.c 	per_cpu(cpu_hpet_channel, cpu) = NULL;
cpu               440 arch/x86/kernel/hw_breakpoint.c 	int i, cpu, rc = NOTIFY_STOP;
cpu               466 arch/x86/kernel/hw_breakpoint.c 	cpu = get_cpu();
cpu               481 arch/x86/kernel/hw_breakpoint.c 		bp = per_cpu(bp_per_reg[i], cpu);
cpu               191 arch/x86/kernel/irq.c u64 arch_irq_stat_cpu(unsigned int cpu)
cpu               193 arch/x86/kernel/irq.c 	u64 sum = irq_stats(cpu)->__nmi_count;
cpu               196 arch/x86/kernel/irq.c 	sum += irq_stats(cpu)->apic_timer_irqs;
cpu               197 arch/x86/kernel/irq.c 	sum += irq_stats(cpu)->irq_spurious_count;
cpu               198 arch/x86/kernel/irq.c 	sum += irq_stats(cpu)->apic_perf_irqs;
cpu               199 arch/x86/kernel/irq.c 	sum += irq_stats(cpu)->apic_irq_work_irqs;
cpu               200 arch/x86/kernel/irq.c 	sum += irq_stats(cpu)->icr_read_retry_count;
cpu               202 arch/x86/kernel/irq.c 		sum += irq_stats(cpu)->x86_platform_ipis;
cpu               205 arch/x86/kernel/irq.c 	sum += irq_stats(cpu)->irq_resched_count;
cpu               206 arch/x86/kernel/irq.c 	sum += irq_stats(cpu)->irq_call_count;
cpu               209 arch/x86/kernel/irq.c 	sum += irq_stats(cpu)->irq_thermal_count;
cpu               212 arch/x86/kernel/irq.c 	sum += irq_stats(cpu)->irq_threshold_count;
cpu               215 arch/x86/kernel/irq.c 	sum += per_cpu(mce_exception_count, cpu);
cpu               216 arch/x86/kernel/irq.c 	sum += per_cpu(mce_poll_count, cpu);
cpu               112 arch/x86/kernel/irq_32.c int irq_init_percpu_irqstack(unsigned int cpu)
cpu               114 arch/x86/kernel/irq_32.c 	int node = cpu_to_node(cpu);
cpu               117 arch/x86/kernel/irq_32.c 	if (per_cpu(hardirq_stack_ptr, cpu))
cpu               129 arch/x86/kernel/irq_32.c 	per_cpu(hardirq_stack_ptr, cpu) = page_address(ph);
cpu               130 arch/x86/kernel/irq_32.c 	per_cpu(softirq_stack_ptr, cpu) = page_address(ps);
cpu                33 arch/x86/kernel/irq_64.c static int map_irq_stack(unsigned int cpu)
cpu                35 arch/x86/kernel/irq_64.c 	char *stack = (char *)per_cpu_ptr(&irq_stack_backing_store, cpu);
cpu                50 arch/x86/kernel/irq_64.c 	per_cpu(hardirq_stack_ptr, cpu) = va + IRQ_STACK_SIZE;
cpu                58 arch/x86/kernel/irq_64.c static int map_irq_stack(unsigned int cpu)
cpu                60 arch/x86/kernel/irq_64.c 	void *va = per_cpu_ptr(&irq_stack_backing_store, cpu);
cpu                62 arch/x86/kernel/irq_64.c 	per_cpu(hardirq_stack_ptr, cpu) = va + IRQ_STACK_SIZE;
cpu                67 arch/x86/kernel/irq_64.c int irq_init_percpu_irqstack(unsigned int cpu)
cpu                69 arch/x86/kernel/irq_64.c 	if (per_cpu(hardirq_stack_ptr, cpu))
cpu                71 arch/x86/kernel/irq_64.c 	return map_irq_stack(cpu);
cpu               171 arch/x86/kernel/itmt.c int arch_asym_cpu_priority(int cpu)
cpu               173 arch/x86/kernel/itmt.c 	return per_cpu(sched_core_priority, cpu);
cpu               192 arch/x86/kernel/itmt.c 	int cpu, i = 1;
cpu               194 arch/x86/kernel/itmt.c 	for_each_cpu(cpu, topology_sibling_cpumask(core_cpu)) {
cpu               203 arch/x86/kernel/itmt.c 		per_cpu(sched_core_priority, cpu) = smt_prio;
cpu                85 arch/x86/kernel/jailhouse.c 	unsigned int cpu;
cpu                91 arch/x86/kernel/jailhouse.c 	for (cpu = 0; cpu < setup_data.num_cpus; cpu++) {
cpu                92 arch/x86/kernel/jailhouse.c 		generic_processor_info(setup_data.cpu_ids[cpu],
cpu               197 arch/x86/kernel/kgdb.c 		int cpu = raw_smp_processor_id();
cpu               208 arch/x86/kernel/kgdb.c 		bp = *per_cpu_ptr(breakinfo[breakno].pev, cpu);
cpu               228 arch/x86/kernel/kgdb.c 	int cpu;
cpu               235 arch/x86/kernel/kgdb.c 	for_each_online_cpu(cpu) {
cpu               237 arch/x86/kernel/kgdb.c 		pevent = per_cpu_ptr(breakinfo[breakno].pev, cpu);
cpu               245 arch/x86/kernel/kgdb.c 	for_each_online_cpu(cpu) {
cpu               249 arch/x86/kernel/kgdb.c 		pevent = per_cpu_ptr(breakinfo[breakno].pev, cpu);
cpu               258 arch/x86/kernel/kgdb.c 	int cpu;
cpu               263 arch/x86/kernel/kgdb.c 	for_each_online_cpu(cpu) {
cpu               264 arch/x86/kernel/kgdb.c 		pevent = per_cpu_ptr(breakinfo[breakno].pev, cpu);
cpu               298 arch/x86/kernel/kgdb.c 	int cpu = raw_smp_processor_id();
cpu               304 arch/x86/kernel/kgdb.c 		bp = *per_cpu_ptr(breakinfo[i].pev, cpu);
cpu               384 arch/x86/kernel/kgdb.c 	int cpu = raw_smp_processor_id();
cpu               397 arch/x86/kernel/kgdb.c 		bp = *per_cpu_ptr(breakinfo[i].pev, cpu);
cpu               498 arch/x86/kernel/kgdb.c 	int cpu;
cpu               504 arch/x86/kernel/kgdb.c 			cpu = raw_smp_processor_id();
cpu               505 arch/x86/kernel/kgdb.c 			kgdb_nmicallback(cpu, regs);
cpu               506 arch/x86/kernel/kgdb.c 			set_bit(cpu, was_in_debug_nmi);
cpu               514 arch/x86/kernel/kgdb.c 		cpu = raw_smp_processor_id();
cpu               516 arch/x86/kernel/kgdb.c 		if (__test_and_clear_bit(cpu, was_in_debug_nmi))
cpu               639 arch/x86/kernel/kgdb.c 	int i, cpu;
cpu               664 arch/x86/kernel/kgdb.c 		for_each_online_cpu(cpu) {
cpu               665 arch/x86/kernel/kgdb.c 			pevent = per_cpu_ptr(breakinfo[i].pev, cpu);
cpu                74 arch/x86/kernel/kvm.c 	int cpu;
cpu               124 arch/x86/kernel/kvm.c 	n.cpu = smp_processor_id();
cpu               167 arch/x86/kernel/kvm.c 		smp_send_reschedule(n->cpu);
cpu               183 arch/x86/kernel/kvm.c 			if (n->cpu == smp_processor_id())
cpu               220 arch/x86/kernel/kvm.c 		n->cpu = smp_processor_id();
cpu               273 arch/x86/kernel/kvm.c 		pv_ops.cpu.io_delay = kvm_io_delay;
cpu               282 arch/x86/kernel/kvm.c 	int cpu = smp_processor_id();
cpu               283 arch/x86/kernel/kvm.c 	struct kvm_steal_time *st = &per_cpu(steal_time, cpu);
cpu               290 arch/x86/kernel/kvm.c 		cpu, (unsigned long long) slow_virt_to_phys(st));
cpu               379 arch/x86/kernel/kvm.c static u64 kvm_steal_clock(int cpu)
cpu               385 arch/x86/kernel/kvm.c 	src = &per_cpu(steal_time, cpu);
cpu               419 arch/x86/kernel/kvm.c 	int cpu;
cpu               424 arch/x86/kernel/kvm.c 	for_each_possible_cpu(cpu) {
cpu               425 arch/x86/kernel/kvm.c 		__set_percpu_decrypted(&per_cpu(apf_reason, cpu), sizeof(apf_reason));
cpu               426 arch/x86/kernel/kvm.c 		__set_percpu_decrypted(&per_cpu(steal_time, cpu), sizeof(steal_time));
cpu               427 arch/x86/kernel/kvm.c 		__set_percpu_decrypted(&per_cpu(kvm_apic_eoi, cpu), sizeof(kvm_apic_eoi));
cpu               437 arch/x86/kernel/kvm.c 	int cpu, apic_id, icr;
cpu               460 arch/x86/kernel/kvm.c 	for_each_cpu(cpu, mask) {
cpu               461 arch/x86/kernel/kvm.c 		apic_id = per_cpu(x86_cpu_to_apicid, cpu);
cpu               517 arch/x86/kernel/kvm.c 	int cpu;
cpu               522 arch/x86/kernel/kvm.c 	for_each_cpu(cpu, mask) {
cpu               523 arch/x86/kernel/kvm.c 		if (vcpu_is_preempted(cpu)) {
cpu               524 arch/x86/kernel/kvm.c 			kvm_hypercall1(KVM_HC_SCHED_YIELD, per_cpu(x86_cpu_to_apicid, cpu));
cpu               559 arch/x86/kernel/kvm.c static int kvm_cpu_online(unsigned int cpu)
cpu               567 arch/x86/kernel/kvm.c static int kvm_cpu_down_prepare(unsigned int cpu)
cpu               587 arch/x86/kernel/kvm.c 	int cpu;
cpu               596 arch/x86/kernel/kvm.c 	for_each_cpu(cpu, flushmask) {
cpu               597 arch/x86/kernel/kvm.c 		src = &per_cpu(steal_time, cpu);
cpu               602 arch/x86/kernel/kvm.c 				__cpumask_clear_cpu(cpu, flushmask);
cpu               740 arch/x86/kernel/kvm.c 	int cpu;
cpu               745 arch/x86/kernel/kvm.c 		for_each_possible_cpu(cpu) {
cpu               746 arch/x86/kernel/kvm.c 			zalloc_cpumask_var_node(per_cpu_ptr(&__pv_tlb_mask, cpu),
cpu               747 arch/x86/kernel/kvm.c 				GFP_KERNEL, cpu_to_node(cpu));
cpu               759 arch/x86/kernel/kvm.c static void kvm_kick_cpu(int cpu)
cpu               764 arch/x86/kernel/kvm.c 	apicid = per_cpu(x86_cpu_to_apicid, cpu);
cpu               797 arch/x86/kernel/kvm.c __visible bool __kvm_vcpu_is_preempted(long cpu)
cpu               799 arch/x86/kernel/kvm.c 	struct kvm_steal_time *src = &per_cpu(steal_time, cpu);
cpu               872 arch/x86/kernel/kvm.c void arch_haltpoll_enable(unsigned int cpu)
cpu               881 arch/x86/kernel/kvm.c 	smp_call_function_single(cpu, kvm_disable_host_haltpoll, NULL, 1);
cpu               885 arch/x86/kernel/kvm.c void arch_haltpoll_disable(unsigned int cpu)
cpu               891 arch/x86/kernel/kvm.c 	smp_call_function_single(cpu, kvm_enable_host_haltpoll, NULL, 1);
cpu               284 arch/x86/kernel/kvmclock.c static int kvmclock_setup_percpu(unsigned int cpu)
cpu               286 arch/x86/kernel/kvmclock.c 	struct pvclock_vsyscall_time_info *p = per_cpu(hv_clock_per_cpu, cpu);
cpu               293 arch/x86/kernel/kvmclock.c 	if (!cpu || (p && p != per_cpu(hv_clock_per_cpu, 0)))
cpu               297 arch/x86/kernel/kvmclock.c 	if (cpu < HVC_BOOT_ARRAY_SIZE)
cpu               298 arch/x86/kernel/kvmclock.c 		p = &hv_clock_boot[cpu];
cpu               300 arch/x86/kernel/kvmclock.c 		p = hvclock_mem + cpu - HVC_BOOT_ARRAY_SIZE;
cpu               304 arch/x86/kernel/kvmclock.c 	per_cpu(hv_clock_per_cpu, cpu) = p;
cpu                51 arch/x86/kernel/msr.c 	int cpu = iminor(file_inode(file));
cpu                59 arch/x86/kernel/msr.c 		err = rdmsr_safe_on_cpu(cpu, reg, &data[0], &data[1]);
cpu                79 arch/x86/kernel/msr.c 	int cpu = iminor(file_inode(file));
cpu                95 arch/x86/kernel/msr.c 		err = wrmsr_safe_on_cpu(cpu, reg, data[0], data[1]);
cpu               109 arch/x86/kernel/msr.c 	int cpu = iminor(file_inode(file));
cpu               122 arch/x86/kernel/msr.c 		err = rdmsr_safe_regs_on_cpu(cpu, regs);
cpu               141 arch/x86/kernel/msr.c 		err = wrmsr_safe_regs_on_cpu(cpu, regs);
cpu               158 arch/x86/kernel/msr.c 	unsigned int cpu = iminor(file_inode(file));
cpu               164 arch/x86/kernel/msr.c 	if (cpu >= nr_cpu_ids || !cpu_online(cpu))
cpu               167 arch/x86/kernel/msr.c 	c = &cpu_data(cpu);
cpu               187 arch/x86/kernel/msr.c static int msr_device_create(unsigned int cpu)
cpu               191 arch/x86/kernel/msr.c 	dev = device_create(msr_class, NULL, MKDEV(MSR_MAJOR, cpu), NULL,
cpu               192 arch/x86/kernel/msr.c 			    "msr%d", cpu);
cpu               196 arch/x86/kernel/msr.c static int msr_device_destroy(unsigned int cpu)
cpu               198 arch/x86/kernel/msr.c 	device_destroy(msr_class, MKDEV(MSR_MAJOR, cpu));
cpu                57 arch/x86/kernel/nmi_selftest.c         int cpu = raw_smp_processor_id();
cpu                59 arch/x86/kernel/nmi_selftest.c         if (cpumask_test_and_clear_cpu(cpu, to_cpumask(nmi_ipi_mask)))
cpu                24 arch/x86/kernel/paravirt-spinlocks.c __visible bool __native_vcpu_is_preempted(long cpu)
cpu               137 arch/x86/kernel/paravirt.c 	else if (type == PARAVIRT_PATCH(cpu.iret) ||
cpu               138 arch/x86/kernel/paravirt.c 		 type == PARAVIRT_PATCH(cpu.usergs_sysret64))
cpu               184 arch/x86/kernel/paravirt.c static u64 native_steal_clock(int cpu)
cpu               305 arch/x86/kernel/paravirt.c 	.cpu.io_delay		= native_io_delay,
cpu               308 arch/x86/kernel/paravirt.c 	.cpu.cpuid		= native_cpuid,
cpu               309 arch/x86/kernel/paravirt.c 	.cpu.get_debugreg	= native_get_debugreg,
cpu               310 arch/x86/kernel/paravirt.c 	.cpu.set_debugreg	= native_set_debugreg,
cpu               311 arch/x86/kernel/paravirt.c 	.cpu.read_cr0		= native_read_cr0,
cpu               312 arch/x86/kernel/paravirt.c 	.cpu.write_cr0		= native_write_cr0,
cpu               313 arch/x86/kernel/paravirt.c 	.cpu.write_cr4		= native_write_cr4,
cpu               314 arch/x86/kernel/paravirt.c 	.cpu.wbinvd		= native_wbinvd,
cpu               315 arch/x86/kernel/paravirt.c 	.cpu.read_msr		= native_read_msr,
cpu               316 arch/x86/kernel/paravirt.c 	.cpu.write_msr		= native_write_msr,
cpu               317 arch/x86/kernel/paravirt.c 	.cpu.read_msr_safe	= native_read_msr_safe,
cpu               318 arch/x86/kernel/paravirt.c 	.cpu.write_msr_safe	= native_write_msr_safe,
cpu               319 arch/x86/kernel/paravirt.c 	.cpu.read_pmc		= native_read_pmc,
cpu               320 arch/x86/kernel/paravirt.c 	.cpu.load_tr_desc	= native_load_tr_desc,
cpu               321 arch/x86/kernel/paravirt.c 	.cpu.set_ldt		= native_set_ldt,
cpu               322 arch/x86/kernel/paravirt.c 	.cpu.load_gdt		= native_load_gdt,
cpu               323 arch/x86/kernel/paravirt.c 	.cpu.load_idt		= native_load_idt,
cpu               324 arch/x86/kernel/paravirt.c 	.cpu.store_tr		= native_store_tr,
cpu               325 arch/x86/kernel/paravirt.c 	.cpu.load_tls		= native_load_tls,
cpu               327 arch/x86/kernel/paravirt.c 	.cpu.load_gs_index	= native_load_gs_index,
cpu               329 arch/x86/kernel/paravirt.c 	.cpu.write_ldt_entry	= native_write_ldt_entry,
cpu               330 arch/x86/kernel/paravirt.c 	.cpu.write_gdt_entry	= native_write_gdt_entry,
cpu               331 arch/x86/kernel/paravirt.c 	.cpu.write_idt_entry	= native_write_idt_entry,
cpu               333 arch/x86/kernel/paravirt.c 	.cpu.alloc_ldt		= paravirt_nop,
cpu               334 arch/x86/kernel/paravirt.c 	.cpu.free_ldt		= paravirt_nop,
cpu               336 arch/x86/kernel/paravirt.c 	.cpu.load_sp0		= native_load_sp0,
cpu               339 arch/x86/kernel/paravirt.c 	.cpu.usergs_sysret64	= native_usergs_sysret64,
cpu               341 arch/x86/kernel/paravirt.c 	.cpu.iret		= native_iret,
cpu               342 arch/x86/kernel/paravirt.c 	.cpu.swapgs		= native_swapgs,
cpu               344 arch/x86/kernel/paravirt.c 	.cpu.set_iopl_mask	= native_set_iopl_mask,
cpu               346 arch/x86/kernel/paravirt.c 	.cpu.start_context_switch	= paravirt_nop,
cpu               347 arch/x86/kernel/paravirt.c 	.cpu.end_context_switch		= paravirt_nop,
cpu               102 arch/x86/kernel/paravirt_patch.c 	PATCH_CASE(cpu, usergs_sysret64, xxl, insn_buff, len);
cpu               103 arch/x86/kernel/paravirt_patch.c 	PATCH_CASE(cpu, swapgs, xxl, insn_buff, len);
cpu               104 arch/x86/kernel/paravirt_patch.c 	PATCH_CASE(cpu, wbinvd, xxl, insn_buff, len);
cpu               106 arch/x86/kernel/paravirt_patch.c 	PATCH_CASE(cpu, iret, xxl, insn_buff, len);
cpu               315 arch/x86/kernel/process.c 	unsigned int cpu;
cpu               332 arch/x86/kernel/process.c 	for_each_cpu(cpu, topology_sibling_cpumask(this_cpu)) {
cpu               333 arch/x86/kernel/process.c 		if (cpu == this_cpu)
cpu               336 arch/x86/kernel/process.c 		if (!per_cpu(ssb_state, cpu).shared_state)
cpu               340 arch/x86/kernel/process.c 		st->shared_state = per_cpu(ssb_state, cpu).shared_state;
cpu               234 arch/x86/kernel/process_32.c 	int cpu = smp_processor_id();
cpu               239 arch/x86/kernel/process_32.c 		switch_fpu_prepare(prev_fpu, cpu);
cpu               256 arch/x86/kernel/process_32.c 	load_TLS(next, cpu);
cpu               510 arch/x86/kernel/process_64.c 	int cpu = smp_processor_id();
cpu               516 arch/x86/kernel/process_64.c 		switch_fpu_prepare(prev_fpu, cpu);
cpu               529 arch/x86/kernel/process_64.c 	load_TLS(next, cpu);
cpu               529 arch/x86/kernel/reboot.c static void vmxoff_nmi(int cpu, struct pt_regs *regs)
cpu               815 arch/x86/kernel/reboot.c 	int cpu;
cpu               817 arch/x86/kernel/reboot.c 	cpu = raw_smp_processor_id();
cpu               824 arch/x86/kernel/reboot.c 	if (cpu == crashing_cpu)
cpu               828 arch/x86/kernel/reboot.c 	shootdown_callback(cpu, regs);
cpu                71 arch/x86/kernel/setup_percpu.c 	unsigned int cpu;
cpu                73 arch/x86/kernel/setup_percpu.c 	for_each_possible_cpu(cpu) {
cpu                74 arch/x86/kernel/setup_percpu.c 		int node = early_cpu_to_node(cpu);
cpu               100 arch/x86/kernel/setup_percpu.c static void * __init pcpu_alloc_bootmem(unsigned int cpu, unsigned long size,
cpu               105 arch/x86/kernel/setup_percpu.c 	int node = early_cpu_to_node(cpu);
cpu               111 arch/x86/kernel/setup_percpu.c 			cpu, node);
cpu               113 arch/x86/kernel/setup_percpu.c 			 cpu, size, __pa(ptr));
cpu               120 arch/x86/kernel/setup_percpu.c 			 cpu, size, node, __pa(ptr));
cpu               131 arch/x86/kernel/setup_percpu.c static void * __init pcpu_fc_alloc(unsigned int cpu, size_t size, size_t align)
cpu               133 arch/x86/kernel/setup_percpu.c 	return pcpu_alloc_bootmem(cpu, size, align);
cpu               158 arch/x86/kernel/setup_percpu.c static inline void setup_percpu_segment(int cpu)
cpu               161 arch/x86/kernel/setup_percpu.c 	struct desc_struct d = GDT_ENTRY_INIT(0x8092, per_cpu_offset(cpu),
cpu               164 arch/x86/kernel/setup_percpu.c 	write_gdt_entry(get_cpu_gdt_rw(cpu), GDT_ENTRY_PERCPU, &d, DESCTYPE_S);
cpu               170 arch/x86/kernel/setup_percpu.c 	unsigned int cpu;
cpu               222 arch/x86/kernel/setup_percpu.c 	for_each_possible_cpu(cpu) {
cpu               223 arch/x86/kernel/setup_percpu.c 		per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];
cpu               224 arch/x86/kernel/setup_percpu.c 		per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
cpu               225 arch/x86/kernel/setup_percpu.c 		per_cpu(cpu_number, cpu) = cpu;
cpu               226 arch/x86/kernel/setup_percpu.c 		setup_percpu_segment(cpu);
cpu               227 arch/x86/kernel/setup_percpu.c 		setup_stack_canary_segment(cpu);
cpu               236 arch/x86/kernel/setup_percpu.c 		per_cpu(x86_cpu_to_apicid, cpu) =
cpu               237 arch/x86/kernel/setup_percpu.c 			early_per_cpu_map(x86_cpu_to_apicid, cpu);
cpu               238 arch/x86/kernel/setup_percpu.c 		per_cpu(x86_bios_cpu_apicid, cpu) =
cpu               239 arch/x86/kernel/setup_percpu.c 			early_per_cpu_map(x86_bios_cpu_apicid, cpu);
cpu               240 arch/x86/kernel/setup_percpu.c 		per_cpu(x86_cpu_to_acpiid, cpu) =
cpu               241 arch/x86/kernel/setup_percpu.c 			early_per_cpu_map(x86_cpu_to_acpiid, cpu);
cpu               244 arch/x86/kernel/setup_percpu.c 		per_cpu(x86_cpu_to_logical_apicid, cpu) =
cpu               245 arch/x86/kernel/setup_percpu.c 			early_per_cpu_map(x86_cpu_to_logical_apicid, cpu);
cpu               248 arch/x86/kernel/setup_percpu.c 		per_cpu(x86_cpu_to_node_map, cpu) =
cpu               249 arch/x86/kernel/setup_percpu.c 			early_per_cpu_map(x86_cpu_to_node_map, cpu);
cpu               258 arch/x86/kernel/setup_percpu.c 		set_cpu_numa_node(cpu, early_cpu_to_node(cpu));
cpu               264 arch/x86/kernel/setup_percpu.c 		if (!cpu)
cpu               265 arch/x86/kernel/setup_percpu.c 			switch_to_new_gdt(cpu);
cpu               279 arch/x86/kernel/smpboot.c bool topology_is_primary_thread(unsigned int cpu)
cpu               281 arch/x86/kernel/smpboot.c 	return apic_id_is_primary_thread(per_cpu(x86_cpu_to_apicid, cpu));
cpu               299 arch/x86/kernel/smpboot.c 	int cpu;
cpu               301 arch/x86/kernel/smpboot.c 	for_each_possible_cpu(cpu) {
cpu               302 arch/x86/kernel/smpboot.c 		struct cpuinfo_x86 *c = &cpu_data(cpu);
cpu               317 arch/x86/kernel/smpboot.c 	int cpu;
cpu               320 arch/x86/kernel/smpboot.c 	for_each_possible_cpu(cpu) {
cpu               321 arch/x86/kernel/smpboot.c 		struct cpuinfo_x86 *c = &cpu_data(cpu);
cpu               336 arch/x86/kernel/smpboot.c int topology_update_package_map(unsigned int pkg, unsigned int cpu)
cpu               348 arch/x86/kernel/smpboot.c 			cpu, pkg, new);
cpu               351 arch/x86/kernel/smpboot.c 	cpu_data(cpu).logical_proc_id = new;
cpu               359 arch/x86/kernel/smpboot.c int topology_update_die_map(unsigned int die, unsigned int cpu)
cpu               364 arch/x86/kernel/smpboot.c 	new = topology_phys_to_logical_die(die, cpu);
cpu               371 arch/x86/kernel/smpboot.c 			cpu, die, new);
cpu               374 arch/x86/kernel/smpboot.c 	cpu_data(cpu).logical_die_id = new;
cpu               573 arch/x86/kernel/smpboot.c void set_cpu_sibling_map(int cpu)
cpu               577 arch/x86/kernel/smpboot.c 	struct cpuinfo_x86 *c = &cpu_data(cpu);
cpu               581 arch/x86/kernel/smpboot.c 	cpumask_set_cpu(cpu, cpu_sibling_setup_mask);
cpu               584 arch/x86/kernel/smpboot.c 		cpumask_set_cpu(cpu, topology_sibling_cpumask(cpu));
cpu               585 arch/x86/kernel/smpboot.c 		cpumask_set_cpu(cpu, cpu_llc_shared_mask(cpu));
cpu               586 arch/x86/kernel/smpboot.c 		cpumask_set_cpu(cpu, topology_core_cpumask(cpu));
cpu               587 arch/x86/kernel/smpboot.c 		cpumask_set_cpu(cpu, topology_die_cpumask(cpu));
cpu               595 arch/x86/kernel/smpboot.c 		if ((i == cpu) || (has_smt && match_smt(c, o)))
cpu               596 arch/x86/kernel/smpboot.c 			link_mask(topology_sibling_cpumask, cpu, i);
cpu               598 arch/x86/kernel/smpboot.c 		if ((i == cpu) || (has_mp && match_llc(c, o)))
cpu               599 arch/x86/kernel/smpboot.c 			link_mask(cpu_llc_shared_mask, cpu, i);
cpu               610 arch/x86/kernel/smpboot.c 		if ((i == cpu) || (has_mp && match_pkg(c, o))) {
cpu               611 arch/x86/kernel/smpboot.c 			link_mask(topology_core_cpumask, cpu, i);
cpu               617 arch/x86/kernel/smpboot.c 			    topology_sibling_cpumask(cpu)) == 1) {
cpu               629 arch/x86/kernel/smpboot.c 				if (i != cpu)
cpu               631 arch/x86/kernel/smpboot.c 			} else if (i != cpu && !c->booted_cores)
cpu               637 arch/x86/kernel/smpboot.c 		if ((i == cpu) || (has_mp && match_die(c, o)))
cpu               638 arch/x86/kernel/smpboot.c 			link_mask(topology_die_cpumask, cpu, i);
cpu               641 arch/x86/kernel/smpboot.c 	threads = cpumask_weight(topology_sibling_cpumask(cpu));
cpu               647 arch/x86/kernel/smpboot.c const struct cpumask *cpu_coregroup_mask(int cpu)
cpu               649 arch/x86/kernel/smpboot.c 	return cpu_llc_shared_mask(cpu);
cpu               654 arch/x86/kernel/smpboot.c 	int cpu;
cpu               660 arch/x86/kernel/smpboot.c 	for_each_possible_cpu(cpu)
cpu               661 arch/x86/kernel/smpboot.c 		if (cpumask_test_cpu(cpu, cpu_callout_mask))
cpu               662 arch/x86/kernel/smpboot.c 			bogosum += cpu_data(cpu).loops_per_jiffy;
cpu               902 arch/x86/kernel/smpboot.c static void announce_cpu(int cpu, int apicid)
cpu               905 arch/x86/kernel/smpboot.c 	int node = early_cpu_to_node(cpu);
cpu               914 arch/x86/kernel/smpboot.c 	if (cpu == 1)
cpu               928 arch/x86/kernel/smpboot.c 		if (cpu == 1)
cpu               931 arch/x86/kernel/smpboot.c 		pr_cont("%*s#%d", width - num_digits(cpu), " ", cpu);
cpu               935 arch/x86/kernel/smpboot.c 			node, cpu, apicid);
cpu               940 arch/x86/kernel/smpboot.c 	int cpu;
cpu               942 arch/x86/kernel/smpboot.c 	cpu = smp_processor_id();
cpu               943 arch/x86/kernel/smpboot.c 	if (cpu == 0 && !cpu_online(cpu) && enable_start_cpu0)
cpu               962 arch/x86/kernel/smpboot.c wakeup_cpu_via_init_nmi(int cpu, unsigned long start_ip, int apicid,
cpu               973 arch/x86/kernel/smpboot.c 	if (cpu) {
cpu              1002 arch/x86/kernel/smpboot.c int common_cpu_up(unsigned int cpu, struct task_struct *idle)
cpu              1009 arch/x86/kernel/smpboot.c 	per_cpu(current_task, cpu) = idle;
cpu              1012 arch/x86/kernel/smpboot.c 	ret = irq_init_percpu_irqstack(cpu);
cpu              1018 arch/x86/kernel/smpboot.c 	per_cpu(cpu_current_top_of_stack, cpu) = task_top_of_stack(idle);
cpu              1020 arch/x86/kernel/smpboot.c 	initial_gs = per_cpu_offset(cpu);
cpu              1031 arch/x86/kernel/smpboot.c static int do_boot_cpu(int apicid, int cpu, struct task_struct *idle,
cpu              1041 arch/x86/kernel/smpboot.c 	early_gdt_descr.address = (unsigned long)get_cpu_gdt_rw(cpu);
cpu              1046 arch/x86/kernel/smpboot.c 	init_espfix_ap(cpu);
cpu              1049 arch/x86/kernel/smpboot.c 	announce_cpu(cpu, apicid);
cpu              1076 arch/x86/kernel/smpboot.c 	cpumask_clear_cpu(cpu, cpu_initialized_mask);
cpu              1088 arch/x86/kernel/smpboot.c 		boot_error = wakeup_cpu_via_init_nmi(cpu, start_ip, apicid,
cpu              1098 arch/x86/kernel/smpboot.c 			if (cpumask_test_cpu(cpu, cpu_initialized_mask)) {
cpu              1102 arch/x86/kernel/smpboot.c 				cpumask_set_cpu(cpu, cpu_callout_mask);
cpu              1114 arch/x86/kernel/smpboot.c 		while (!cpumask_test_cpu(cpu, cpu_callin_mask)) {
cpu              1135 arch/x86/kernel/smpboot.c int native_cpu_up(unsigned int cpu, struct task_struct *tidle)
cpu              1137 arch/x86/kernel/smpboot.c 	int apicid = apic->cpu_present_to_apicid(cpu);
cpu              1144 arch/x86/kernel/smpboot.c 	pr_debug("++++++++++++++++++++=_---CPU UP  %u\n", cpu);
cpu              1149 arch/x86/kernel/smpboot.c 		pr_err("%s: bad cpu %d\n", __func__, cpu);
cpu              1156 arch/x86/kernel/smpboot.c 	if (cpumask_test_cpu(cpu, cpu_callin_mask)) {
cpu              1157 arch/x86/kernel/smpboot.c 		pr_debug("do_boot_cpu %d Already started\n", cpu);
cpu              1168 arch/x86/kernel/smpboot.c 	err = cpu_check_up_prepare(cpu);
cpu              1173 arch/x86/kernel/smpboot.c 	per_cpu(fpu_fpregs_owner_ctx, cpu) = NULL;
cpu              1175 arch/x86/kernel/smpboot.c 	err = common_cpu_up(cpu, tidle);
cpu              1179 arch/x86/kernel/smpboot.c 	err = do_boot_cpu(apicid, cpu, tidle, &cpu0_nmi_registered);
cpu              1181 arch/x86/kernel/smpboot.c 		pr_err("do_boot_cpu failed(%d) to wakeup CPU#%u\n", err, cpu);
cpu              1191 arch/x86/kernel/smpboot.c 	check_tsc_sync_source(cpu);
cpu              1194 arch/x86/kernel/smpboot.c 	while (!cpu_online(cpu)) {
cpu              1250 arch/x86/kernel/smpboot.c 		unsigned int cpu;
cpu              1257 arch/x86/kernel/smpboot.c 		for_each_present_cpu(cpu) {
cpu              1259 arch/x86/kernel/smpboot.c 				set_cpu_present(cpu, false);
cpu              1264 arch/x86/kernel/smpboot.c 		for_each_possible_cpu(cpu) {
cpu              1266 arch/x86/kernel/smpboot.c 				set_cpu_possible(cpu, false);
cpu              1467 arch/x86/kernel/smpboot.c 			int cpu = hard_smp_processor_id();
cpu              1469 arch/x86/kernel/smpboot.c 			pr_warn("Boot CPU (id %d) not listed by BIOS\n", cpu);
cpu              1528 arch/x86/kernel/smpboot.c 	int max_threads, cpu;
cpu              1531 arch/x86/kernel/smpboot.c 	for_each_online_cpu (cpu) {
cpu              1532 arch/x86/kernel/smpboot.c 		int threads = cpumask_weight(topology_sibling_cpumask(cpu));
cpu              1540 arch/x86/kernel/smpboot.c static void remove_siblinginfo(int cpu)
cpu              1543 arch/x86/kernel/smpboot.c 	struct cpuinfo_x86 *c = &cpu_data(cpu);
cpu              1545 arch/x86/kernel/smpboot.c 	for_each_cpu(sibling, topology_core_cpumask(cpu)) {
cpu              1546 arch/x86/kernel/smpboot.c 		cpumask_clear_cpu(cpu, topology_core_cpumask(sibling));
cpu              1550 arch/x86/kernel/smpboot.c 		if (cpumask_weight(topology_sibling_cpumask(cpu)) == 1)
cpu              1554 arch/x86/kernel/smpboot.c 	for_each_cpu(sibling, topology_die_cpumask(cpu))
cpu              1555 arch/x86/kernel/smpboot.c 		cpumask_clear_cpu(cpu, topology_die_cpumask(sibling));
cpu              1556 arch/x86/kernel/smpboot.c 	for_each_cpu(sibling, topology_sibling_cpumask(cpu))
cpu              1557 arch/x86/kernel/smpboot.c 		cpumask_clear_cpu(cpu, topology_sibling_cpumask(sibling));
cpu              1558 arch/x86/kernel/smpboot.c 	for_each_cpu(sibling, cpu_llc_shared_mask(cpu))
cpu              1559 arch/x86/kernel/smpboot.c 		cpumask_clear_cpu(cpu, cpu_llc_shared_mask(sibling));
cpu              1560 arch/x86/kernel/smpboot.c 	cpumask_clear(cpu_llc_shared_mask(cpu));
cpu              1561 arch/x86/kernel/smpboot.c 	cpumask_clear(topology_sibling_cpumask(cpu));
cpu              1562 arch/x86/kernel/smpboot.c 	cpumask_clear(topology_core_cpumask(cpu));
cpu              1563 arch/x86/kernel/smpboot.c 	cpumask_clear(topology_die_cpumask(cpu));
cpu              1566 arch/x86/kernel/smpboot.c 	cpumask_clear_cpu(cpu, cpu_sibling_setup_mask);
cpu              1570 arch/x86/kernel/smpboot.c static void remove_cpu_from_maps(int cpu)
cpu              1572 arch/x86/kernel/smpboot.c 	set_cpu_online(cpu, false);
cpu              1573 arch/x86/kernel/smpboot.c 	cpumask_clear_cpu(cpu, cpu_callout_mask);
cpu              1574 arch/x86/kernel/smpboot.c 	cpumask_clear_cpu(cpu, cpu_callin_mask);
cpu              1576 arch/x86/kernel/smpboot.c 	cpumask_clear_cpu(cpu, cpu_initialized_mask);
cpu              1577 arch/x86/kernel/smpboot.c 	numa_remove_cpu(cpu);
cpu              1582 arch/x86/kernel/smpboot.c 	int cpu = smp_processor_id();
cpu              1584 arch/x86/kernel/smpboot.c 	remove_siblinginfo(cpu);
cpu              1588 arch/x86/kernel/smpboot.c 	remove_cpu_from_maps(cpu);
cpu              1613 arch/x86/kernel/smpboot.c int common_cpu_die(unsigned int cpu)
cpu              1620 arch/x86/kernel/smpboot.c 	if (cpu_wait_death(cpu, 5)) {
cpu              1622 arch/x86/kernel/smpboot.c 			pr_info("CPU %u is now offline\n", cpu);
cpu              1624 arch/x86/kernel/smpboot.c 		pr_err("CPU %u didn't die...\n", cpu);
cpu              1631 arch/x86/kernel/smpboot.c void native_cpu_die(unsigned int cpu)
cpu              1633 arch/x86/kernel/smpboot.c 	common_cpu_die(cpu);
cpu              1763 arch/x86/kernel/smpboot.c void native_cpu_die(unsigned int cpu)
cpu               328 arch/x86/kernel/tboot.c static int tboot_dying_cpu(unsigned int cpu)
cpu                89 arch/x86/kernel/tls.c 	int cpu;
cpu                94 arch/x86/kernel/tls.c 	cpu = get_cpu();
cpu               106 arch/x86/kernel/tls.c 		load_TLS(t, cpu);
cpu                60 arch/x86/kernel/topology.c int _debug_hotplug_cpu(int cpu, int action)
cpu                62 arch/x86/kernel/topology.c 	struct device *dev = get_cpu_device(cpu);
cpu                65 arch/x86/kernel/topology.c 	if (!cpu_is_hotpluggable(cpu))
cpu                72 arch/x86/kernel/topology.c 		ret = cpu_down(cpu);
cpu                74 arch/x86/kernel/topology.c 			pr_info("DEBUG_HOTPLUG_CPU0: CPU %u is now offline\n", cpu);
cpu                78 arch/x86/kernel/topology.c 			pr_debug("Can't offline CPU%d.\n", cpu);
cpu                81 arch/x86/kernel/topology.c 		ret = cpu_up(cpu);
cpu                86 arch/x86/kernel/topology.c 			pr_debug("Can't online CPU%d.\n", cpu);
cpu               142 arch/x86/kernel/topology.c 		per_cpu(cpu_devices, num).cpu.hotpluggable = 1;
cpu               144 arch/x86/kernel/topology.c 	return register_cpu(&per_cpu(cpu_devices, num).cpu, num);
cpu               150 arch/x86/kernel/topology.c 	unregister_cpu(&per_cpu(cpu_devices, num).cpu);
cpu               157 arch/x86/kernel/topology.c 	return register_cpu(&per_cpu(cpu_devices, num).cpu, num);
cpu               123 arch/x86/kernel/tsc.c static void __set_cyc2ns_scale(unsigned long khz, int cpu, unsigned long long tsc_now)
cpu               153 arch/x86/kernel/tsc.c 	c2n = per_cpu_ptr(&cyc2ns, cpu);
cpu               161 arch/x86/kernel/tsc.c static void set_cyc2ns_scale(unsigned long khz, int cpu, unsigned long long tsc_now)
cpu               169 arch/x86/kernel/tsc.c 		__set_cyc2ns_scale(khz, cpu, tsc_now);
cpu               193 arch/x86/kernel/tsc.c 	unsigned int cpu, this_cpu = smp_processor_id();
cpu               197 arch/x86/kernel/tsc.c 	for_each_possible_cpu(cpu) {
cpu               198 arch/x86/kernel/tsc.c 		if (cpu != this_cpu) {
cpu               200 arch/x86/kernel/tsc.c 			c2n = per_cpu_ptr(&cyc2ns, cpu);
cpu               933 arch/x86/kernel/tsc.c 	int cpu;
cpu               951 arch/x86/kernel/tsc.c 	for_each_possible_cpu(cpu) {
cpu               952 arch/x86/kernel/tsc.c 		per_cpu(cyc2ns.data[0].cyc2ns_offset, cpu) = offset;
cpu               953 arch/x86/kernel/tsc.c 		per_cpu(cyc2ns.data[1].cyc2ns_offset, cpu) = offset;
cpu              1000 arch/x86/kernel/tsc.c 		set_cyc2ns_scale(tsc_khz, freq->policy->cpu, rdtsc());
cpu              1296 arch/x86/kernel/tsc.c 	int cpu;
cpu              1349 arch/x86/kernel/tsc.c 	for_each_possible_cpu(cpu)
cpu              1350 arch/x86/kernel/tsc.c 		set_cyc2ns_scale(tsc_khz, cpu, tsc_stop);
cpu              1524 arch/x86/kernel/tsc.c 	int sibling, cpu = smp_processor_id();
cpu              1525 arch/x86/kernel/tsc.c 	int constant_tsc = cpu_has(&cpu_data(cpu), X86_FEATURE_CONSTANT_TSC);
cpu              1526 arch/x86/kernel/tsc.c 	const struct cpumask *mask = topology_core_cpumask(cpu);
cpu              1531 arch/x86/kernel/tsc.c 	sibling = cpumask_any_but(mask, cpu);
cpu                81 arch/x86/kernel/tsc_sync.c 				   unsigned int cpu, bool bootcpu)
cpu               102 arch/x86/kernel/tsc_sync.c 				cpu, bootval);
cpu               107 arch/x86/kernel/tsc_sync.c 				cpu, bootval);
cpu               141 arch/x86/kernel/tsc_sync.c 	unsigned int refcpu, cpu = smp_processor_id();
cpu               167 arch/x86/kernel/tsc_sync.c 	mask = topology_core_cpumask(cpu);
cpu               168 arch/x86/kernel/tsc_sync.c 	refcpu = mask ? cpumask_any_but(mask, cpu) : nr_cpu_ids;
cpu               301 arch/x86/kernel/tsc_sync.c static inline unsigned int loop_timeout(int cpu)
cpu               303 arch/x86/kernel/tsc_sync.c 	return (cpumask_weight(topology_core_cpumask(cpu)) > 1) ? 2 : 20;
cpu               310 arch/x86/kernel/tsc_sync.c void check_tsc_sync_source(int cpu)
cpu               347 arch/x86/kernel/tsc_sync.c 	check_tsc_warp(loop_timeout(cpu));
cpu               361 arch/x86/kernel/tsc_sync.c 			smp_processor_id(), cpu);
cpu               368 arch/x86/kernel/tsc_sync.c 			smp_processor_id(), cpu);
cpu               403 arch/x86/kernel/tsc_sync.c 	unsigned int cpu = smp_processor_id();
cpu               434 arch/x86/kernel/tsc_sync.c 	cur_max_warp = check_tsc_warp(loop_timeout(cpu));
cpu               487 arch/x86/kernel/tsc_sync.c 		cpu, cur_max_warp, cur->adjusted);
cpu                33 arch/x86/kernel/x86_init.c void x86_op_int_noop(int cpu) { }
cpu               664 arch/x86/kvm/svm.c 	int cpu;
cpu               985 arch/x86/kvm/svm.c static void svm_cpu_uninit(int cpu)
cpu               998 arch/x86/kvm/svm.c static int svm_cpu_init(int cpu)
cpu              1005 arch/x86/kvm/svm.c 	sd->cpu = cpu;
cpu              1018 arch/x86/kvm/svm.c 	per_cpu(svm_data, cpu) = sd;
cpu              1343 arch/x86/kvm/svm.c 	int cpu;
cpu              1397 arch/x86/kvm/svm.c 	for_each_possible_cpu(cpu) {
cpu              1398 arch/x86/kvm/svm.c 		r = svm_cpu_init(cpu);
cpu              1461 arch/x86/kvm/svm.c 	int cpu;
cpu              1466 arch/x86/kvm/svm.c 	for_each_possible_cpu(cpu)
cpu              1467 arch/x86/kvm/svm.c 		svm_cpu_uninit(cpu);
cpu              1777 arch/x86/kvm/svm.c 	int cpu, pos;
cpu              1782 arch/x86/kvm/svm.c 	for_each_possible_cpu(cpu) {
cpu              1783 arch/x86/kvm/svm.c 		sd = per_cpu(svm_data, cpu);
cpu              2047 arch/x86/kvm/svm.c avic_update_iommu_vcpu_affinity(struct kvm_vcpu *vcpu, int cpu, bool r)
cpu              2067 arch/x86/kvm/svm.c 		ret = amd_iommu_update_ga(cpu, r, ir->data);
cpu              2076 arch/x86/kvm/svm.c static void avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
cpu              2080 arch/x86/kvm/svm.c 	int h_physical_id = kvm_cpu_get_apicid(cpu);
cpu              2133 arch/x86/kvm/svm.c 		avic_vcpu_load(vcpu, vcpu->cpu);
cpu              2312 arch/x86/kvm/svm.c static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
cpu              2315 arch/x86/kvm/svm.c 	struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
cpu              2318 arch/x86/kvm/svm.c 	if (unlikely(cpu != vcpu->cpu)) {
cpu              2348 arch/x86/kvm/svm.c 	avic_vcpu_load(vcpu, cpu);
cpu              5051 arch/x86/kvm/svm.c 	int cpu = raw_smp_processor_id();
cpu              5053 arch/x86/kvm/svm.c 	struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
cpu              5058 arch/x86/kvm/svm.c static void pre_sev_run(struct vcpu_svm *svm, int cpu)
cpu              5060 arch/x86/kvm/svm.c 	struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
cpu              5073 arch/x86/kvm/svm.c 	    svm->last_cpu == cpu)
cpu              5076 arch/x86/kvm/svm.c 	svm->last_cpu = cpu;
cpu              5084 arch/x86/kvm/svm.c 	int cpu = raw_smp_processor_id();
cpu              5086 arch/x86/kvm/svm.c 	struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
cpu              5089 arch/x86/kvm/svm.c 		return pre_sev_run(svm, cpu);
cpu              5199 arch/x86/kvm/svm.c 		int cpuid = vcpu->cpu;
cpu              5678 arch/x86/kvm/svm.c 		smp_send_reschedule(vcpu->cpu);
cpu              6235 arch/x86/kvm/svm.c static void svm_sched_in(struct kvm_vcpu *vcpu, int cpu)
cpu               297 arch/x86/kvm/vmx/nested.c 	int cpu;
cpu               302 arch/x86/kvm/vmx/nested.c 	cpu = get_cpu();
cpu               305 arch/x86/kvm/vmx/nested.c 	vmx_vcpu_load_vmcs(vcpu, cpu, prev);
cpu              3633 arch/x86/kvm/vmx/nested.c 	int cpu;
cpu              3641 arch/x86/kvm/vmx/nested.c 	cpu = get_cpu();
cpu              3643 arch/x86/kvm/vmx/nested.c 	vmx_vcpu_load(&vmx->vcpu, cpu);
cpu              3648 arch/x86/kvm/vmx/nested.c 	vmx_vcpu_load(&vmx->vcpu, cpu);
cpu                61 arch/x86/kvm/vmx/vmcs.h 	int cpu;
cpu               646 arch/x86/kvm/vmx/vmx.c 	loaded_vmcs->cpu = -1;
cpu               653 arch/x86/kvm/vmx/vmx.c 	int cpu = raw_smp_processor_id();
cpu               656 arch/x86/kvm/vmx/vmx.c 	list_for_each_entry(v, &per_cpu(loaded_vmcss_on_cpu, cpu),
cpu               665 arch/x86/kvm/vmx/vmx.c 	int cpu = raw_smp_processor_id();
cpu               667 arch/x86/kvm/vmx/vmx.c 	if (loaded_vmcs->cpu != cpu)
cpu               669 arch/x86/kvm/vmx/vmx.c 	if (per_cpu(current_vmcs, cpu) == loaded_vmcs->vmcs)
cpu               670 arch/x86/kvm/vmx/vmx.c 		per_cpu(current_vmcs, cpu) = NULL;
cpu               687 arch/x86/kvm/vmx/vmx.c 	loaded_vmcs->cpu = -1;
cpu               693 arch/x86/kvm/vmx/vmx.c 	int cpu = loaded_vmcs->cpu;
cpu               695 arch/x86/kvm/vmx/vmx.c 	if (cpu != -1)
cpu               696 arch/x86/kvm/vmx/vmx.c 		smp_call_function_single(cpu,
cpu              1112 arch/x86/kvm/vmx/vmx.c 	int cpu = raw_smp_processor_id();
cpu              1148 arch/x86/kvm/vmx/vmx.c 	gs_base = cpu_kernelmode_gs_base(cpu);
cpu              1233 arch/x86/kvm/vmx/vmx.c static void vmx_vcpu_pi_load(struct kvm_vcpu *vcpu, int cpu)
cpu              1245 arch/x86/kvm/vmx/vmx.c 	if (!pi_test_sn(pi_desc) && vcpu->cpu == cpu)
cpu              1255 arch/x86/kvm/vmx/vmx.c 	if (pi_desc->nv == POSTED_INTR_WAKEUP_VECTOR || vcpu->cpu == cpu) {
cpu              1264 arch/x86/kvm/vmx/vmx.c 		dest = cpu_physical_id(cpu);
cpu              1289 arch/x86/kvm/vmx/vmx.c void vmx_vcpu_load_vmcs(struct kvm_vcpu *vcpu, int cpu,
cpu              1293 arch/x86/kvm/vmx/vmx.c 	bool already_loaded = vmx->loaded_vmcs->cpu == cpu;
cpu              1309 arch/x86/kvm/vmx/vmx.c 			 &per_cpu(loaded_vmcss_on_cpu, cpu));
cpu              1313 arch/x86/kvm/vmx/vmx.c 	prev = per_cpu(current_vmcs, cpu);
cpu              1315 arch/x86/kvm/vmx/vmx.c 		per_cpu(current_vmcs, cpu) = vmx->loaded_vmcs->vmcs;
cpu              1338 arch/x86/kvm/vmx/vmx.c 			    (unsigned long)&get_cpu_entry_area(cpu)->tss.x86_tss);
cpu              1352 arch/x86/kvm/vmx/vmx.c 		vmx->loaded_vmcs->cpu = cpu;
cpu              1365 arch/x86/kvm/vmx/vmx.c void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
cpu              1369 arch/x86/kvm/vmx/vmx.c 	vmx_vcpu_load_vmcs(vcpu, cpu, NULL);
cpu              1371 arch/x86/kvm/vmx/vmx.c 	vmx_vcpu_pi_load(vcpu, cpu);
cpu              2225 arch/x86/kvm/vmx/vmx.c 	int cpu = raw_smp_processor_id();
cpu              2226 arch/x86/kvm/vmx/vmx.c 	u64 phys_addr = __pa(per_cpu(vmxarea, cpu));
cpu              2237 arch/x86/kvm/vmx/vmx.c 	    !hv_get_vp_assist_page(cpu))
cpu              2260 arch/x86/kvm/vmx/vmx.c 	int cpu = raw_smp_processor_id();
cpu              2263 arch/x86/kvm/vmx/vmx.c 	list_for_each_entry_safe(v, n, &per_cpu(loaded_vmcss_on_cpu, cpu),
cpu              2502 arch/x86/kvm/vmx/vmx.c struct vmcs *alloc_vmcs_cpu(bool shadow, int cpu, gfp_t flags)
cpu              2504 arch/x86/kvm/vmx/vmx.c 	int node = cpu_to_node(cpu);
cpu              2585 arch/x86/kvm/vmx/vmx.c 	int cpu;
cpu              2587 arch/x86/kvm/vmx/vmx.c 	for_each_possible_cpu(cpu) {
cpu              2588 arch/x86/kvm/vmx/vmx.c 		free_vmcs(per_cpu(vmxarea, cpu));
cpu              2589 arch/x86/kvm/vmx/vmx.c 		per_cpu(vmxarea, cpu) = NULL;
cpu              2595 arch/x86/kvm/vmx/vmx.c 	int cpu;
cpu              2597 arch/x86/kvm/vmx/vmx.c 	for_each_possible_cpu(cpu) {
cpu              2600 arch/x86/kvm/vmx/vmx.c 		vmcs = alloc_vmcs_cpu(false, cpu, GFP_KERNEL);
cpu              2619 arch/x86/kvm/vmx/vmx.c 		per_cpu(vmxarea, cpu) = vmcs;
cpu              3792 arch/x86/kvm/vmx/vmx.c 		apic->send_IPI_mask(get_cpu_mask(vcpu->cpu), pi_vec);
cpu              5311 arch/x86/kvm/vmx/vmx.c 	int cpu = smp_processor_id();
cpu              5313 arch/x86/kvm/vmx/vmx.c 	spin_lock(&per_cpu(blocked_vcpu_on_cpu_lock, cpu));
cpu              5314 arch/x86/kvm/vmx/vmx.c 	list_for_each_entry(vcpu, &per_cpu(blocked_vcpu_on_cpu, cpu),
cpu              5321 arch/x86/kvm/vmx/vmx.c 	spin_unlock(&per_cpu(blocked_vcpu_on_cpu_lock, cpu));
cpu              6679 arch/x86/kvm/vmx/vmx.c 	int cpu;
cpu              6752 arch/x86/kvm/vmx/vmx.c 	cpu = get_cpu();
cpu              6753 arch/x86/kvm/vmx/vmx.c 	vmx_vcpu_load(&vmx->vcpu, cpu);
cpu              6754 arch/x86/kvm/vmx/vmx.c 	vmx->vcpu.cpu = cpu;
cpu              7251 arch/x86/kvm/vmx/vmx.c static void vmx_sched_in(struct kvm_vcpu *vcpu, int cpu)
cpu              7329 arch/x86/kvm/vmx/vmx.c 		dest = cpu_physical_id(vcpu->cpu);
cpu              7376 arch/x86/kvm/vmx/vmx.c 		vcpu->pre_pcpu = vcpu->cpu;
cpu              7960 arch/x86/kvm/vmx/vmx.c 		int cpu;
cpu              7967 arch/x86/kvm/vmx/vmx.c 		for_each_online_cpu(cpu) {
cpu              7968 arch/x86/kvm/vmx/vmx.c 			vp_ap =	hv_get_vp_assist_page(cpu);
cpu              7987 arch/x86/kvm/vmx/vmx.c 	int r, cpu;
cpu              7999 arch/x86/kvm/vmx/vmx.c 		int cpu;
cpu              8002 arch/x86/kvm/vmx/vmx.c 		for_each_online_cpu(cpu) {
cpu              8003 arch/x86/kvm/vmx/vmx.c 			if (!hv_get_vp_assist_page(cpu)) {
cpu              8041 arch/x86/kvm/vmx/vmx.c 	for_each_possible_cpu(cpu) {
cpu              8042 arch/x86/kvm/vmx/vmx.c 		INIT_LIST_HEAD(&per_cpu(loaded_vmcss_on_cpu, cpu));
cpu              8043 arch/x86/kvm/vmx/vmx.c 		INIT_LIST_HEAD(&per_cpu(blocked_vcpu_on_cpu, cpu));
cpu              8044 arch/x86/kvm/vmx/vmx.c 		spin_lock_init(&per_cpu(blocked_vcpu_on_cpu_lock, cpu));
cpu               307 arch/x86/kvm/vmx/vmx.h void vmx_vcpu_load_vmcs(struct kvm_vcpu *vcpu, int cpu,
cpu               309 arch/x86/kvm/vmx/vmx.h void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
cpu               476 arch/x86/kvm/vmx/vmx.h struct vmcs *alloc_vmcs_cpu(bool shadow, int cpu, gfp_t flags);
cpu               268 arch/x86/kvm/x86.c 	unsigned int cpu = smp_processor_id();
cpu               269 arch/x86/kvm/x86.c 	struct kvm_shared_msrs *smsr = per_cpu_ptr(shared_msrs, cpu);
cpu               301 arch/x86/kvm/x86.c 	unsigned int cpu = smp_processor_id();
cpu               302 arch/x86/kvm/x86.c 	struct kvm_shared_msrs *smsr = per_cpu_ptr(shared_msrs, cpu);
cpu               324 arch/x86/kvm/x86.c 	unsigned int cpu = smp_processor_id();
cpu               325 arch/x86/kvm/x86.c 	struct kvm_shared_msrs *smsr = per_cpu_ptr(shared_msrs, cpu);
cpu              3493 arch/x86/kvm/x86.c void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
cpu              3498 arch/x86/kvm/x86.c 			cpumask_set_cpu(cpu, vcpu->arch.wbinvd_dirty_mask);
cpu              3499 arch/x86/kvm/x86.c 		else if (vcpu->cpu != -1 && vcpu->cpu != cpu)
cpu              3500 arch/x86/kvm/x86.c 			smp_call_function_single(vcpu->cpu,
cpu              3504 arch/x86/kvm/x86.c 	kvm_x86_ops->vcpu_load(vcpu, cpu);
cpu              3513 arch/x86/kvm/x86.c 	if (unlikely(vcpu->cpu != cpu) || kvm_check_tsc_unstable()) {
cpu              3533 arch/x86/kvm/x86.c 		if (!vcpu->kvm->arch.use_master_clock || vcpu->cpu == -1)
cpu              3535 arch/x86/kvm/x86.c 		if (vcpu->cpu != cpu)
cpu              3537 arch/x86/kvm/x86.c 		vcpu->cpu = cpu;
cpu              5986 arch/x86/kvm/x86.c 		int cpu = get_cpu();
cpu              5988 arch/x86/kvm/x86.c 		cpumask_set_cpu(cpu, vcpu->arch.wbinvd_dirty_mask);
cpu              6969 arch/x86/kvm/x86.c static int kvmclock_cpu_down_prep(unsigned int cpu)
cpu              6994 arch/x86/kvm/x86.c 	int cpu;
cpu              7003 arch/x86/kvm/x86.c 	for_each_present_cpu(cpu)
cpu              7004 arch/x86/kvm/x86.c 		per_cpu(cpu_tsc_khz, cpu) = tsc_khz;
cpu              7014 arch/x86/kvm/x86.c 		kvm_for_each_vcpu(cpu, vcpu, kvm)
cpu              7017 arch/x86/kvm/x86.c 		kvm_for_each_vcpu(cpu, vcpu, kvm)
cpu              7026 arch/x86/kvm/x86.c static void __kvmclock_cpufreq_notifier(struct cpufreq_freqs *freq, int cpu)
cpu              7071 arch/x86/kvm/x86.c 	smp_call_function_single(cpu, tsc_khz_changed, freq, 1);
cpu              7076 arch/x86/kvm/x86.c 			if (vcpu->cpu != cpu)
cpu              7079 arch/x86/kvm/x86.c 			if (vcpu->cpu != raw_smp_processor_id())
cpu              7098 arch/x86/kvm/x86.c 		smp_call_function_single(cpu, tsc_khz_changed, freq, 1);
cpu              7106 arch/x86/kvm/x86.c 	int cpu;
cpu              7113 arch/x86/kvm/x86.c 	for_each_cpu(cpu, freq->policy->cpus)
cpu              7114 arch/x86/kvm/x86.c 		__kvmclock_cpufreq_notifier(freq, cpu);
cpu              7123 arch/x86/kvm/x86.c static int kvmclock_cpu_online(unsigned int cpu)
cpu              7136 arch/x86/kvm/x86.c 		int cpu;
cpu              7139 arch/x86/kvm/x86.c 		cpu = get_cpu();
cpu              7140 arch/x86/kvm/x86.c 		cpufreq_get_policy(&policy, cpu);
cpu              8020 arch/x86/kvm/x86.c 	smp_send_reschedule(vcpu->cpu);
cpu              9315 arch/x86/kvm/x86.c 			if (!stable && vcpu->cpu == smp_processor_id())
cpu              9537 arch/x86/kvm/x86.c void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu)
cpu              9540 arch/x86/kvm/x86.c 	kvm_x86_ops->sched_in(vcpu, cpu);
cpu                10 arch/x86/lib/cache-smp.c void wbinvd_on_cpu(int cpu)
cpu                12 arch/x86/lib/cache-smp.c 	smp_call_function_single(cpu, __wbinvd, NULL, 1);
cpu                55 arch/x86/lib/delay.c 	int cpu;
cpu                58 arch/x86/lib/delay.c 	cpu = smp_processor_id();
cpu                79 arch/x86/lib/delay.c 		if (unlikely(cpu != smp_processor_id())) {
cpu                81 arch/x86/lib/delay.c 			cpu = smp_processor_id();
cpu                36 arch/x86/lib/msr-smp.c int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
cpu                44 arch/x86/lib/msr-smp.c 	err = smp_call_function_single(cpu, __rdmsr_on_cpu, &rv, 1);
cpu                52 arch/x86/lib/msr-smp.c int rdmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 *q)
cpu                60 arch/x86/lib/msr-smp.c 	err = smp_call_function_single(cpu, __rdmsr_on_cpu, &rv, 1);
cpu                67 arch/x86/lib/msr-smp.c int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
cpu                77 arch/x86/lib/msr-smp.c 	err = smp_call_function_single(cpu, __wrmsr_on_cpu, &rv, 1);
cpu                83 arch/x86/lib/msr-smp.c int wrmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 q)
cpu                93 arch/x86/lib/msr-smp.c 	err = smp_call_function_single(cpu, __wrmsr_on_cpu, &rv, 1);
cpu               169 arch/x86/lib/msr-smp.c int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
cpu               182 arch/x86/lib/msr-smp.c 	err = smp_call_function_single_async(cpu, &csd);
cpu               194 arch/x86/lib/msr-smp.c int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
cpu               204 arch/x86/lib/msr-smp.c 	err = smp_call_function_single(cpu, __wrmsr_safe_on_cpu, &rv, 1);
cpu               210 arch/x86/lib/msr-smp.c int wrmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 q)
cpu               220 arch/x86/lib/msr-smp.c 	err = smp_call_function_single(cpu, __wrmsr_safe_on_cpu, &rv, 1);
cpu               226 arch/x86/lib/msr-smp.c int rdmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 *q)
cpu               231 arch/x86/lib/msr-smp.c 	err = rdmsr_safe_on_cpu(cpu, msr_no, &low, &high);
cpu               256 arch/x86/lib/msr-smp.c int rdmsr_safe_regs_on_cpu(unsigned int cpu, u32 *regs)
cpu               263 arch/x86/lib/msr-smp.c 	err = smp_call_function_single(cpu, __rdmsr_safe_regs_on_cpu, &rv, 1);
cpu               269 arch/x86/lib/msr-smp.c int wrmsr_safe_regs_on_cpu(unsigned int cpu, u32 *regs)
cpu               276 arch/x86/lib/msr-smp.c 	err = smp_call_function_single(cpu, __wrmsr_safe_regs_on_cpu, &rv, 1);
cpu                20 arch/x86/mm/cpu_entry_area.c struct cpu_entry_area *get_cpu_entry_area(int cpu)
cpu                22 arch/x86/mm/cpu_entry_area.c 	unsigned long va = CPU_ENTRY_AREA_PER_CPU + cpu * CPU_ENTRY_AREA_SIZE;
cpu                55 arch/x86/mm/cpu_entry_area.c static void __init percpu_setup_debug_store(unsigned int cpu)
cpu                64 arch/x86/mm/cpu_entry_area.c 	cea = &get_cpu_entry_area(cpu)->cpu_debug_store;
cpu                67 arch/x86/mm/cpu_entry_area.c 	cea_map_percpu_pages(cea, &per_cpu(cpu_debug_store, cpu), npages,
cpu                70 arch/x86/mm/cpu_entry_area.c 	cea = &get_cpu_entry_area(cpu)->cpu_debug_buffers;
cpu                89 arch/x86/mm/cpu_entry_area.c static void __init percpu_setup_exception_stacks(unsigned int cpu)
cpu                91 arch/x86/mm/cpu_entry_area.c 	struct exception_stacks *estacks = per_cpu_ptr(&exception_stacks, cpu);
cpu                92 arch/x86/mm/cpu_entry_area.c 	struct cpu_entry_area *cea = get_cpu_entry_area(cpu);
cpu                97 arch/x86/mm/cpu_entry_area.c 	per_cpu(cea_exception_stacks, cpu) = &cea->estacks;
cpu               111 arch/x86/mm/cpu_entry_area.c static inline void percpu_setup_exception_stacks(unsigned int cpu) {}
cpu               115 arch/x86/mm/cpu_entry_area.c static void __init setup_cpu_entry_area(unsigned int cpu)
cpu               117 arch/x86/mm/cpu_entry_area.c 	struct cpu_entry_area *cea = get_cpu_entry_area(cpu);
cpu               138 arch/x86/mm/cpu_entry_area.c 	cea_set_pte(&cea->gdt, get_cpu_gdt_paddr(cpu), gdt_prot);
cpu               141 arch/x86/mm/cpu_entry_area.c 			     per_cpu_ptr(&entry_stack_storage, cpu), 1,
cpu               164 arch/x86/mm/cpu_entry_area.c 	cea_map_percpu_pages(&cea->tss, &per_cpu(cpu_tss_rw, cpu),
cpu               168 arch/x86/mm/cpu_entry_area.c 	per_cpu(cpu_entry_area, cpu) = cea;
cpu               171 arch/x86/mm/cpu_entry_area.c 	percpu_setup_exception_stacks(cpu);
cpu               173 arch/x86/mm/cpu_entry_area.c 	percpu_setup_debug_store(cpu);
cpu               197 arch/x86/mm/cpu_entry_area.c 	unsigned int cpu;
cpu               201 arch/x86/mm/cpu_entry_area.c 	for_each_possible_cpu(cpu)
cpu               202 arch/x86/mm/cpu_entry_area.c 		setup_cpu_entry_area(cpu);
cpu               372 arch/x86/mm/mmio-mod.c 	int cpu;
cpu               388 arch/x86/mm/mmio-mod.c 	for_each_cpu(cpu, downed_cpus) {
cpu               389 arch/x86/mm/mmio-mod.c 		err = cpu_down(cpu);
cpu               391 arch/x86/mm/mmio-mod.c 			pr_info("CPU%d is down.\n", cpu);
cpu               393 arch/x86/mm/mmio-mod.c 			pr_err("Error taking CPU%d down: %d\n", cpu, err);
cpu               402 arch/x86/mm/mmio-mod.c 	int cpu;
cpu               408 arch/x86/mm/mmio-mod.c 	for_each_cpu(cpu, downed_cpus) {
cpu               409 arch/x86/mm/mmio-mod.c 		err = cpu_up(cpu);
cpu               411 arch/x86/mm/mmio-mod.c 			pr_info("enabled CPU%d.\n", cpu);
cpu               413 arch/x86/mm/mmio-mod.c 			pr_err("cannot re-enable CPU%d: %d\n", cpu, err);
cpu                62 arch/x86/mm/numa.c int numa_cpu_node(int cpu)
cpu                64 arch/x86/mm/numa.c 	int apicid = early_per_cpu(x86_cpu_to_apicid, cpu);
cpu                80 arch/x86/mm/numa.c void numa_set_node(int cpu, int node)
cpu                86 arch/x86/mm/numa.c 		cpu_to_node_map[cpu] = node;
cpu                91 arch/x86/mm/numa.c 	if (cpu >= nr_cpu_ids || !cpu_possible(cpu)) {
cpu                92 arch/x86/mm/numa.c 		printk(KERN_ERR "numa_set_node: invalid cpu# (%d)\n", cpu);
cpu                97 arch/x86/mm/numa.c 	per_cpu(x86_cpu_to_node_map, cpu) = node;
cpu                99 arch/x86/mm/numa.c 	set_cpu_numa_node(cpu, node);
cpu               102 arch/x86/mm/numa.c void numa_clear_node(int cpu)
cpu               104 arch/x86/mm/numa.c 	numa_set_node(cpu, NUMA_NO_NODE);
cpu               752 arch/x86/mm/numa.c 	int cpu;
cpu               757 arch/x86/mm/numa.c 	for_each_possible_cpu(cpu) {
cpu               758 arch/x86/mm/numa.c 		int node = numa_cpu_node(cpu);
cpu               766 arch/x86/mm/numa.c 		numa_set_node(cpu, node);
cpu               773 arch/x86/mm/numa.c void numa_add_cpu(int cpu)
cpu               775 arch/x86/mm/numa.c 	cpumask_set_cpu(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]);
cpu               778 arch/x86/mm/numa.c void numa_remove_cpu(int cpu)
cpu               780 arch/x86/mm/numa.c 	cpumask_clear_cpu(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]);
cpu               786 arch/x86/mm/numa.c int __cpu_to_node(int cpu)
cpu               790 arch/x86/mm/numa.c 			"cpu_to_node(%d): usage too early!\n", cpu);
cpu               792 arch/x86/mm/numa.c 		return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];
cpu               794 arch/x86/mm/numa.c 	return per_cpu(x86_cpu_to_node_map, cpu);
cpu               802 arch/x86/mm/numa.c int early_cpu_to_node(int cpu)
cpu               805 arch/x86/mm/numa.c 		return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];
cpu               807 arch/x86/mm/numa.c 	if (!cpu_possible(cpu)) {
cpu               809 arch/x86/mm/numa.c 			"early_cpu_to_node(%d): no per_cpu area!\n", cpu);
cpu               813 arch/x86/mm/numa.c 	return per_cpu(x86_cpu_to_node_map, cpu);
cpu               816 arch/x86/mm/numa.c void debug_cpumask_set_cpu(int cpu, int node, bool enable)
cpu               832 arch/x86/mm/numa.c 		cpumask_set_cpu(cpu, mask);
cpu               834 arch/x86/mm/numa.c 		cpumask_clear_cpu(cpu, mask);
cpu               838 arch/x86/mm/numa.c 		cpu, node, cpumask_pr_args(mask));
cpu               843 arch/x86/mm/numa.c static void numa_set_cpumask(int cpu, bool enable)
cpu               845 arch/x86/mm/numa.c 	debug_cpumask_set_cpu(cpu, early_cpu_to_node(cpu), enable);
cpu               848 arch/x86/mm/numa.c void numa_add_cpu(int cpu)
cpu               850 arch/x86/mm/numa.c 	numa_set_cpumask(cpu, true);
cpu               853 arch/x86/mm/numa.c void numa_remove_cpu(int cpu)
cpu               855 arch/x86/mm/numa.c 	numa_set_cpumask(cpu, false);
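The numa.c entries above keep two structures in sync: a per-CPU node id and a per-node mask of CPUs, updated together when a CPU is assigned to, added to, or removed from a node. A toy standalone model of that bookkeeping follows; the array sizes, names, and the 64-bit bitmap are simplifications for illustration, not the kernel's data structures.

#include <stdint.h>
#include <stdio.h>

#define MAX_CPUS  8
#define MAX_NODES 2
#define NO_NODE   (-1)

/* Toy versions of the two structures kept in sync: cpu -> node id,
 * and node -> bitmap of its CPUs. */
static int      cpu_to_node_map[MAX_CPUS];
static uint64_t node_to_cpumask[MAX_NODES];

static void set_node(int cpu, int node)
{
	cpu_to_node_map[cpu] = node;
}

static void add_cpu(int cpu)
{
	int node = cpu_to_node_map[cpu];

	if (node != NO_NODE)
		node_to_cpumask[node] |= 1ull << cpu;
}

static void remove_cpu(int cpu)
{
	int node = cpu_to_node_map[cpu];

	if (node != NO_NODE)
		node_to_cpumask[node] &= ~(1ull << cpu);
}

int main(void)
{
	for (int cpu = 0; cpu < MAX_CPUS; cpu++) {
		set_node(cpu, cpu < 4 ? 0 : 1);	/* first half node 0, rest node 1 */
		add_cpu(cpu);
	}
	remove_cpu(5);			/* e.g. the CPU went offline */

	for (int node = 0; node < MAX_NODES; node++)
		printf("node %d cpumask: 0x%02llx\n", node,
		       (unsigned long long)node_to_cpumask[node]);
	return 0;
}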
cpu               531 arch/x86/mm/numa_emulation.c void numa_add_cpu(int cpu)
cpu               535 arch/x86/mm/numa_emulation.c 	nid = early_cpu_to_node(cpu);
cpu               546 arch/x86/mm/numa_emulation.c 			cpumask_set_cpu(cpu, node_to_cpumask_map[nid]);
cpu               549 arch/x86/mm/numa_emulation.c void numa_remove_cpu(int cpu)
cpu               554 arch/x86/mm/numa_emulation.c 		cpumask_clear_cpu(cpu, node_to_cpumask_map[i]);
cpu               557 arch/x86/mm/numa_emulation.c static void numa_set_cpumask(int cpu, bool enable)
cpu               561 arch/x86/mm/numa_emulation.c 	nid = early_cpu_to_node(cpu);
cpu               573 arch/x86/mm/numa_emulation.c 		debug_cpumask_set_cpu(cpu, nid, enable);
cpu               577 arch/x86/mm/numa_emulation.c void numa_add_cpu(int cpu)
cpu               579 arch/x86/mm/numa_emulation.c 	numa_set_cpumask(cpu, true);
cpu               582 arch/x86/mm/numa_emulation.c void numa_remove_cpu(int cpu)
cpu               584 arch/x86/mm/numa_emulation.c 	numa_set_cpumask(cpu, false);
cpu               438 arch/x86/mm/pti.c 	unsigned int cpu;
cpu               442 arch/x86/mm/pti.c 	for_each_possible_cpu(cpu) {
cpu               459 arch/x86/mm/pti.c 		unsigned long va = (unsigned long)&per_cpu(cpu_tss_rw, cpu);
cpu               132 arch/x86/mm/tlb.c void leave_mm(int cpu)
cpu               281 arch/x86/mm/tlb.c 	unsigned cpu = smp_processor_id();
cpu               343 arch/x86/mm/tlb.c 				 !cpumask_test_cpu(cpu, mm_cpumask(next))))
cpu               344 arch/x86/mm/tlb.c 			cpumask_set_cpu(cpu, mm_cpumask(next));
cpu               395 arch/x86/mm/tlb.c 			VM_WARN_ON_ONCE(!cpumask_test_cpu(cpu,
cpu               397 arch/x86/mm/tlb.c 			cpumask_clear_cpu(cpu, mm_cpumask(real_prev));
cpu               404 arch/x86/mm/tlb.c 			cpumask_set_cpu(cpu, mm_cpumask(next));
cpu               658 arch/x86/mm/tlb.c static bool tlb_is_not_lazy(int cpu, void *data)
cpu               660 arch/x86/mm/tlb.c 	return !per_cpu(cpu_tlbstate.is_lazy, cpu);
cpu               773 arch/x86/mm/tlb.c 	int cpu;
cpu               775 arch/x86/mm/tlb.c 	cpu = get_cpu();
cpu               797 arch/x86/mm/tlb.c 	if (cpumask_any_but(mm_cpumask(mm), cpu) < nr_cpu_ids)
cpu               860 arch/x86/mm/tlb.c 	int cpu = get_cpu();
cpu               862 arch/x86/mm/tlb.c 	if (cpumask_test_cpu(cpu, &batch->cpumask)) {
cpu               869 arch/x86/mm/tlb.c 	if (cpumask_any_but(&batch->cpumask, cpu) < nr_cpu_ids)
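Several tlb.c entries above test whether any CPU other than the current one appears in a cpumask (the "cpumask_any_but(...) < nr_cpu_ids" idiom) to decide if remote CPUs need to be notified at all. A standalone sketch of that check on a plain bitmask follows; the 32-bit mask and the helper name are inventions for the sketch.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* True when at least one CPU other than 'self' is set in the mask, i.e.
 * there is remote work to do; false means the current CPU can handle it
 * locally and skip any cross-CPU notification. */
static bool any_other_cpu_set(uint32_t mask, int self)
{
	return (mask & ~(1u << self)) != 0;
}

int main(void)
{
	uint32_t mm_mask = (1u << 2) | (1u << 5);	/* CPUs 2 and 5 use the mm */

	printf("cpu 2 must notify others: %d\n", any_other_cpu_set(mm_mask, 2));
	printf("cpu 5 alone in its mask:  %d\n",
	       !any_other_cpu_set(mm_mask & ~(1u << 2), 5));
	return 0;
}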
cpu               181 arch/x86/oprofile/nmi_int.c static void nmi_cpu_setup_mux(int cpu, struct op_msrs const * const msrs)
cpu               197 arch/x86/oprofile/nmi_int.c 	per_cpu(switch_index, cpu) = 0;
cpu               228 arch/x86/oprofile/nmi_int.c 	int cpu = smp_processor_id();
cpu               229 arch/x86/oprofile/nmi_int.c 	int si = per_cpu(switch_index, cpu);
cpu               230 arch/x86/oprofile/nmi_int.c 	struct op_msrs *msrs = &per_cpu(cpu_msrs, cpu);
cpu               238 arch/x86/oprofile/nmi_int.c 		per_cpu(switch_index, cpu) = 0;
cpu               240 arch/x86/oprofile/nmi_int.c 		per_cpu(switch_index, cpu) = si;
cpu               280 arch/x86/oprofile/nmi_int.c static void mux_clone(int cpu)
cpu               285 arch/x86/oprofile/nmi_int.c 	memcpy(per_cpu(cpu_msrs, cpu).multiplex,
cpu               297 arch/x86/oprofile/nmi_int.c nmi_cpu_setup_mux(int cpu, struct op_msrs const * const msrs) { }
cpu               299 arch/x86/oprofile/nmi_int.c static void mux_clone(int cpu) { }
cpu               344 arch/x86/oprofile/nmi_int.c 	int cpu = smp_processor_id();
cpu               345 arch/x86/oprofile/nmi_int.c 	struct op_msrs *msrs = &per_cpu(cpu_msrs, cpu);
cpu               350 arch/x86/oprofile/nmi_int.c 	nmi_cpu_setup_mux(cpu, msrs);
cpu               352 arch/x86/oprofile/nmi_int.c 	per_cpu(saved_lvtpc, cpu) = apic_read(APIC_LVTPC);
cpu               376 arch/x86/oprofile/nmi_int.c 	int cpu = smp_processor_id();
cpu               377 arch/x86/oprofile/nmi_int.c 	struct op_msrs *msrs = &per_cpu(cpu_msrs, cpu);
cpu               386 arch/x86/oprofile/nmi_int.c 	apic_write(APIC_LVTPC, per_cpu(saved_lvtpc, cpu));
cpu               391 arch/x86/oprofile/nmi_int.c static int nmi_cpu_online(unsigned int cpu)
cpu               402 arch/x86/oprofile/nmi_int.c static int nmi_cpu_down_prep(unsigned int cpu)
cpu               448 arch/x86/oprofile/nmi_int.c 	int cpu;
cpu               462 arch/x86/oprofile/nmi_int.c 	for_each_possible_cpu(cpu) {
cpu               463 arch/x86/oprofile/nmi_int.c 		if (!IS_ENABLED(CONFIG_SMP) || !cpu)
cpu               466 arch/x86/oprofile/nmi_int.c 		memcpy(per_cpu(cpu_msrs, cpu).counters,
cpu               470 arch/x86/oprofile/nmi_int.c 		memcpy(per_cpu(cpu_msrs, cpu).controls,
cpu               474 arch/x86/oprofile/nmi_int.c 		mux_clone(cpu);
cpu               374 arch/x86/oprofile/op_model_p4.c 	int cpu = smp_processor_id();
cpu               375 arch/x86/oprofile/op_model_p4.c 	return cpu != cpumask_first(this_cpu_cpumask_var_ptr(cpu_sibling_map));
cpu               332 arch/x86/pci/amd_bus.c static int amd_bus_cpu_online(unsigned int cpu)
cpu                42 arch/x86/pci/mmconfig_32.c 	int cpu = smp_processor_id();
cpu                44 arch/x86/pci/mmconfig_32.c 	    cpu != mmcfg_last_accessed_cpu) {
cpu                46 arch/x86/pci/mmconfig_32.c 		mmcfg_last_accessed_cpu = cpu;
cpu               139 arch/x86/platform/uv/tlb_uv.c 	int cpu;
cpu               147 arch/x86/platform/uv/tlb_uv.c 	for_each_present_cpu(cpu) {
cpu               148 arch/x86/platform/uv/tlb_uv.c 		bcp = &per_cpu(bau_control, cpu);
cpu               158 arch/x86/platform/uv/tlb_uv.c 	int cpu;
cpu               162 arch/x86/platform/uv/tlb_uv.c 	for_each_present_cpu(cpu) {
cpu               163 arch/x86/platform/uv/tlb_uv.c 		bcp = &per_cpu(bau_control, cpu);
cpu               191 arch/x86/platform/uv/tlb_uv.c 	int cpu;
cpu               193 arch/x86/platform/uv/tlb_uv.c 	for_each_present_cpu(cpu)
cpu               194 arch/x86/platform/uv/tlb_uv.c 		if (uvhub == uv_cpu_to_blade_id(cpu))
cpu               195 arch/x86/platform/uv/tlb_uv.c 			return per_cpu(x86_cpu_to_apicid, cpu);
cpu               349 arch/x86/platform/uv/tlb_uv.c 	int cpu;
cpu               352 arch/x86/platform/uv/tlb_uv.c 	for_each_present_cpu(cpu) {
cpu               353 arch/x86/platform/uv/tlb_uv.c 		hpp = &smaster->thp[cpu];
cpu               355 arch/x86/platform/uv/tlb_uv.c 			return cpu;
cpu               424 arch/x86/platform/uv/tlb_uv.c 	int sender = bcp->cpu;
cpu               435 arch/x86/platform/uv/tlb_uv.c 		int cpu;
cpu               439 arch/x86/platform/uv/tlb_uv.c 		cpu = pnode_to_first_cpu(apnode, smaster);
cpu               440 arch/x86/platform/uv/tlb_uv.c 		cpumask_set_cpu(cpu, mask);
cpu              1051 arch/x86/platform/uv/tlb_uv.c 	int cpu;
cpu              1056 arch/x86/platform/uv/tlb_uv.c 	for_each_cpu(cpu, flush_mask) {
cpu              1063 arch/x86/platform/uv/tlb_uv.c 		hpp = &bcp->socket_master->thp[cpu];
cpu              1105 arch/x86/platform/uv/tlb_uv.c 	unsigned int cpu = smp_processor_id();
cpu              1113 arch/x86/platform/uv/tlb_uv.c 	bcp = &per_cpu(bau_control, cpu);
cpu              1144 arch/x86/platform/uv/tlb_uv.c 	flush_mask = (struct cpumask *)per_cpu(uv_flush_tlb_mask, cpu);
cpu              1146 arch/x86/platform/uv/tlb_uv.c 	cpumask_andnot(flush_mask, cpumask, cpumask_of(cpu));
cpu              1148 arch/x86/platform/uv/tlb_uv.c 	if (cpumask_test_cpu(cpu, cpumask))
cpu              1169 arch/x86/platform/uv/tlb_uv.c 		bau_desc->payload.uv1_2_3.sending_cpu = cpu;
cpu              1173 arch/x86/platform/uv/tlb_uv.c 		bau_desc->payload.uv4.sending_cpu = cpu;
cpu              1400 arch/x86/platform/uv/tlb_uv.c 	int cpu;
cpu              1402 arch/x86/platform/uv/tlb_uv.c 	cpu = *(loff_t *)data;
cpu              1403 arch/x86/platform/uv/tlb_uv.c 	if (!cpu) {
cpu              1417 arch/x86/platform/uv/tlb_uv.c 	if (cpu < num_possible_cpus() && cpu_online(cpu)) {
cpu              1418 arch/x86/platform/uv/tlb_uv.c 		bcp = &per_cpu(bau_control, cpu);
cpu              1420 arch/x86/platform/uv/tlb_uv.c 			seq_printf(file, "cpu %d bau disabled\n", cpu);
cpu              1427 arch/x86/platform/uv/tlb_uv.c 			   cpu, bcp->nobau, stat->s_requestor,
cpu              1453 arch/x86/platform/uv/tlb_uv.c 			   ops.read_g_sw_ack(uv_cpu_to_pnode(cpu)),
cpu              1497 arch/x86/platform/uv/tlb_uv.c 	int cpu;
cpu              1530 arch/x86/platform/uv/tlb_uv.c 		for_each_present_cpu(cpu) {
cpu              1531 arch/x86/platform/uv/tlb_uv.c 			stat = &per_cpu(ptcstats, cpu);
cpu              1618 arch/x86/platform/uv/tlb_uv.c 	int cpu;
cpu              1630 arch/x86/platform/uv/tlb_uv.c 	cpu = get_cpu();
cpu              1631 arch/x86/platform/uv/tlb_uv.c 	bcp = &per_cpu(bau_control, cpu);
cpu              1637 arch/x86/platform/uv/tlb_uv.c 	for_each_present_cpu(cpu) {
cpu              1638 arch/x86/platform/uv/tlb_uv.c 		bcp = &per_cpu(bau_control, cpu);
cpu              1713 arch/x86/platform/uv/tlb_uv.c 	int cpu;
cpu              1781 arch/x86/platform/uv/tlb_uv.c 	for_each_present_cpu(cpu) {
cpu              1782 arch/x86/platform/uv/tlb_uv.c 		if (pnode != uv_blade_to_pnode(uv_cpu_to_blade_id(cpu)))
cpu              1784 arch/x86/platform/uv/tlb_uv.c 		bcp = &per_cpu(bau_control, cpu);
cpu              1797 arch/x86/platform/uv/tlb_uv.c 	int cpu;
cpu              1813 arch/x86/platform/uv/tlb_uv.c 	for_each_present_cpu(cpu) {
cpu              1814 arch/x86/platform/uv/tlb_uv.c 		if (pnode != uv_cpu_to_pnode(cpu))
cpu              1817 arch/x86/platform/uv/tlb_uv.c 		bcp = &per_cpu(bau_control, cpu);
cpu              1909 arch/x86/platform/uv/tlb_uv.c 	int cpu;
cpu              1912 arch/x86/platform/uv/tlb_uv.c 	for_each_present_cpu(cpu) {
cpu              1913 arch/x86/platform/uv/tlb_uv.c 		bcp = &per_cpu(bau_control, cpu);
cpu              1917 arch/x86/platform/uv/tlb_uv.c 		bcp->statp			= &per_cpu(ptcstats, cpu);
cpu              1944 arch/x86/platform/uv/tlb_uv.c 	int cpu;
cpu              1952 arch/x86/platform/uv/tlb_uv.c 	for_each_present_cpu(cpu) {
cpu              1953 arch/x86/platform/uv/tlb_uv.c 		bcp = &per_cpu(bau_control, cpu);
cpu              1957 arch/x86/platform/uv/tlb_uv.c 		pnode = uv_cpu_hub_info(cpu)->pnode;
cpu              1961 arch/x86/platform/uv/tlb_uv.c 				cpu, pnode, base_pnode, UV_DISTRIBUTION_SIZE);
cpu              1965 arch/x86/platform/uv/tlb_uv.c 		bcp->osnode = cpu_to_node(cpu);
cpu              1968 arch/x86/platform/uv/tlb_uv.c 		uvhub = uv_cpu_hub_info(cpu)->numa_blade_id;
cpu              1981 arch/x86/platform/uv/tlb_uv.c 		sdp->cpu_number[sdp->num_cpus] = cpu;
cpu              1997 arch/x86/platform/uv/tlb_uv.c 	int cpu;
cpu              2001 arch/x86/platform/uv/tlb_uv.c 	for_each_present_cpu(cpu) {
cpu              2002 arch/x86/platform/uv/tlb_uv.c 		smaster->thp[cpu].pnode = uv_cpu_hub_info(cpu)->pnode;
cpu              2003 arch/x86/platform/uv/tlb_uv.c 		smaster->thp[cpu].uvhub = uv_cpu_hub_info(cpu)->numa_blade_id;
cpu              2026 arch/x86/platform/uv/tlb_uv.c 	int i, cpu, uvhub_cpu;
cpu              2030 arch/x86/platform/uv/tlb_uv.c 		cpu = sdp->cpu_number[i];
cpu              2031 arch/x86/platform/uv/tlb_uv.c 		bcp = &per_cpu(bau_control, cpu);
cpu              2032 arch/x86/platform/uv/tlb_uv.c 		bcp->cpu = cpu;
cpu              2055 arch/x86/platform/uv/tlb_uv.c 		uvhub_cpu = uv_cpu_blade_processor_id(cpu);
cpu               184 arch/x86/platform/uv/uv_irq.c int uv_setup_irq(char *irq_name, int cpu, int mmr_blade,
cpu               193 arch/x86/platform/uv/uv_irq.c 	init_irq_alloc_info(&info, cpumask_of(cpu));
cpu               454 arch/x86/platform/uv/uv_nmi.c static int uv_set_in_nmi(int cpu, struct uv_hub_nmi_s *hub_nmi)
cpu               459 arch/x86/platform/uv/uv_nmi.c 		atomic_set(&hub_nmi->cpu_owner, cpu);
cpu               461 arch/x86/platform/uv/uv_nmi.c 			atomic_set(&uv_nmi_cpu, cpu);
cpu               471 arch/x86/platform/uv/uv_nmi.c 	int cpu = smp_processor_id();
cpu               488 arch/x86/platform/uv/uv_nmi.c 				uv_set_in_nmi(cpu, hub_nmi);
cpu               519 arch/x86/platform/uv/uv_nmi.c 				uv_set_in_nmi(cpu, hub_nmi);
cpu               535 arch/x86/platform/uv/uv_nmi.c static inline void uv_clear_nmi(int cpu)
cpu               539 arch/x86/platform/uv/uv_nmi.c 	if (cpu == atomic_read(&hub_nmi->cpu_owner)) {
cpu               553 arch/x86/platform/uv/uv_nmi.c 	int cpu;
cpu               555 arch/x86/platform/uv/uv_nmi.c 	for_each_cpu(cpu, uv_nmi_cpu_mask)
cpu               556 arch/x86/platform/uv/uv_nmi.c 		uv_cpu_nmi_per(cpu).pinging = 1;
cpu               564 arch/x86/platform/uv/uv_nmi.c 	int cpu;
cpu               566 arch/x86/platform/uv/uv_nmi.c 	for_each_cpu(cpu, uv_nmi_cpu_mask) {
cpu               567 arch/x86/platform/uv/uv_nmi.c 		uv_cpu_nmi_per(cpu).pinging =  0;
cpu               568 arch/x86/platform/uv/uv_nmi.c 		uv_cpu_nmi_per(cpu).state = UV_NMI_STATE_OUT;
cpu               569 arch/x86/platform/uv/uv_nmi.c 		cpumask_clear_cpu(cpu, uv_nmi_cpu_mask);
cpu               578 arch/x86/platform/uv/uv_nmi.c 	int cpu = smp_processor_id();
cpu               589 arch/x86/platform/uv/uv_nmi.c 		cpumask_clear_cpu(cpu, uv_nmi_cpu_mask);
cpu               668 arch/x86/platform/uv/uv_nmi.c static void uv_nmi_dump_cpu_ip(int cpu, struct pt_regs *regs)
cpu               671 arch/x86/platform/uv/uv_nmi.c 		cpu, current->pid, current->comm, (void *)regs->ip);
cpu               681 arch/x86/platform/uv/uv_nmi.c static void uv_nmi_dump_state_cpu(int cpu, struct pt_regs *regs)
cpu               685 arch/x86/platform/uv/uv_nmi.c 	if (cpu == 0)
cpu               689 arch/x86/platform/uv/uv_nmi.c 		uv_nmi_dump_cpu_ip(cpu, regs);
cpu               692 arch/x86/platform/uv/uv_nmi.c 		pr_info("UV:%sNMI process trace for CPU %d\n", dots, cpu);
cpu               700 arch/x86/platform/uv/uv_nmi.c static void uv_nmi_trigger_dump(int cpu)
cpu               704 arch/x86/platform/uv/uv_nmi.c 	if (uv_cpu_nmi_per(cpu).state != UV_NMI_STATE_IN)
cpu               707 arch/x86/platform/uv/uv_nmi.c 	uv_cpu_nmi_per(cpu).state = UV_NMI_STATE_DUMP;
cpu               711 arch/x86/platform/uv/uv_nmi.c 		if (uv_cpu_nmi_per(cpu).state
cpu               716 arch/x86/platform/uv/uv_nmi.c 	pr_crit("UV: CPU %d stuck in process dump function\n", cpu);
cpu               717 arch/x86/platform/uv/uv_nmi.c 	uv_cpu_nmi_per(cpu).state = UV_NMI_STATE_DUMP_DONE;
cpu               735 arch/x86/platform/uv/uv_nmi.c static void uv_nmi_action_health(int cpu, struct pt_regs *regs, int master)
cpu               751 arch/x86/platform/uv/uv_nmi.c static void uv_nmi_dump_state(int cpu, struct pt_regs *regs, int master)
cpu               760 arch/x86/platform/uv/uv_nmi.c 			atomic_read(&uv_nmi_cpus_in_nmi), cpu);
cpu               767 arch/x86/platform/uv/uv_nmi.c 			else if (tcpu == cpu)
cpu               782 arch/x86/platform/uv/uv_nmi.c 		uv_nmi_dump_state_cpu(cpu, regs);
cpu               798 arch/x86/platform/uv/uv_nmi.c static void uv_nmi_kdump(int cpu, int master, struct pt_regs *regs)
cpu               802 arch/x86/platform/uv/uv_nmi.c 		pr_emerg("UV: NMI executing crash_kexec on CPU%d\n", cpu);
cpu               820 arch/x86/platform/uv/uv_nmi.c static inline void uv_nmi_kdump(int cpu, int master, struct pt_regs *regs)
cpu               853 arch/x86/platform/uv/uv_nmi.c static void uv_call_kgdb_kdb(int cpu, struct pt_regs *regs, int master)
cpu               863 arch/x86/platform/uv/uv_nmi.c 		ret = kgdb_nmicallin(cpu, X86_TRAP_NMI, regs, reason,
cpu               880 arch/x86/platform/uv/uv_nmi.c 			kgdb_nmicallback(cpu, regs);
cpu               886 arch/x86/platform/uv/uv_nmi.c static inline void uv_call_kgdb_kdb(int cpu, struct pt_regs *regs, int master)
cpu               898 arch/x86/platform/uv/uv_nmi.c 	int cpu = smp_processor_id();
cpu               911 arch/x86/platform/uv/uv_nmi.c 	master = (atomic_read(&uv_nmi_cpu) == cpu);
cpu               915 arch/x86/platform/uv/uv_nmi.c 		uv_nmi_kdump(cpu, master, regs);
cpu               927 arch/x86/platform/uv/uv_nmi.c 		uv_nmi_action_health(cpu, regs, master);
cpu               929 arch/x86/platform/uv/uv_nmi.c 		uv_nmi_dump_state(cpu, regs, master);
cpu               931 arch/x86/platform/uv/uv_nmi.c 		uv_call_kgdb_kdb(cpu, regs, master);
cpu               942 arch/x86/platform/uv/uv_nmi.c 	uv_clear_nmi(cpu);
cpu              1006 arch/x86/platform/uv/uv_nmi.c 	int cpu;
cpu              1012 arch/x86/platform/uv/uv_nmi.c 	for_each_present_cpu(cpu) {
cpu              1013 arch/x86/platform/uv/uv_nmi.c 		int nid = cpu_to_node(cpu);
cpu              1023 arch/x86/platform/uv/uv_nmi.c 		uv_hub_nmi_per(cpu) = uv_hub_nmi_list[nid];
cpu                55 arch/x86/platform/uv/uv_time.c 	} cpu[1];
cpu                70 arch/x86/platform/uv/uv_time.c static void uv_rtc_send_IPI(int cpu)
cpu                75 arch/x86/platform/uv/uv_time.c 	apicid = cpu_physical_id(cpu);
cpu                98 arch/x86/platform/uv/uv_time.c static int uv_setup_intr(int cpu, u64 expires)
cpu               101 arch/x86/platform/uv/uv_time.c 	unsigned long apicid = cpu_physical_id(cpu) | uv_apicid_hibits;
cpu               102 arch/x86/platform/uv/uv_time.c 	int pnode = uv_cpu_to_pnode(cpu);
cpu               146 arch/x86/platform/uv/uv_time.c 	int cpu;
cpu               152 arch/x86/platform/uv/uv_time.c 	for_each_present_cpu(cpu) {
cpu               153 arch/x86/platform/uv/uv_time.c 		int nid = cpu_to_node(cpu);
cpu               154 arch/x86/platform/uv/uv_time.c 		int bid = uv_cpu_to_blade_id(cpu);
cpu               155 arch/x86/platform/uv/uv_time.c 		int bcpu = uv_cpu_blade_processor_id(cpu);
cpu               173 arch/x86/platform/uv/uv_time.c 		head->cpu[bcpu].lcpu = cpu;
cpu               174 arch/x86/platform/uv/uv_time.c 		head->cpu[bcpu].expires = ULLONG_MAX;
cpu               188 arch/x86/platform/uv/uv_time.c 		u64 exp = head->cpu[c].expires;
cpu               196 arch/x86/platform/uv/uv_time.c 		c = head->cpu[bcpu].lcpu;
cpu               211 arch/x86/platform/uv/uv_time.c static int uv_rtc_set_timer(int cpu, u64 expires)
cpu               213 arch/x86/platform/uv/uv_time.c 	int pnode = uv_cpu_to_pnode(cpu);
cpu               214 arch/x86/platform/uv/uv_time.c 	int bid = uv_cpu_to_blade_id(cpu);
cpu               216 arch/x86/platform/uv/uv_time.c 	int bcpu = uv_cpu_blade_processor_id(cpu);
cpu               217 arch/x86/platform/uv/uv_time.c 	u64 *t = &head->cpu[bcpu].expires;
cpu               228 arch/x86/platform/uv/uv_time.c 			expires < head->cpu[next_cpu].expires) {
cpu               230 arch/x86/platform/uv/uv_time.c 		if (uv_setup_intr(cpu, expires)) {
cpu               247 arch/x86/platform/uv/uv_time.c static int uv_rtc_unset_timer(int cpu, int force)
cpu               249 arch/x86/platform/uv/uv_time.c 	int pnode = uv_cpu_to_pnode(cpu);
cpu               250 arch/x86/platform/uv/uv_time.c 	int bid = uv_cpu_to_blade_id(cpu);
cpu               252 arch/x86/platform/uv/uv_time.c 	int bcpu = uv_cpu_blade_processor_id(cpu);
cpu               253 arch/x86/platform/uv/uv_time.c 	u64 *t = &head->cpu[bcpu].expires;
cpu               322 arch/x86/platform/uv/uv_time.c 	int cpu = smp_processor_id();
cpu               323 arch/x86/platform/uv/uv_time.c 	struct clock_event_device *ced = &per_cpu(cpu_ced, cpu);
cpu               328 arch/x86/platform/uv/uv_time.c 	if (uv_rtc_unset_timer(cpu, 0) != 1)
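The uv_time.c entries above keep one expiry slot per CPU on a blade and program the shared hardware timer for the earliest of them. A toy standalone version of that "find the next expiring CPU" scan follows; the blade size and the expiry values are arbitrary examples, not taken from the indexed source.

#include <stdint.h>
#include <stdio.h>

#define CPUS_PER_BLADE 4
#define EXP_MAX UINT64_MAX	/* slot unused / no timer pending */

/* Toy per-blade table: one expiry per CPU on the blade. */
static uint64_t expires[CPUS_PER_BLADE] = { EXP_MAX, 900, EXP_MAX, 350 };

/* Return the blade-local CPU whose timer expires first, or -1 if none. */
static int earliest_cpu(void)
{
	uint64_t best = EXP_MAX;
	int best_cpu = -1;

	for (int c = 0; c < CPUS_PER_BLADE; c++) {
		if (expires[c] < best) {
			best = expires[c];
			best_cpu = c;
		}
	}
	return best_cpu;
}

int main(void)
{
	printf("next interrupt belongs to blade cpu %d\n", earliest_cpu());
	return 0;
}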
cpu               151 arch/x86/power/cpu.c 	int cpu = smp_processor_id();
cpu               153 arch/x86/power/cpu.c 	struct desc_struct *desc = get_cpu_gdt_rw(cpu);
cpu               164 arch/x86/power/cpu.c 	set_tss_desc(cpu, &get_cpu_entry_area(cpu)->tss.x86_tss);
cpu               183 arch/x86/power/cpu.c 	load_fixmap_gdt(cpu);
cpu               198 arch/x86/um/ptrace_32.c 	int err, n, cpu = task_cpu(child);
cpu               201 arch/x86/um/ptrace_32.c 	err = save_i387_registers(userspace_pid[cpu],
cpu               215 arch/x86/um/ptrace_32.c 	int n, cpu = task_cpu(child);
cpu               222 arch/x86/um/ptrace_32.c 	return restore_i387_registers(userspace_pid[cpu],
cpu               228 arch/x86/um/ptrace_32.c 	int err, n, cpu = task_cpu(child);
cpu               231 arch/x86/um/ptrace_32.c 	err = save_fpx_registers(userspace_pid[cpu], (unsigned long *) &fpregs);
cpu               244 arch/x86/um/ptrace_32.c 	int n, cpu = task_cpu(child);
cpu               251 arch/x86/um/ptrace_32.c 	return restore_fpx_registers(userspace_pid[cpu],
cpu               229 arch/x86/um/ptrace_64.c 	int err, n, cpu = ((struct thread_info *) child->stack)->cpu;
cpu               232 arch/x86/um/ptrace_64.c 	err = save_i387_registers(userspace_pid[cpu],
cpu               246 arch/x86/um/ptrace_64.c 	int n, cpu = ((struct thread_info *) child->stack)->cpu;
cpu               253 arch/x86/um/ptrace_64.c 	return restore_i387_registers(userspace_pid[cpu],
cpu               203 arch/x86/um/signal.c 	pid = userspace_pid[current_thread_info()->cpu];
cpu               290 arch/x86/um/signal.c 	pid = userspace_pid[current_thread_info()->cpu];
cpu                26 arch/x86/um/tls_32.c 	u32 cpu;
cpu                28 arch/x86/um/tls_32.c 	cpu = get_cpu();
cpu                29 arch/x86/um/tls_32.c 	ret = os_set_thread_area(info, userspace_pid[cpu]);
cpu                42 arch/x86/um/tls_32.c 	u32 cpu;
cpu                44 arch/x86/um/tls_32.c 	cpu = get_cpu();
cpu                45 arch/x86/um/tls_32.c 	ret = os_get_thread_area(info, userspace_pid[cpu]);
cpu                53 arch/x86/um/vdso/um_vdso.c __vdso_getcpu(unsigned *cpu, unsigned *node, struct getcpu_cache *unused)
cpu                59 arch/x86/um/vdso/um_vdso.c 	if (cpu)
cpu                60 arch/x86/um/vdso/um_vdso.c 		*cpu = 0;
cpu                67 arch/x86/um/vdso/um_vdso.c long getcpu(unsigned *cpu, unsigned *node, struct getcpu_cache *tcache)
cpu               131 arch/x86/xen/apic.c static int xen_x86_32_early_logical_apicid(int cpu)
cpu               134 arch/x86/xen/apic.c 	return 1 << cpu;
cpu               146 arch/x86/xen/apic.c static int xen_cpu_present_to_apicid(int cpu)
cpu               148 arch/x86/xen/apic.c 	if (cpu_present(cpu))
cpu               149 arch/x86/xen/apic.c 		return cpu_data(cpu).apicid;
cpu                98 arch/x86/xen/enlighten.c static int xen_cpu_up_online(unsigned int cpu)
cpu               100 arch/x86/xen/enlighten.c 	xen_init_lock_cpu(cpu);
cpu               123 arch/x86/xen/enlighten.c static int xen_vcpu_setup_restore(int cpu)
cpu               128 arch/x86/xen/enlighten.c 	xen_vcpu_info_reset(cpu);
cpu               135 arch/x86/xen/enlighten.c 	    (xen_hvm_domain() && cpu_online(cpu))) {
cpu               136 arch/x86/xen/enlighten.c 		rc = xen_vcpu_setup(cpu);
cpu               149 arch/x86/xen/enlighten.c 	int cpu, rc;
cpu               151 arch/x86/xen/enlighten.c 	for_each_possible_cpu(cpu) {
cpu               152 arch/x86/xen/enlighten.c 		bool other_cpu = (cpu != smp_processor_id());
cpu               155 arch/x86/xen/enlighten.c 		if (xen_vcpu_nr(cpu) == XEN_VCPU_ID_INVALID)
cpu               160 arch/x86/xen/enlighten.c 					   xen_vcpu_nr(cpu), NULL) > 0;
cpu               163 arch/x86/xen/enlighten.c 		    HYPERVISOR_vcpu_op(VCPUOP_down, xen_vcpu_nr(cpu), NULL))
cpu               167 arch/x86/xen/enlighten.c 			xen_setup_runstate_info(cpu);
cpu               169 arch/x86/xen/enlighten.c 		rc = xen_vcpu_setup_restore(cpu);
cpu               172 arch/x86/xen/enlighten.c 					"System will hang.\n", cpu, rc);
cpu               183 arch/x86/xen/enlighten.c 		    HYPERVISOR_vcpu_op(VCPUOP_up, xen_vcpu_nr(cpu), NULL))
cpu               188 arch/x86/xen/enlighten.c void xen_vcpu_info_reset(int cpu)
cpu               190 arch/x86/xen/enlighten.c 	if (xen_vcpu_nr(cpu) < MAX_VIRT_CPUS) {
cpu               191 arch/x86/xen/enlighten.c 		per_cpu(xen_vcpu, cpu) =
cpu               192 arch/x86/xen/enlighten.c 			&HYPERVISOR_shared_info->vcpu_info[xen_vcpu_nr(cpu)];
cpu               195 arch/x86/xen/enlighten.c 		per_cpu(xen_vcpu, cpu) = NULL;
cpu               199 arch/x86/xen/enlighten.c int xen_vcpu_setup(int cpu)
cpu               219 arch/x86/xen/enlighten.c 		if (per_cpu(xen_vcpu, cpu) == &per_cpu(xen_vcpu_info, cpu))
cpu               224 arch/x86/xen/enlighten.c 		vcpup = &per_cpu(xen_vcpu_info, cpu);
cpu               239 arch/x86/xen/enlighten.c 					 xen_vcpu_nr(cpu), &info);
cpu               243 arch/x86/xen/enlighten.c 				     cpu, err);
cpu               250 arch/x86/xen/enlighten.c 			per_cpu(xen_vcpu, cpu) = vcpup;
cpu               255 arch/x86/xen/enlighten.c 		xen_vcpu_info_reset(cpu);
cpu               257 arch/x86/xen/enlighten.c 	return ((per_cpu(xen_vcpu, cpu) == NULL) ? -ENODEV : 0);
cpu               263 arch/x86/xen/enlighten.c 	int cpu;
cpu               265 arch/x86/xen/enlighten.c 	for_each_online_cpu(cpu)
cpu               266 arch/x86/xen/enlighten.c 		xen_pmu_finish(cpu);
cpu               318 arch/x86/xen/enlighten.c void xen_pin_vcpu(int cpu)
cpu               327 arch/x86/xen/enlighten.c 	pin_override.pcpu = cpu;
cpu               331 arch/x86/xen/enlighten.c 	if (cpu < 0)
cpu               337 arch/x86/xen/enlighten.c 			cpu);
cpu               347 arch/x86/xen/enlighten.c 			cpu);
cpu               136 arch/x86/xen/enlighten_hvm.c static int xen_cpu_up_prepare_hvm(unsigned int cpu)
cpu               144 arch/x86/xen/enlighten_hvm.c 	if (cpu_report_state(cpu) == CPU_DEAD_FROZEN) {
cpu               145 arch/x86/xen/enlighten_hvm.c 		xen_smp_intr_free(cpu);
cpu               146 arch/x86/xen/enlighten_hvm.c 		xen_uninit_lock_cpu(cpu);
cpu               149 arch/x86/xen/enlighten_hvm.c 	if (cpu_acpi_id(cpu) != U32_MAX)
cpu               150 arch/x86/xen/enlighten_hvm.c 		per_cpu(xen_vcpu_id, cpu) = cpu_acpi_id(cpu);
cpu               152 arch/x86/xen/enlighten_hvm.c 		per_cpu(xen_vcpu_id, cpu) = cpu;
cpu               153 arch/x86/xen/enlighten_hvm.c 	rc = xen_vcpu_setup(cpu);
cpu               158 arch/x86/xen/enlighten_hvm.c 		xen_setup_timer(cpu);
cpu               160 arch/x86/xen/enlighten_hvm.c 	rc = xen_smp_intr_init(cpu);
cpu               163 arch/x86/xen/enlighten_hvm.c 		     cpu, rc);
cpu               168 arch/x86/xen/enlighten_hvm.c static int xen_cpu_dead_hvm(unsigned int cpu)
cpu               170 arch/x86/xen/enlighten_hvm.c 	xen_smp_intr_free(cpu);
cpu               173 arch/x86/xen/enlighten_hvm.c 		xen_teardown_timer(cpu);
cpu                94 arch/x86/xen/enlighten_pv.c static int xen_cpu_up_prepare_pv(unsigned int cpu);
cpu                95 arch/x86/xen/enlighten_pv.c static int xen_cpu_dead_pv(unsigned int cpu);
cpu               517 arch/x86/xen/enlighten_pv.c 				unsigned int cpu, unsigned int i)
cpu               519 arch/x86/xen/enlighten_pv.c 	struct desc_struct *shadow = &per_cpu(shadow_tls_desc, cpu).desc[i];
cpu               529 arch/x86/xen/enlighten_pv.c 	gdt = get_cpu_gdt_rw(cpu);
cpu               536 arch/x86/xen/enlighten_pv.c static void xen_load_tls(struct thread_struct *t, unsigned int cpu)
cpu               566 arch/x86/xen/enlighten_pv.c 	load_TLS_descriptor(t, cpu, 0);
cpu               567 arch/x86/xen/enlighten_pv.c 	load_TLS_descriptor(t, cpu, 1);
cpu               568 arch/x86/xen/enlighten_pv.c 	load_TLS_descriptor(t, cpu, 2);
cpu               971 arch/x86/xen/enlighten_pv.c 	int cpu;
cpu               973 arch/x86/xen/enlighten_pv.c 	for_each_possible_cpu(cpu) {
cpu               975 arch/x86/xen/enlighten_pv.c 		per_cpu(xen_vcpu_id, cpu) = cpu;
cpu               985 arch/x86/xen/enlighten_pv.c 		(void) xen_vcpu_setup(cpu);
cpu              1169 arch/x86/xen/enlighten_pv.c static void __init xen_setup_gdt(int cpu)
cpu              1171 arch/x86/xen/enlighten_pv.c 	pv_ops.cpu.write_gdt_entry = xen_write_gdt_entry_boot;
cpu              1172 arch/x86/xen/enlighten_pv.c 	pv_ops.cpu.load_gdt = xen_load_gdt_boot;
cpu              1174 arch/x86/xen/enlighten_pv.c 	setup_stack_canary_segment(cpu);
cpu              1175 arch/x86/xen/enlighten_pv.c 	switch_to_new_gdt(cpu);
cpu              1177 arch/x86/xen/enlighten_pv.c 	pv_ops.cpu.write_gdt_entry = xen_write_gdt_entry;
cpu              1178 arch/x86/xen/enlighten_pv.c 	pv_ops.cpu.load_gdt = xen_load_gdt;
cpu              1204 arch/x86/xen/enlighten_pv.c 	pv_ops.cpu = xen_cpu_ops;
cpu              1413 arch/x86/xen/enlighten_pv.c static int xen_cpu_up_prepare_pv(unsigned int cpu)
cpu              1417 arch/x86/xen/enlighten_pv.c 	if (per_cpu(xen_vcpu, cpu) == NULL)
cpu              1420 arch/x86/xen/enlighten_pv.c 	xen_setup_timer(cpu);
cpu              1422 arch/x86/xen/enlighten_pv.c 	rc = xen_smp_intr_init(cpu);
cpu              1425 arch/x86/xen/enlighten_pv.c 		     cpu, rc);
cpu              1429 arch/x86/xen/enlighten_pv.c 	rc = xen_smp_intr_init_pv(cpu);
cpu              1432 arch/x86/xen/enlighten_pv.c 		     cpu, rc);
cpu              1439 arch/x86/xen/enlighten_pv.c static int xen_cpu_dead_pv(unsigned int cpu)
cpu              1441 arch/x86/xen/enlighten_pv.c 	xen_smp_intr_free(cpu);
cpu              1442 arch/x86/xen/enlighten_pv.c 	xen_smp_intr_free_pv(cpu);
cpu              1444 arch/x86/xen/enlighten_pv.c 	xen_teardown_timer(cpu);
cpu              1014 arch/x86/xen/mmu_pv.c 	unsigned cpu;
cpu              1020 arch/x86/xen/mmu_pv.c 		for_each_online_cpu(cpu) {
cpu              1021 arch/x86/xen/mmu_pv.c 			if (per_cpu(xen_current_cr3, cpu) != __pa(mm->pgd))
cpu              1023 arch/x86/xen/mmu_pv.c 			smp_call_function_single(cpu, drop_mm_ref_this_cpu, mm, 1);
cpu              1036 arch/x86/xen/mmu_pv.c 	for_each_online_cpu(cpu) {
cpu              1037 arch/x86/xen/mmu_pv.c 		if (per_cpu(xen_current_cr3, cpu) == __pa(mm->pgd))
cpu              1038 arch/x86/xen/mmu_pv.c 			cpumask_set_cpu(cpu, mask);
cpu               515 arch/x86/xen/pmu.c bool is_xen_pmu(int cpu)
cpu               520 arch/x86/xen/pmu.c void xen_pmu_init(int cpu)
cpu               540 arch/x86/xen/pmu.c 	xp.vcpu = cpu;
cpu               547 arch/x86/xen/pmu.c 	per_cpu(xenpmu_shared, cpu).xenpmu_data = xenpmu_data;
cpu               548 arch/x86/xen/pmu.c 	per_cpu(xenpmu_shared, cpu).flags = 0;
cpu               550 arch/x86/xen/pmu.c 	if (cpu == 0) {
cpu               562 arch/x86/xen/pmu.c 			cpu, err);
cpu               566 arch/x86/xen/pmu.c void xen_pmu_finish(int cpu)
cpu               573 arch/x86/xen/pmu.c 	xp.vcpu = cpu;
cpu               579 arch/x86/xen/pmu.c 	free_pages((unsigned long)per_cpu(xenpmu_shared, cpu).xenpmu_data, 0);
cpu               580 arch/x86/xen/pmu.c 	per_cpu(xenpmu_shared, cpu).xenpmu_data = NULL;
cpu                 9 arch/x86/xen/pmu.h void xen_pmu_init(int cpu);
cpu                10 arch/x86/xen/pmu.h void xen_pmu_finish(int cpu);
cpu                12 arch/x86/xen/pmu.h static inline void xen_pmu_init(int cpu) {}
cpu                13 arch/x86/xen/pmu.h static inline void xen_pmu_finish(int cpu) {}
cpu                15 arch/x86/xen/pmu.h bool is_xen_pmu(int cpu);
cpu                33 arch/x86/xen/smp.c void xen_smp_intr_free(unsigned int cpu)
cpu                35 arch/x86/xen/smp.c 	if (per_cpu(xen_resched_irq, cpu).irq >= 0) {
cpu                36 arch/x86/xen/smp.c 		unbind_from_irqhandler(per_cpu(xen_resched_irq, cpu).irq, NULL);
cpu                37 arch/x86/xen/smp.c 		per_cpu(xen_resched_irq, cpu).irq = -1;
cpu                38 arch/x86/xen/smp.c 		kfree(per_cpu(xen_resched_irq, cpu).name);
cpu                39 arch/x86/xen/smp.c 		per_cpu(xen_resched_irq, cpu).name = NULL;
cpu                41 arch/x86/xen/smp.c 	if (per_cpu(xen_callfunc_irq, cpu).irq >= 0) {
cpu                42 arch/x86/xen/smp.c 		unbind_from_irqhandler(per_cpu(xen_callfunc_irq, cpu).irq, NULL);
cpu                43 arch/x86/xen/smp.c 		per_cpu(xen_callfunc_irq, cpu).irq = -1;
cpu                44 arch/x86/xen/smp.c 		kfree(per_cpu(xen_callfunc_irq, cpu).name);
cpu                45 arch/x86/xen/smp.c 		per_cpu(xen_callfunc_irq, cpu).name = NULL;
cpu                47 arch/x86/xen/smp.c 	if (per_cpu(xen_debug_irq, cpu).irq >= 0) {
cpu                48 arch/x86/xen/smp.c 		unbind_from_irqhandler(per_cpu(xen_debug_irq, cpu).irq, NULL);
cpu                49 arch/x86/xen/smp.c 		per_cpu(xen_debug_irq, cpu).irq = -1;
cpu                50 arch/x86/xen/smp.c 		kfree(per_cpu(xen_debug_irq, cpu).name);
cpu                51 arch/x86/xen/smp.c 		per_cpu(xen_debug_irq, cpu).name = NULL;
cpu                53 arch/x86/xen/smp.c 	if (per_cpu(xen_callfuncsingle_irq, cpu).irq >= 0) {
cpu                54 arch/x86/xen/smp.c 		unbind_from_irqhandler(per_cpu(xen_callfuncsingle_irq, cpu).irq,
cpu                56 arch/x86/xen/smp.c 		per_cpu(xen_callfuncsingle_irq, cpu).irq = -1;
cpu                57 arch/x86/xen/smp.c 		kfree(per_cpu(xen_callfuncsingle_irq, cpu).name);
cpu                58 arch/x86/xen/smp.c 		per_cpu(xen_callfuncsingle_irq, cpu).name = NULL;
cpu                62 arch/x86/xen/smp.c int xen_smp_intr_init(unsigned int cpu)
cpu                67 arch/x86/xen/smp.c 	resched_name = kasprintf(GFP_KERNEL, "resched%d", cpu);
cpu                69 arch/x86/xen/smp.c 				    cpu,
cpu                76 arch/x86/xen/smp.c 	per_cpu(xen_resched_irq, cpu).irq = rc;
cpu                77 arch/x86/xen/smp.c 	per_cpu(xen_resched_irq, cpu).name = resched_name;
cpu                79 arch/x86/xen/smp.c 	callfunc_name = kasprintf(GFP_KERNEL, "callfunc%d", cpu);
cpu                81 arch/x86/xen/smp.c 				    cpu,
cpu                88 arch/x86/xen/smp.c 	per_cpu(xen_callfunc_irq, cpu).irq = rc;
cpu                89 arch/x86/xen/smp.c 	per_cpu(xen_callfunc_irq, cpu).name = callfunc_name;
cpu                91 arch/x86/xen/smp.c 	debug_name = kasprintf(GFP_KERNEL, "debug%d", cpu);
cpu                92 arch/x86/xen/smp.c 	rc = bind_virq_to_irqhandler(VIRQ_DEBUG, cpu, xen_debug_interrupt,
cpu                97 arch/x86/xen/smp.c 	per_cpu(xen_debug_irq, cpu).irq = rc;
cpu                98 arch/x86/xen/smp.c 	per_cpu(xen_debug_irq, cpu).name = debug_name;
cpu               100 arch/x86/xen/smp.c 	callfunc_name = kasprintf(GFP_KERNEL, "callfuncsingle%d", cpu);
cpu               102 arch/x86/xen/smp.c 				    cpu,
cpu               109 arch/x86/xen/smp.c 	per_cpu(xen_callfuncsingle_irq, cpu).irq = rc;
cpu               110 arch/x86/xen/smp.c 	per_cpu(xen_callfuncsingle_irq, cpu).name = callfunc_name;
cpu               115 arch/x86/xen/smp.c 	xen_smp_intr_free(cpu);
cpu               121 arch/x86/xen/smp.c 	int cpu, rc, count = 0;
cpu               131 arch/x86/xen/smp.c 	for_each_online_cpu(cpu) {
cpu               132 arch/x86/xen/smp.c 		if (xen_vcpu_nr(cpu) < MAX_VIRT_CPUS)
cpu               135 arch/x86/xen/smp.c 		rc = cpu_down(cpu);
cpu               141 arch/x86/xen/smp.c 			xen_vcpu_info_reset(cpu);
cpu               145 arch/x86/xen/smp.c 				__func__, cpu, rc);
cpu               151 arch/x86/xen/smp.c void xen_smp_send_reschedule(int cpu)
cpu               153 arch/x86/xen/smp.c 	xen_send_IPI_one(cpu, XEN_RESCHEDULE_VECTOR);
cpu               159 arch/x86/xen/smp.c 	unsigned cpu;
cpu               161 arch/x86/xen/smp.c 	for_each_cpu_and(cpu, mask, cpu_online_mask)
cpu               162 arch/x86/xen/smp.c 		xen_send_IPI_one(cpu, vector);
cpu               167 arch/x86/xen/smp.c 	int cpu;
cpu               172 arch/x86/xen/smp.c 	for_each_cpu(cpu, mask) {
cpu               173 arch/x86/xen/smp.c 		if (xen_vcpu_stolen(cpu)) {
cpu               180 arch/x86/xen/smp.c void xen_smp_send_call_function_single_ipi(int cpu)
cpu               182 arch/x86/xen/smp.c 	__xen_send_IPI_mask(cpumask_of(cpu),
cpu               246 arch/x86/xen/smp.c 	unsigned cpu;
cpu               253 arch/x86/xen/smp.c 	for_each_cpu_and(cpu, mask, cpu_online_mask) {
cpu               254 arch/x86/xen/smp.c 		if (this_cpu == cpu)
cpu               257 arch/x86/xen/smp.c 		xen_send_IPI_one(cpu, xen_vector);
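The xen/smp.c entries above pair a per-CPU interrupt setup routine with a free routine that checks each slot before releasing it, so the setup error path can simply call the same free routine on a partially initialised CPU. A toy standalone model of that init/unwind pairing follows; the struct, the fake "bind", and the failure injection are all invented for the sketch and are not Xen interfaces.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Two of the per-CPU IRQ slots, each with a dynamically allocated name. */
struct percpu_irqs {
	int   resched_irq;
	char *resched_name;
	int   callfunc_irq;
	char *callfunc_name;
};

/* Safe on partially set-up state: only valid slots are torn down. */
static void intr_free(struct percpu_irqs *p)
{
	if (p->resched_irq >= 0)
		p->resched_irq = -1;
	free(p->resched_name);
	p->resched_name = NULL;

	if (p->callfunc_irq >= 0)
		p->callfunc_irq = -1;
	free(p->callfunc_name);
	p->callfunc_name = NULL;
}

static int intr_init(struct percpu_irqs *p, int cpu, int fail_second)
{
	char buf[32];

	snprintf(buf, sizeof(buf), "resched%d", cpu);
	p->resched_name = strdup(buf);
	p->resched_irq = 10 + cpu;			/* pretend the bind succeeded */

	snprintf(buf, sizeof(buf), "callfunc%d", cpu);
	p->callfunc_name = strdup(buf);
	p->callfunc_irq = fail_second ? -1 : 20 + cpu;	/* optionally "fail" here */
	if (p->callfunc_irq < 0) {
		intr_free(p);		/* unwind everything set up so far */
		return -1;
	}
	return 0;
}

int main(void)
{
	struct percpu_irqs p = { .resched_irq = -1, .callfunc_irq = -1 };

	printf("cpu 0 init ok:   %d\n", intr_init(&p, 0, 0));
	intr_free(&p);
	printf("cpu 1 init fail: %d\n", intr_init(&p, 1, 1));
	return 0;
}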
cpu                13 arch/x86/xen/smp.h extern int xen_smp_intr_init(unsigned int cpu);
cpu                14 arch/x86/xen/smp.h extern void xen_smp_intr_free(unsigned int cpu);
cpu                15 arch/x86/xen/smp.h int xen_smp_intr_init_pv(unsigned int cpu);
cpu                16 arch/x86/xen/smp.h void xen_smp_intr_free_pv(unsigned int cpu);
cpu                20 arch/x86/xen/smp.h void xen_smp_send_reschedule(int cpu);
cpu                22 arch/x86/xen/smp.h void xen_smp_send_call_function_single_ipi(int cpu);
cpu                30 arch/x86/xen/smp.h static inline int xen_smp_intr_init(unsigned int cpu)
cpu                34 arch/x86/xen/smp.h static inline void xen_smp_intr_free(unsigned int cpu) {}
cpu                36 arch/x86/xen/smp.h static inline int xen_smp_intr_init_pv(unsigned int cpu)
cpu                40 arch/x86/xen/smp.h static inline void xen_smp_intr_free_pv(unsigned int cpu) {}
cpu                32 arch/x86/xen/smp_hvm.c 	int cpu;
cpu                39 arch/x86/xen/smp_hvm.c 	for_each_possible_cpu(cpu) {
cpu                40 arch/x86/xen/smp_hvm.c 		if (cpu == 0)
cpu                44 arch/x86/xen/smp_hvm.c 		per_cpu(xen_vcpu_id, cpu) = XEN_VCPU_ID_INVALID;
cpu                49 arch/x86/xen/smp_hvm.c static void xen_hvm_cpu_die(unsigned int cpu)
cpu                51 arch/x86/xen/smp_hvm.c 	if (common_cpu_die(cpu) == 0) {
cpu                52 arch/x86/xen/smp_hvm.c 		xen_smp_intr_free(cpu);
cpu                53 arch/x86/xen/smp_hvm.c 		xen_uninit_lock_cpu(cpu);
cpu                54 arch/x86/xen/smp_hvm.c 		xen_teardown_timer(cpu);
cpu                58 arch/x86/xen/smp_hvm.c static void xen_hvm_cpu_die(unsigned int cpu)
cpu                59 arch/x86/xen/smp_pv.c 	int cpu;
cpu                71 arch/x86/xen/smp_pv.c 	cpu = smp_processor_id();
cpu                72 arch/x86/xen/smp_pv.c 	smp_store_cpu_info(cpu);
cpu                73 arch/x86/xen/smp_pv.c 	cpu_data(cpu).x86_max_cores = 1;
cpu                74 arch/x86/xen/smp_pv.c 	set_cpu_sibling_map(cpu);
cpu                80 arch/x86/xen/smp_pv.c 	notify_cpu_starting(cpu);
cpu                82 arch/x86/xen/smp_pv.c 	set_cpu_online(cpu, true);
cpu                84 arch/x86/xen/smp_pv.c 	cpu_set_state_online(cpu);  /* Implies full memory barrier. */
cpu                98 arch/x86/xen/smp_pv.c void xen_smp_intr_free_pv(unsigned int cpu)
cpu               100 arch/x86/xen/smp_pv.c 	if (per_cpu(xen_irq_work, cpu).irq >= 0) {
cpu               101 arch/x86/xen/smp_pv.c 		unbind_from_irqhandler(per_cpu(xen_irq_work, cpu).irq, NULL);
cpu               102 arch/x86/xen/smp_pv.c 		per_cpu(xen_irq_work, cpu).irq = -1;
cpu               103 arch/x86/xen/smp_pv.c 		kfree(per_cpu(xen_irq_work, cpu).name);
cpu               104 arch/x86/xen/smp_pv.c 		per_cpu(xen_irq_work, cpu).name = NULL;
cpu               107 arch/x86/xen/smp_pv.c 	if (per_cpu(xen_pmu_irq, cpu).irq >= 0) {
cpu               108 arch/x86/xen/smp_pv.c 		unbind_from_irqhandler(per_cpu(xen_pmu_irq, cpu).irq, NULL);
cpu               109 arch/x86/xen/smp_pv.c 		per_cpu(xen_pmu_irq, cpu).irq = -1;
cpu               110 arch/x86/xen/smp_pv.c 		kfree(per_cpu(xen_pmu_irq, cpu).name);
cpu               111 arch/x86/xen/smp_pv.c 		per_cpu(xen_pmu_irq, cpu).name = NULL;
cpu               115 arch/x86/xen/smp_pv.c int xen_smp_intr_init_pv(unsigned int cpu)
cpu               120 arch/x86/xen/smp_pv.c 	callfunc_name = kasprintf(GFP_KERNEL, "irqwork%d", cpu);
cpu               122 arch/x86/xen/smp_pv.c 				    cpu,
cpu               129 arch/x86/xen/smp_pv.c 	per_cpu(xen_irq_work, cpu).irq = rc;
cpu               130 arch/x86/xen/smp_pv.c 	per_cpu(xen_irq_work, cpu).name = callfunc_name;
cpu               132 arch/x86/xen/smp_pv.c 	if (is_xen_pmu(cpu)) {
cpu               133 arch/x86/xen/smp_pv.c 		pmu_name = kasprintf(GFP_KERNEL, "pmu%d", cpu);
cpu               134 arch/x86/xen/smp_pv.c 		rc = bind_virq_to_irqhandler(VIRQ_XENPMU, cpu,
cpu               140 arch/x86/xen/smp_pv.c 		per_cpu(xen_pmu_irq, cpu).irq = rc;
cpu               141 arch/x86/xen/smp_pv.c 		per_cpu(xen_pmu_irq, cpu).name = pmu_name;
cpu               147 arch/x86/xen/smp_pv.c 	xen_smp_intr_free_pv(cpu);
cpu               236 arch/x86/xen/smp_pv.c 	unsigned cpu;
cpu               275 arch/x86/xen/smp_pv.c 		for (cpu = nr_cpu_ids - 1; !cpu_possible(cpu); cpu--)
cpu               277 arch/x86/xen/smp_pv.c 		set_cpu_possible(cpu, false);
cpu               280 arch/x86/xen/smp_pv.c 	for_each_possible_cpu(cpu)
cpu               281 arch/x86/xen/smp_pv.c 		set_cpu_present(cpu, true);
cpu               285 arch/x86/xen/smp_pv.c cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
cpu               292 arch/x86/xen/smp_pv.c 	cpumask_set_cpu(cpu, cpu_callout_mask);
cpu               293 arch/x86/xen/smp_pv.c 	if (cpumask_test_and_set_cpu(cpu, xen_cpu_initialized_map))
cpu               300 arch/x86/xen/smp_pv.c 	gdt = get_cpu_gdt_rw(cpu);
cpu               347 arch/x86/xen/smp_pv.c 	ctxt->gs_base_kernel = per_cpu_offset(cpu);
cpu               353 arch/x86/xen/smp_pv.c 	per_cpu(xen_cr3, cpu) = __pa(swapper_pg_dir);
cpu               356 arch/x86/xen/smp_pv.c 	if (HYPERVISOR_vcpu_op(VCPUOP_initialise, xen_vcpu_nr(cpu), ctxt))
cpu               363 arch/x86/xen/smp_pv.c static int xen_pv_cpu_up(unsigned int cpu, struct task_struct *idle)
cpu               367 arch/x86/xen/smp_pv.c 	rc = common_cpu_up(cpu, idle);
cpu               371 arch/x86/xen/smp_pv.c 	xen_setup_runstate_info(cpu);
cpu               377 arch/x86/xen/smp_pv.c 	rc = cpu_check_up_prepare(cpu);
cpu               382 arch/x86/xen/smp_pv.c 	per_cpu(xen_vcpu, cpu)->evtchn_upcall_mask = 1;
cpu               384 arch/x86/xen/smp_pv.c 	rc = cpu_initialize_context(cpu, idle);
cpu               388 arch/x86/xen/smp_pv.c 	xen_pmu_init(cpu);
cpu               390 arch/x86/xen/smp_pv.c 	rc = HYPERVISOR_vcpu_op(VCPUOP_up, xen_vcpu_nr(cpu), NULL);
cpu               393 arch/x86/xen/smp_pv.c 	while (cpu_report_state(cpu) != CPU_ONLINE)
cpu               402 arch/x86/xen/smp_pv.c 	unsigned int cpu = smp_processor_id();
cpu               403 arch/x86/xen/smp_pv.c 	if (cpu == 0)
cpu               412 arch/x86/xen/smp_pv.c static void xen_pv_cpu_die(unsigned int cpu)
cpu               415 arch/x86/xen/smp_pv.c 				  xen_vcpu_nr(cpu), NULL)) {
cpu               420 arch/x86/xen/smp_pv.c 	if (common_cpu_die(cpu) == 0) {
cpu               421 arch/x86/xen/smp_pv.c 		xen_smp_intr_free(cpu);
cpu               422 arch/x86/xen/smp_pv.c 		xen_uninit_lock_cpu(cpu);
cpu               423 arch/x86/xen/smp_pv.c 		xen_teardown_timer(cpu);
cpu               424 arch/x86/xen/smp_pv.c 		xen_pmu_finish(cpu);
cpu               451 arch/x86/xen/smp_pv.c static void xen_pv_cpu_die(unsigned int cpu)
cpu               464 arch/x86/xen/smp_pv.c 	int cpu = smp_processor_id();
cpu               470 arch/x86/xen/smp_pv.c 	set_cpu_online(cpu, false);
cpu               472 arch/x86/xen/smp_pv.c 	HYPERVISOR_vcpu_op(VCPUOP_down, xen_vcpu_nr(cpu), NULL);
cpu                23 arch/x86/xen/spinlock.c static void xen_qlock_kick(int cpu)
cpu                25 arch/x86/xen/spinlock.c 	int irq = per_cpu(lock_kicker_irq, cpu);
cpu                31 arch/x86/xen/spinlock.c 	xen_send_IPI_one(cpu, XEN_SPIN_UNLOCK_VECTOR);
cpu                66 arch/x86/xen/spinlock.c void xen_init_lock_cpu(int cpu)
cpu                74 arch/x86/xen/spinlock.c 	WARN(per_cpu(lock_kicker_irq, cpu) >= 0, "spinlock on CPU%d exists on IRQ%d!\n",
cpu                75 arch/x86/xen/spinlock.c 	     cpu, per_cpu(lock_kicker_irq, cpu));
cpu                77 arch/x86/xen/spinlock.c 	name = kasprintf(GFP_KERNEL, "spinlock%d", cpu);
cpu                79 arch/x86/xen/spinlock.c 				     cpu,
cpu                87 arch/x86/xen/spinlock.c 		per_cpu(lock_kicker_irq, cpu) = irq;
cpu                88 arch/x86/xen/spinlock.c 		per_cpu(irq_name, cpu) = name;
cpu                91 arch/x86/xen/spinlock.c 	printk("cpu %d spinlock event irq %d\n", cpu, irq);
cpu                94 arch/x86/xen/spinlock.c void xen_uninit_lock_cpu(int cpu)
cpu                99 arch/x86/xen/spinlock.c 	unbind_from_irqhandler(per_cpu(lock_kicker_irq, cpu), NULL);
cpu               100 arch/x86/xen/spinlock.c 	per_cpu(lock_kicker_irq, cpu) = -1;
cpu               101 arch/x86/xen/spinlock.c 	kfree(per_cpu(irq_name, cpu));
cpu               102 arch/x86/xen/spinlock.c 	per_cpu(irq_name, cpu) = NULL;
cpu                68 arch/x86/xen/suspend.c 	int cpu;
cpu                72 arch/x86/xen/suspend.c 	for_each_online_cpu(cpu)
cpu                73 arch/x86/xen/suspend.c 		xen_pmu_init(cpu);
cpu                78 arch/x86/xen/suspend.c 	int cpu;
cpu                80 arch/x86/xen/suspend.c 	for_each_online_cpu(cpu)
cpu                81 arch/x86/xen/suspend.c 		xen_pmu_finish(cpu);
cpu               234 arch/x86/xen/time.c 	int cpu = smp_processor_id();
cpu               236 arch/x86/xen/time.c 	if (HYPERVISOR_vcpu_op(VCPUOP_stop_singleshot_timer, xen_vcpu_nr(cpu),
cpu               238 arch/x86/xen/time.c 	    HYPERVISOR_vcpu_op(VCPUOP_stop_periodic_timer, xen_vcpu_nr(cpu),
cpu               247 arch/x86/xen/time.c 	int cpu = smp_processor_id();
cpu               249 arch/x86/xen/time.c 	if (HYPERVISOR_vcpu_op(VCPUOP_stop_periodic_timer, xen_vcpu_nr(cpu),
cpu               259 arch/x86/xen/time.c 	int cpu = smp_processor_id();
cpu               269 arch/x86/xen/time.c 	ret = HYPERVISOR_vcpu_op(VCPUOP_set_singleshot_timer, xen_vcpu_nr(cpu),
cpu               317 arch/x86/xen/time.c void xen_teardown_timer(int cpu)
cpu               320 arch/x86/xen/time.c 	evt = &per_cpu(xen_clock_events, cpu).evt;
cpu               328 arch/x86/xen/time.c void xen_setup_timer(int cpu)
cpu               330 arch/x86/xen/time.c 	struct xen_clock_event_device *xevt = &per_cpu(xen_clock_events, cpu);
cpu               334 arch/x86/xen/time.c 	WARN(evt->irq >= 0, "IRQ%d for CPU%d is already allocated\n", evt->irq, cpu);
cpu               336 arch/x86/xen/time.c 		xen_teardown_timer(cpu);
cpu               338 arch/x86/xen/time.c 	printk(KERN_INFO "installing Xen timer for CPU %d\n", cpu);
cpu               340 arch/x86/xen/time.c 	snprintf(xevt->name, sizeof(xevt->name), "timer%d", cpu);
cpu               342 arch/x86/xen/time.c 	irq = bind_virq_to_irqhandler(VIRQ_TIMER, cpu, xen_timer_interrupt,
cpu               350 arch/x86/xen/time.c 	evt->cpumask = cpumask_of(cpu);
cpu               362 arch/x86/xen/time.c 	int cpu;
cpu               367 arch/x86/xen/time.c 	for_each_online_cpu(cpu) {
cpu               369 arch/x86/xen/time.c 				       xen_vcpu_nr(cpu), NULL))
cpu               476 arch/x86/xen/time.c 	int cpu = smp_processor_id();
cpu               485 arch/x86/xen/time.c 	if (HYPERVISOR_vcpu_op(VCPUOP_stop_periodic_timer, xen_vcpu_nr(cpu),
cpu               509 arch/x86/xen/time.c 	xen_setup_runstate_info(cpu);
cpu               510 arch/x86/xen/time.c 	xen_setup_timer(cpu);
cpu               538 arch/x86/xen/time.c 	int cpu = smp_processor_id();
cpu               539 arch/x86/xen/time.c 	xen_setup_runstate_info(cpu);
cpu                66 arch/x86/xen/xen-ops.h void xen_setup_timer(int cpu);
cpu                67 arch/x86/xen/xen-ops.h void xen_setup_runstate_info(int cpu);
cpu                68 arch/x86/xen/xen-ops.h void xen_teardown_timer(int cpu);
cpu                81 arch/x86/xen/xen-ops.h int xen_vcpu_setup(int cpu);
cpu                82 arch/x86/xen/xen-ops.h void xen_vcpu_info_reset(int cpu);
cpu                97 arch/x86/xen/xen-ops.h void xen_init_lock_cpu(int cpu);
cpu                98 arch/x86/xen/xen-ops.h void xen_uninit_lock_cpu(int cpu);
cpu               103 arch/x86/xen/xen-ops.h static inline void xen_init_lock_cpu(int cpu)
cpu               106 arch/x86/xen/xen-ops.h static inline void xen_uninit_lock_cpu(int cpu)
cpu               150 arch/x86/xen/xen-ops.h void xen_pin_vcpu(int cpu);
cpu                18 arch/xtensa/include/asm/mmu.h 	unsigned int cpu;
cpu                35 arch/xtensa/include/asm/mmu_context.h #define cpu_asid_cache(cpu) per_cpu(asid_cache, cpu)
cpu                70 arch/xtensa/include/asm/mmu_context.h static inline void get_new_mmu_context(struct mm_struct *mm, unsigned int cpu)
cpu                72 arch/xtensa/include/asm/mmu_context.h 	unsigned long asid = cpu_asid_cache(cpu);
cpu                81 arch/xtensa/include/asm/mmu_context.h 	cpu_asid_cache(cpu) = asid;
cpu                82 arch/xtensa/include/asm/mmu_context.h 	mm->context.asid[cpu] = asid;
cpu                83 arch/xtensa/include/asm/mmu_context.h 	mm->context.cpu = cpu;
cpu                86 arch/xtensa/include/asm/mmu_context.h static inline void get_mmu_context(struct mm_struct *mm, unsigned int cpu)
cpu                93 arch/xtensa/include/asm/mmu_context.h 		unsigned long asid = mm->context.asid[cpu];
cpu                96 arch/xtensa/include/asm/mmu_context.h 				((asid ^ cpu_asid_cache(cpu)) & ~ASID_MASK))
cpu                97 arch/xtensa/include/asm/mmu_context.h 			get_new_mmu_context(mm, cpu);
cpu               101 arch/xtensa/include/asm/mmu_context.h static inline void activate_context(struct mm_struct *mm, unsigned int cpu)
cpu               103 arch/xtensa/include/asm/mmu_context.h 	get_mmu_context(mm, cpu);
cpu               104 arch/xtensa/include/asm/mmu_context.h 	set_rasid_register(ASID_INSERT(mm->context.asid[cpu]));
cpu               117 arch/xtensa/include/asm/mmu_context.h 	int cpu;
cpu               118 arch/xtensa/include/asm/mmu_context.h 	for_each_possible_cpu(cpu) {
cpu               119 arch/xtensa/include/asm/mmu_context.h 		mm->context.asid[cpu] = NO_CONTEXT;
cpu               121 arch/xtensa/include/asm/mmu_context.h 	mm->context.cpu = -1;
cpu               128 arch/xtensa/include/asm/mmu_context.h 	unsigned int cpu = smp_processor_id();
cpu               129 arch/xtensa/include/asm/mmu_context.h 	int migrated = next->context.cpu != cpu;
cpu               133 arch/xtensa/include/asm/mmu_context.h 		next->context.cpu = cpu;
cpu               136 arch/xtensa/include/asm/mmu_context.h 		activate_context(next, cpu);
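The xtensa mmu_context.h entries above allocate ASIDs with a generation counter in the upper bits: an mm's cached ASID is reusable only if its generation bits match the per-CPU ASID cache, which is what the xor-and-mask test checks. A toy single-CPU sketch of that scheme follows; the bit widths, the reserved-ASID handling, and the lack of any TLB flush on wrap are simplifications, not the kernel's policy.

#include <stdint.h>
#include <stdio.h>

#define ASID_BITS  8
#define ASID_MASK  ((1u << ASID_BITS) - 1)	/* low bits: the ASID proper */
#define NO_CONTEXT 0

/* Low bits hold the ASID, high bits the generation; start in generation 1. */
static uint32_t asid_cache = 1u << ASID_BITS;

static uint32_t get_mmu_context(uint32_t mm_asid)
{
	/* Stale if never assigned, or if the generation bits differ from the
	 * current per-CPU cache: allocate a fresh ASID in that case. */
	if (mm_asid == NO_CONTEXT ||
	    ((mm_asid ^ asid_cache) & ~ASID_MASK)) {
		asid_cache++;
		if ((asid_cache & ASID_MASK) == 0)	/* low bits wrapped */
			asid_cache += 1;		/* skip ASID 0 in the new generation */
		mm_asid = asid_cache;
	}
	return mm_asid;
}

int main(void)
{
	uint32_t a = get_mmu_context(NO_CONTEXT);

	printf("allocated 0x%x, reused 0x%x\n", a, get_mmu_context(a));
	return 0;
}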
cpu                35 arch/xtensa/include/asm/mxregs.h #define MIPICAUSE(cpu)	(0x100 + (cpu))
cpu                14 arch/xtensa/include/asm/smp.h #define raw_smp_processor_id()	(current_thread_info()->cpu)
cpu                15 arch/xtensa/include/asm/smp.h #define cpu_logical_map(cpu)	(cpu)
cpu                24 arch/xtensa/include/asm/smp.h void arch_send_call_function_single_ipi(int cpu);
cpu                34 arch/xtensa/include/asm/smp.h void __cpu_die(unsigned int cpu);
cpu                52 arch/xtensa/include/asm/thread_info.h 	__u32			cpu;		/* current CPU */
cpu                78 arch/xtensa/include/asm/thread_info.h 	.cpu		= 0,			\
cpu                36 arch/xtensa/include/asm/timex.h void local_timer_setup(unsigned cpu);
cpu                88 arch/xtensa/kernel/asm-offsets.c 	OFFSET(TI_CPU, thread_info, cpu);
cpu                56 arch/xtensa/kernel/irq.c 	unsigned cpu __maybe_unused;
cpu                62 arch/xtensa/kernel/irq.c 	for_each_online_cpu(cpu)
cpu                63 arch/xtensa/kernel/irq.c 		seq_printf(p, " %10lu", per_cpu(nmi_count, cpu));
cpu               171 arch/xtensa/kernel/irq.c 	unsigned int i, cpu = smp_processor_id();
cpu               182 arch/xtensa/kernel/irq.c 		if (!cpumask_test_cpu(cpu, mask))
cpu               189 arch/xtensa/kernel/irq.c 					    i, cpu);
cpu               404 arch/xtensa/kernel/perf_event.c static int xtensa_pmu_setup(int cpu)
cpu               407 arch/xtensa/kernel/setup.c static DEFINE_PER_CPU(struct cpu, cpu_data);
cpu               414 arch/xtensa/kernel/setup.c 		struct cpu *cpu = &per_cpu(cpu_data, i);
cpu               415 arch/xtensa/kernel/setup.c 		cpu->hotpluggable = !!i;
cpu               416 arch/xtensa/kernel/setup.c 		register_cpu(cpu, i);
cpu               110 arch/xtensa/kernel/smp.c 	unsigned int cpu = smp_processor_id();
cpu               111 arch/xtensa/kernel/smp.c 	BUG_ON(cpu != 0);
cpu               112 arch/xtensa/kernel/smp.c 	cpu_asid_cache(cpu) = ASID_USER_FIRST;
cpu               125 arch/xtensa/kernel/smp.c 	unsigned int cpu = smp_processor_id();
cpu               132 arch/xtensa/kernel/smp.c 			__func__, boot_secondary_processors, cpu);
cpu               138 arch/xtensa/kernel/smp.c 		__func__, boot_secondary_processors, cpu);
cpu               149 arch/xtensa/kernel/smp.c 	cpumask_set_cpu(cpu, mm_cpumask(mm));
cpu               157 arch/xtensa/kernel/smp.c 	notify_cpu_starting(cpu);
cpu               160 arch/xtensa/kernel/smp.c 	local_timer_setup(cpu);
cpu               162 arch/xtensa/kernel/smp.c 	set_cpu_online(cpu, true);
cpu               173 arch/xtensa/kernel/smp.c 	unsigned cpu = (unsigned)p;
cpu               176 arch/xtensa/kernel/smp.c 	set_er(run_stall_mask & ~(1u << cpu), MPSCORE);
cpu               178 arch/xtensa/kernel/smp.c 			__func__, cpu, run_stall_mask, get_er(MPSCORE));
cpu               183 arch/xtensa/kernel/smp.c 	unsigned cpu = (unsigned)p;
cpu               186 arch/xtensa/kernel/smp.c 	set_er(run_stall_mask | (1u << cpu), MPSCORE);
cpu               188 arch/xtensa/kernel/smp.c 			__func__, cpu, run_stall_mask, get_er(MPSCORE));
cpu               196 arch/xtensa/kernel/smp.c static int boot_secondary(unsigned int cpu, struct task_struct *ts)
cpu               203 arch/xtensa/kernel/smp.c 	WRITE_ONCE(cpu_start_id, cpu);
cpu               209 arch/xtensa/kernel/smp.c 	smp_call_function_single(0, mx_cpu_start, (void *)cpu, 1);
cpu               229 arch/xtensa/kernel/smp.c 						 (void *)cpu, 1);
cpu               237 arch/xtensa/kernel/smp.c int __cpu_up(unsigned int cpu, struct task_struct *idle)
cpu               241 arch/xtensa/kernel/smp.c 	if (cpu_asid_cache(cpu) == 0)
cpu               242 arch/xtensa/kernel/smp.c 		cpu_asid_cache(cpu) = ASID_USER_FIRST;
cpu               248 arch/xtensa/kernel/smp.c 			__func__, cpu, idle, start_info.stack);
cpu               251 arch/xtensa/kernel/smp.c 	ret = boot_secondary(cpu, idle);
cpu               255 arch/xtensa/kernel/smp.c 		if (!cpu_online(cpu))
cpu               260 arch/xtensa/kernel/smp.c 		pr_err("CPU %u failed to boot\n", cpu);
cpu               272 arch/xtensa/kernel/smp.c 	unsigned int cpu = smp_processor_id();
cpu               278 arch/xtensa/kernel/smp.c 	set_cpu_online(cpu, false);
cpu               293 arch/xtensa/kernel/smp.c 	clear_tasks_mm_cpumask(cpu);
cpu               298 arch/xtensa/kernel/smp.c static void platform_cpu_kill(unsigned int cpu)
cpu               300 arch/xtensa/kernel/smp.c 	smp_call_function_single(0, mx_cpu_stop, (void *)cpu, true);
cpu               307 arch/xtensa/kernel/smp.c void __cpu_die(unsigned int cpu)
cpu               315 arch/xtensa/kernel/smp.c 		if (READ_ONCE(cpu_start_id) == -cpu) {
cpu               316 arch/xtensa/kernel/smp.c 			platform_cpu_kill(cpu);
cpu               320 arch/xtensa/kernel/smp.c 	pr_err("CPU%u: unable to kill\n", cpu);
cpu               385 arch/xtensa/kernel/smp.c void arch_send_call_function_single_ipi(int cpu)
cpu               387 arch/xtensa/kernel/smp.c 	send_ipi_message(cpumask_of(cpu), IPI_CALL_FUNC);
cpu               390 arch/xtensa/kernel/smp.c void smp_send_reschedule(int cpu)
cpu               392 arch/xtensa/kernel/smp.c 	send_ipi_message(cpumask_of(cpu), IPI_RESCHEDULE);
cpu               404 arch/xtensa/kernel/smp.c static void ipi_cpu_stop(unsigned int cpu)
cpu               406 arch/xtensa/kernel/smp.c 	set_cpu_online(cpu, false);
cpu               412 arch/xtensa/kernel/smp.c 	unsigned int cpu = smp_processor_id();
cpu               413 arch/xtensa/kernel/smp.c 	struct ipi_data *ipi = &per_cpu(ipi_data, cpu);
cpu               418 arch/xtensa/kernel/smp.c 		msg = get_er(MIPICAUSE(cpu));
cpu               419 arch/xtensa/kernel/smp.c 		set_er(msg, MIPICAUSE(cpu));
cpu               436 arch/xtensa/kernel/smp.c 			ipi_cpu_stop(cpu);
cpu               445 arch/xtensa/kernel/smp.c 	unsigned int cpu;
cpu               450 arch/xtensa/kernel/smp.c 		for_each_online_cpu(cpu)
cpu               452 arch/xtensa/kernel/smp.c 					per_cpu(ipi_data, cpu).ipi_count[i]);
cpu               137 arch/xtensa/kernel/time.c void local_timer_setup(unsigned cpu)
cpu               139 arch/xtensa/kernel/time.c 	struct ccount_timer *timer = &per_cpu(ccount_timer, cpu);
cpu               143 arch/xtensa/kernel/time.c 	snprintf(timer->name, sizeof(timer->name), "ccount_clockevent_%u", cpu);
cpu               145 arch/xtensa/kernel/time.c 	clockevent->cpumask = cpumask_of(cpu);
cpu               157 arch/xtensa/kernel/time.c 	struct device_node *cpu;
cpu               160 arch/xtensa/kernel/time.c 	cpu = of_find_compatible_node(NULL, NULL, "cdns,xtensa-cpu");
cpu               161 arch/xtensa/kernel/time.c 	if (cpu) {
cpu               162 arch/xtensa/kernel/time.c 		clk = of_clk_get(cpu, 0);
cpu               367 arch/xtensa/kernel/traps.c 		unsigned int cpu;					\
cpu               369 arch/xtensa/kernel/traps.c 		for_each_possible_cpu(cpu)				\
cpu               370 arch/xtensa/kernel/traps.c 			per_cpu(exc_table, cpu).type[cause] = (handler);\
cpu                65 arch/xtensa/mm/tlb.c 	int cpu = smp_processor_id();
cpu                70 arch/xtensa/mm/tlb.c 		mm->context.asid[cpu] = NO_CONTEXT;
cpu                71 arch/xtensa/mm/tlb.c 		activate_context(mm, cpu);
cpu                74 arch/xtensa/mm/tlb.c 		mm->context.asid[cpu] = NO_CONTEXT;
cpu                75 arch/xtensa/mm/tlb.c 		mm->context.cpu = -1;
cpu                91 arch/xtensa/mm/tlb.c 	int cpu = smp_processor_id();
cpu                95 arch/xtensa/mm/tlb.c 	if (mm->context.asid[cpu] == NO_CONTEXT)
cpu                99 arch/xtensa/mm/tlb.c 		 (unsigned long)mm->context.asid[cpu], start, end);
cpu               105 arch/xtensa/mm/tlb.c 		set_rasid_register(ASID_INSERT(mm->context.asid[cpu]));
cpu               128 arch/xtensa/mm/tlb.c 	int cpu = smp_processor_id();
cpu               133 arch/xtensa/mm/tlb.c 	if (mm->context.asid[cpu] == NO_CONTEXT)
cpu               139 arch/xtensa/mm/tlb.c 	set_rasid_register(ASID_INSERT(mm->context.asid[cpu]));
cpu              1660 block/blk-core.c int kblockd_schedule_work_on(int cpu, struct work_struct *work)
cpu              1662 block/blk-core.c 	return queue_work_on(cpu, kblockd_workqueue, work);
cpu              1666 block/blk-core.c int kblockd_mod_delayed_work_on(int cpu, struct delayed_work *dwork,
cpu              1669 block/blk-core.c 	return mod_delayed_work_on(cpu, kblockd_workqueue, dwork, delay);
cpu              1282 block/blk-iocost.c 	int cpu, rw;
cpu              1284 block/blk-iocost.c 	for_each_online_cpu(cpu) {
cpu              1285 block/blk-iocost.c 		struct ioc_pcpu_stat *stat = per_cpu_ptr(ioc->pcpu_stat, cpu);
cpu               527 block/blk-iolatency.c 	int cpu;
cpu               531 block/blk-iolatency.c 	for_each_online_cpu(cpu) {
cpu               533 block/blk-iolatency.c 		s = per_cpu_ptr(iolat->stats, cpu);
cpu               892 block/blk-iolatency.c 	int cpu;
cpu               896 block/blk-iolatency.c 	for_each_online_cpu(cpu) {
cpu               898 block/blk-iolatency.c 		s = per_cpu_ptr(iolat->stats, cpu);
cpu               962 block/blk-iolatency.c 	int cpu;
cpu               969 block/blk-iolatency.c 	for_each_possible_cpu(cpu) {
cpu               971 block/blk-iolatency.c 		stat = per_cpu_ptr(iolat->stats, cpu);
cpu                24 block/blk-mq-cpumap.c static int get_first_sibling(unsigned int cpu)
cpu                28 block/blk-mq-cpumap.c 	ret = cpumask_first(topology_sibling_cpumask(cpu));
cpu                32 block/blk-mq-cpumap.c 	return cpu;
cpu                39 block/blk-mq-cpumap.c 	unsigned int cpu, first_sibling, q = 0;
cpu                41 block/blk-mq-cpumap.c 	for_each_possible_cpu(cpu)
cpu                42 block/blk-mq-cpumap.c 		map[cpu] = -1;
cpu                48 block/blk-mq-cpumap.c 	for_each_present_cpu(cpu) {
cpu                51 block/blk-mq-cpumap.c 		map[cpu] = queue_index(qmap, nr_queues, q++);
cpu                54 block/blk-mq-cpumap.c 	for_each_possible_cpu(cpu) {
cpu                55 block/blk-mq-cpumap.c 		if (map[cpu] != -1)
cpu                64 block/blk-mq-cpumap.c 			map[cpu] = queue_index(qmap, nr_queues, q++);
cpu                66 block/blk-mq-cpumap.c 			first_sibling = get_first_sibling(cpu);
cpu                67 block/blk-mq-cpumap.c 			if (first_sibling == cpu)
cpu                68 block/blk-mq-cpumap.c 				map[cpu] = queue_index(qmap, nr_queues, q++);
cpu                70 block/blk-mq-cpumap.c 				map[cpu] = map[first_sibling];
cpu               870 block/blk-mq-debugfs.c 	snprintf(name, sizeof(name), "cpu%u", ctx->cpu);
cpu                30 block/blk-mq-pci.c 	unsigned int queue, cpu;
cpu                37 block/blk-mq-pci.c 		for_each_cpu(cpu, mask)
cpu                38 block/blk-mq-pci.c 			qmap->mq_map[cpu] = qmap->queue_offset + queue;
cpu                28 block/blk-mq-rdma.c 	unsigned int queue, cpu;
cpu                35 block/blk-mq-rdma.c 		for_each_cpu(cpu, mask)
cpu                36 block/blk-mq-rdma.c 			map->mq_map[cpu] = map->queue_offset + queue;
cpu               265 block/blk-mq-sysfs.c 		ret = kobject_add(&ctx->kobj, &hctx->kobj, "cpu%u", ctx->cpu);
cpu               298 block/blk-mq-sysfs.c 	int cpu;
cpu               300 block/blk-mq-sysfs.c 	for_each_possible_cpu(cpu) {
cpu               301 block/blk-mq-sysfs.c 		ctx = per_cpu_ptr(q->queue_ctx, cpu);
cpu               310 block/blk-mq-sysfs.c 	int cpu;
cpu               314 block/blk-mq-sysfs.c 	for_each_possible_cpu(cpu) {
cpu               315 block/blk-mq-sysfs.c 		ctx = per_cpu_ptr(q->queue_ctx, cpu);
cpu               201 block/blk-mq-tag.c 		sbitmap_queue_clear(&tags->bitmap_tags, real_tag, ctx->cpu);
cpu               204 block/blk-mq-tag.c 		sbitmap_queue_clear(&tags->breserved_tags, tag, ctx->cpu);
cpu                28 block/blk-mq-virtio.c 	unsigned int queue, cpu;
cpu                38 block/blk-mq-virtio.c 		for_each_cpu(cpu, mask)
cpu                39 block/blk-mq-virtio.c 			qmap->mq_map[cpu] = qmap->queue_offset + queue;
cpu               451 block/blk-mq.c 	unsigned int cpu;
cpu               479 block/blk-mq.c 	cpu = cpumask_first_and(alloc_data.hctx->cpumask, cpu_online_mask);
cpu               480 block/blk-mq.c 	alloc_data.ctx = __blk_mq_get_ctx(q, cpu);
cpu               587 block/blk-mq.c 	int cpu;
cpu               614 block/blk-mq.c 	cpu = get_cpu();
cpu               616 block/blk-mq.c 		shared = cpus_share_cache(cpu, ctx->cpu);
cpu               618 block/blk-mq.c 	if (cpu != ctx->cpu && !shared && cpu_online(ctx->cpu)) {
cpu               622 block/blk-mq.c 		smp_call_function_single_async(ctx->cpu, &rq->csd);
cpu              1411 block/blk-mq.c 	int cpu = cpumask_first_and(hctx->cpumask, cpu_online_mask);
cpu              1413 block/blk-mq.c 	if (cpu >= nr_cpu_ids)
cpu              1414 block/blk-mq.c 		cpu = cpumask_first(hctx->cpumask);
cpu              1415 block/blk-mq.c 	return cpu;
cpu              1471 block/blk-mq.c 		int cpu = get_cpu();
cpu              1472 block/blk-mq.c 		if (cpumask_test_cpu(cpu, hctx->cpumask)) {
cpu              2239 block/blk-mq.c static int blk_mq_hctx_notify_dead(unsigned int cpu, struct hlist_node *node)
cpu              2247 block/blk-mq.c 	ctx = __blk_mq_get_ctx(hctx->queue, cpu);
cpu              2429 block/blk-mq.c 		__ctx->cpu = i;
cpu              2651 block/blk-mq.c 	int cpu;
cpu              2661 block/blk-mq.c 	for_each_possible_cpu(cpu) {
cpu              2662 block/blk-mq.c 		struct blk_mq_ctx *ctx = per_cpu_ptr(ctxs->queue_ctx, cpu);
cpu              3556 block/blk-mq.c 	return rq->mq_ctx->cpu;
cpu                24 block/blk-mq.h 	unsigned int		cpu;
cpu                92 block/blk-mq.h 							  unsigned int cpu)
cpu                94 block/blk-mq.h 	return q->queue_hw_ctx[q->tag_set->map[type].mq_map[cpu]];
cpu               142 block/blk-mq.h 					   unsigned int cpu)
cpu               144 block/blk-mq.h 	return per_cpu_ptr(q->queue_ctx, cpu);
cpu               231 block/blk-mq.h 	int cpu;
cpu               233 block/blk-mq.h 	for_each_possible_cpu(cpu)
cpu               234 block/blk-mq.h 		qmap->mq_map[cpu] = 0;
cpu                61 block/blk-softirq.c static int raise_blk_irq(int cpu, struct request *rq)
cpu                63 block/blk-softirq.c 	if (cpu_online(cpu)) {
cpu                70 block/blk-softirq.c 		smp_call_function_single_async(cpu, data);
cpu                77 block/blk-softirq.c static int raise_blk_irq(int cpu, struct request *rq)
cpu                83 block/blk-softirq.c static int blk_softirq_cpu_dead(unsigned int cpu)
cpu                90 block/blk-softirq.c 	list_splice_init(&per_cpu(blk_cpu_done, cpu),
cpu               101 block/blk-softirq.c 	int cpu, ccpu = req->mq_ctx->cpu;
cpu               108 block/blk-softirq.c 	cpu = smp_processor_id();
cpu               115 block/blk-softirq.c 			shared = cpus_share_cache(cpu, ccpu);
cpu               117 block/blk-softirq.c 		ccpu = cpu;
cpu               127 block/blk-softirq.c 	if (ccpu == cpu || shared) {
cpu                83 block/blk-stat.c 	int cpu;
cpu                88 block/blk-stat.c 	for_each_online_cpu(cpu) {
cpu                91 block/blk-stat.c 		cpu_stat = per_cpu_ptr(cb->cpu_stat, cpu);
cpu               139 block/blk-stat.c 	int cpu;
cpu               141 block/blk-stat.c 	for_each_possible_cpu(cpu) {
cpu               144 block/blk-stat.c 		cpu_stat = per_cpu_ptr(cb->cpu_stat, cpu);
cpu              2038 block/blk-throttle.c 	int i, cpu, rw;
cpu              2053 block/blk-throttle.c 			for_each_possible_cpu(cpu) {
cpu              2058 block/blk-throttle.c 					cpu);
cpu                71 block/genhd.c  	int cpu;
cpu                79 block/genhd.c  	for_each_possible_cpu(cpu) {
cpu                80 block/genhd.c  		inflight += part_stat_local_read_cpu(part, in_flight[0], cpu) +
cpu                81 block/genhd.c  			    part_stat_local_read_cpu(part, in_flight[1], cpu);
cpu                92 block/genhd.c  	int cpu;
cpu               101 block/genhd.c  	for_each_possible_cpu(cpu) {
cpu               102 block/genhd.c  		inflight[0] += part_stat_local_read_cpu(part, in_flight[0], cpu);
cpu               103 block/genhd.c  		inflight[1] += part_stat_local_read_cpu(part, in_flight[1], cpu);
cpu               280 block/kyber-iosched.c 	int cpu;
cpu               284 block/kyber-iosched.c 	for_each_online_cpu(cpu) {
cpu               287 block/kyber-iosched.c 		cpu_latency = per_cpu_ptr(kqd->cpu_latency, cpu);
cpu               548 block/kyber-iosched.c 				    rq->mq_ctx->cpu);
cpu                98 crypto/cryptd.c 	int cpu;
cpu               104 crypto/cryptd.c 	for_each_possible_cpu(cpu) {
cpu               105 crypto/cryptd.c 		cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
cpu               115 crypto/cryptd.c 	int cpu;
cpu               118 crypto/cryptd.c 	for_each_possible_cpu(cpu) {
cpu               119 crypto/cryptd.c 		cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
cpu               128 crypto/cryptd.c 	int cpu, err;
cpu               132 crypto/cryptd.c 	cpu = get_cpu();
cpu               141 crypto/cryptd.c 	queue_work_on(cpu, cryptd_wq, &cpu_queue->work);
cpu               170 crypto/pcrypt.c 	int cpu, cpu_index;
cpu               180 crypto/pcrypt.c 	for (cpu = 0; cpu < cpu_index; cpu++)
cpu                64 drivers/acpi/acpi_extlog.c #define ELOG_IDX(cpu, bank) \
cpu                65 drivers/acpi/acpi_extlog.c 	(cpu_physical_id(cpu) * l1_percpu_entry + (bank))
cpu                73 drivers/acpi/acpi_extlog.c static struct acpi_hest_generic_status *extlog_elog_entry_check(int cpu, int bank)
cpu                79 drivers/acpi/acpi_extlog.c 	WARN_ON(cpu < 0);
cpu                80 drivers/acpi/acpi_extlog.c 	idx = ELOG_IDX(cpu, bank);
cpu                96 drivers/acpi/acpi_extlog.c 			       struct acpi_hest_generic_status *estatus, int cpu)
cpu               110 drivers/acpi/acpi_extlog.c 	printk("%s""Hardware error detected on CPU%d\n", pfx_seq, cpu);
cpu               115 drivers/acpi/acpi_extlog.c 			    struct acpi_hest_generic_status *estatus, int cpu)
cpu               128 drivers/acpi/acpi_extlog.c 		__print_extlog_rcd(pfx, estatus, cpu);
cpu               140 drivers/acpi/acpi_extlog.c 	int	cpu = mce->extcpu;
cpu               148 drivers/acpi/acpi_extlog.c 	estatus = extlog_elog_entry_check(cpu, bank);
cpu               159 drivers/acpi/acpi_extlog.c 		print_extlog_rcd(NULL, tmp, cpu);
cpu                89 drivers/acpi/acpi_pad.c 	int cpu;
cpu                98 drivers/acpi/acpi_pad.c 	for_each_cpu(cpu, pad_busy_cpus)
cpu                99 drivers/acpi/acpi_pad.c 		cpumask_or(tmp, tmp, topology_sibling_cpumask(cpu));
cpu               109 drivers/acpi/acpi_pad.c 	for_each_cpu(cpu, tmp) {
cpu               110 drivers/acpi/acpi_pad.c 		if (cpu_weight[cpu] < min_weight) {
cpu               111 drivers/acpi/acpi_pad.c 			min_weight = cpu_weight[cpu];
cpu               112 drivers/acpi/acpi_pad.c 			preferred_cpu = cpu;
cpu               171 drivers/acpi/acpi_processor.c int __weak acpi_unmap_cpu(int cpu)
cpu               176 drivers/acpi/acpi_processor.c int __weak arch_register_cpu(int cpu)
cpu               181 drivers/acpi/acpi_processor.c void __weak arch_unregister_cpu(int cpu) {}
cpu               967 drivers/acpi/cppc_acpi.c static int cpc_read(int cpu, struct cpc_register_resource *reg_res, u64 *val)
cpu               971 drivers/acpi/cppc_acpi.c 	int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
cpu               985 drivers/acpi/cppc_acpi.c 		return cpc_read_ffh(cpu, reg, val);
cpu              1012 drivers/acpi/cppc_acpi.c static int cpc_write(int cpu, struct cpc_register_resource *reg_res, u64 val)
cpu              1016 drivers/acpi/cppc_acpi.c 	int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
cpu              1024 drivers/acpi/cppc_acpi.c 		return cpc_write_ffh(cpu, reg, val);
cpu              1271 drivers/acpi/cppc_acpi.c int cppc_set_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls)
cpu              1273 drivers/acpi/cppc_acpi.c 	struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpu);
cpu              1275 drivers/acpi/cppc_acpi.c 	int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
cpu              1280 drivers/acpi/cppc_acpi.c 		pr_debug("No CPC descriptor for CPU:%d\n", cpu);
cpu              1320 drivers/acpi/cppc_acpi.c 	cpc_write(cpu, desired_reg, perf_ctrls->desired_perf);
cpu               287 drivers/acpi/pptt.c 	struct acpi_pptt_processor *cpu;
cpu               289 drivers/acpi/pptt.c 	cpu = acpi_find_processor_node(table_hdr, acpi_cpu_id);
cpu               290 drivers/acpi/pptt.c 	if (cpu)
cpu               291 drivers/acpi/pptt.c 		number_of_levels = acpi_count_levels(table_hdr, cpu);
cpu               410 drivers/acpi/pptt.c 				 unsigned int cpu)
cpu               413 drivers/acpi/pptt.c 	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
cpu               414 drivers/acpi/pptt.c 	u32 acpi_cpu_id = get_acpi_id_for_cpu(cpu);
cpu               419 drivers/acpi/pptt.c 	while (index < get_cpu_cacheinfo(cpu)->num_leaves) {
cpu               436 drivers/acpi/pptt.c 			   struct acpi_pptt_processor *cpu)
cpu               445 drivers/acpi/pptt.c 	if (cpu->flags & ACPI_PPTT_ACPI_IDENTICAL) {
cpu               446 drivers/acpi/pptt.c 		next = fetch_pptt_node(table_hdr, cpu->parent);
cpu               458 drivers/acpi/pptt.c 							   struct acpi_pptt_processor *cpu,
cpu               463 drivers/acpi/pptt.c 	while (cpu && level) {
cpu               466 drivers/acpi/pptt.c 			if (flag_identical(table_hdr, cpu))
cpu               468 drivers/acpi/pptt.c 		} else if (cpu->flags & flag)
cpu               471 drivers/acpi/pptt.c 		prev_node = fetch_pptt_node(table_hdr, cpu->parent);
cpu               474 drivers/acpi/pptt.c 		cpu = prev_node;
cpu               477 drivers/acpi/pptt.c 	return cpu;
cpu               499 drivers/acpi/pptt.c 				     unsigned int cpu, int level, int flag)
cpu               502 drivers/acpi/pptt.c 	u32 acpi_cpu_id = get_acpi_id_for_cpu(cpu);
cpu               520 drivers/acpi/pptt.c 		    cpu, acpi_cpu_id);
cpu               524 drivers/acpi/pptt.c static int find_acpi_cpu_topology_tag(unsigned int cpu, int level, int flag)
cpu               535 drivers/acpi/pptt.c 	retval = topology_get_acpi_cpu_tag(table, cpu, level, flag);
cpu               537 drivers/acpi/pptt.c 		 cpu, level, retval);
cpu               556 drivers/acpi/pptt.c static int check_acpi_cpu_flag(unsigned int cpu, int rev, u32 flag)
cpu               560 drivers/acpi/pptt.c 	u32 acpi_cpu_id = get_acpi_id_for_cpu(cpu);
cpu               591 drivers/acpi/pptt.c int acpi_find_last_cache_level(unsigned int cpu)
cpu               598 drivers/acpi/pptt.c 	pr_debug("Cache Setup find last level CPU=%d\n", cpu);
cpu               600 drivers/acpi/pptt.c 	acpi_cpu_id = get_acpi_id_for_cpu(cpu);
cpu               626 drivers/acpi/pptt.c int cache_setup_acpi(unsigned int cpu)
cpu               631 drivers/acpi/pptt.c 	pr_debug("Cache Setup ACPI CPU %d\n", cpu);
cpu               639 drivers/acpi/pptt.c 	cache_setup_acpi_cpu(table, cpu);
cpu               654 drivers/acpi/pptt.c int acpi_pptt_cpu_is_thread(unsigned int cpu)
cpu               656 drivers/acpi/pptt.c 	return check_acpi_cpu_flag(cpu, 2, ACPI_PPTT_ACPI_PROCESSOR_IS_THREAD);
cpu               677 drivers/acpi/pptt.c int find_acpi_cpu_topology(unsigned int cpu, int level)
cpu               679 drivers/acpi/pptt.c 	return find_acpi_cpu_topology_tag(cpu, level, 0);
cpu               692 drivers/acpi/pptt.c int find_acpi_cpu_cache_topology(unsigned int cpu, int level)
cpu               697 drivers/acpi/pptt.c 	u32 acpi_cpu_id = get_acpi_id_for_cpu(cpu);
cpu               732 drivers/acpi/pptt.c int find_acpi_cpu_topology_package(unsigned int cpu)
cpu               734 drivers/acpi/pptt.c 	return find_acpi_cpu_topology_tag(cpu, PPTT_ABORT_PACKAGE,
cpu               758 drivers/acpi/pptt.c int find_acpi_cpu_topology_hetero_id(unsigned int cpu)
cpu               760 drivers/acpi/pptt.c 	return find_acpi_cpu_topology_tag(cpu, PPTT_ABORT_PACKAGE,
cpu               100 drivers/acpi/processor_driver.c static int acpi_soft_cpu_online(unsigned int cpu)
cpu               102 drivers/acpi/processor_driver.c 	struct acpi_processor *pr = per_cpu(processors, cpu);
cpu               129 drivers/acpi/processor_driver.c static int acpi_soft_cpu_dead(unsigned int cpu)
cpu               131 drivers/acpi/processor_driver.c 	struct acpi_processor *pr = per_cpu(processors, cpu);
cpu               675 drivers/acpi/processor_idle.c 	struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu);
cpu               757 drivers/acpi/processor_idle.c 	struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu);
cpu               767 drivers/acpi/processor_idle.c 			cx = per_cpu(acpi_cstate[index], dev->cpu);
cpu               774 drivers/acpi/processor_idle.c 				cx = per_cpu(acpi_cstate[index], dev->cpu);
cpu               797 drivers/acpi/processor_idle.c 	struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu);
cpu               830 drivers/acpi/processor_idle.c 		per_cpu(acpi_cstate[count], dev->cpu) = cx;
cpu              1223 drivers/acpi/processor_idle.c int __weak acpi_processor_ffh_lpi_probe(unsigned int cpu)
cpu              1327 drivers/acpi/processor_idle.c 	dev->cpu = pr->id;
cpu              1371 drivers/acpi/processor_idle.c 	int cpu;
cpu              1394 drivers/acpi/processor_idle.c 		for_each_online_cpu(cpu) {
cpu              1395 drivers/acpi/processor_idle.c 			_pr = per_cpu(processors, cpu);
cpu              1398 drivers/acpi/processor_idle.c 			dev = per_cpu(acpi_cpuidle_device, cpu);
cpu              1407 drivers/acpi/processor_idle.c 		for_each_online_cpu(cpu) {
cpu              1408 drivers/acpi/processor_idle.c 			_pr = per_cpu(processors, cpu);
cpu              1413 drivers/acpi/processor_idle.c 				dev = per_cpu(acpi_cpuidle_device, cpu);
cpu               141 drivers/acpi/processor_perflib.c int acpi_processor_get_bios_limit(int cpu, unsigned int *limit)
cpu               145 drivers/acpi/processor_perflib.c 	pr = per_cpu(processors, cpu);
cpu               162 drivers/acpi/processor_perflib.c 	unsigned int cpu;
cpu               164 drivers/acpi/processor_perflib.c 	for_each_cpu(cpu, policy->related_cpus) {
cpu               165 drivers/acpi/processor_perflib.c 		struct acpi_processor *pr = per_cpu(processors, cpu);
cpu               176 drivers/acpi/processor_perflib.c 			       cpu, ret);
cpu               182 drivers/acpi/processor_perflib.c 	unsigned int cpu;
cpu               184 drivers/acpi/processor_perflib.c 	for_each_cpu(cpu, policy->related_cpus) {
cpu               185 drivers/acpi/processor_perflib.c 		struct acpi_processor *pr = per_cpu(processors, cpu);
cpu               727 drivers/acpi/processor_perflib.c 				    *performance, unsigned int cpu)
cpu               736 drivers/acpi/processor_perflib.c 	pr = per_cpu(processors, cpu);
cpu               763 drivers/acpi/processor_perflib.c void acpi_processor_unregister_performance(unsigned int cpu)
cpu               769 drivers/acpi/processor_perflib.c 	pr = per_cpu(processors, cpu);
cpu                39 drivers/acpi/processor_thermal.c #define reduction_pctg(cpu) \
cpu                40 drivers/acpi/processor_thermal.c 	per_cpu(cpufreq_thermal_reduction_pctg, phys_package_first_cpu(cpu))
cpu                49 drivers/acpi/processor_thermal.c static int phys_package_first_cpu(int cpu)
cpu                52 drivers/acpi/processor_thermal.c 	int id = topology_physical_package_id(cpu);
cpu                60 drivers/acpi/processor_thermal.c static int cpu_has_cpufreq(unsigned int cpu)
cpu                63 drivers/acpi/processor_thermal.c 	if (!acpi_processor_cpufreq_init || cpufreq_get_policy(&policy, cpu))
cpu                68 drivers/acpi/processor_thermal.c static int cpufreq_get_max_state(unsigned int cpu)
cpu                70 drivers/acpi/processor_thermal.c 	if (!cpu_has_cpufreq(cpu))
cpu                76 drivers/acpi/processor_thermal.c static int cpufreq_get_cur_state(unsigned int cpu)
cpu                78 drivers/acpi/processor_thermal.c 	if (!cpu_has_cpufreq(cpu))
cpu                81 drivers/acpi/processor_thermal.c 	return reduction_pctg(cpu);
cpu                84 drivers/acpi/processor_thermal.c static int cpufreq_set_cur_state(unsigned int cpu, int state)
cpu                91 drivers/acpi/processor_thermal.c 	if (!cpu_has_cpufreq(cpu))
cpu                94 drivers/acpi/processor_thermal.c 	reduction_pctg(cpu) = state;
cpu               103 drivers/acpi/processor_thermal.c 		    topology_physical_package_id(cpu))
cpu               130 drivers/acpi/processor_thermal.c 	unsigned int cpu;
cpu               132 drivers/acpi/processor_thermal.c 	for_each_cpu(cpu, policy->related_cpus) {
cpu               133 drivers/acpi/processor_thermal.c 		struct acpi_processor *pr = per_cpu(processors, cpu);
cpu               144 drivers/acpi/processor_thermal.c 			       cpu, ret);
cpu               150 drivers/acpi/processor_thermal.c 	unsigned int cpu;
cpu               152 drivers/acpi/processor_thermal.c 	for_each_cpu(cpu, policy->related_cpus) {
cpu               153 drivers/acpi/processor_thermal.c 		struct acpi_processor *pr = per_cpu(processors, policy->cpu);
cpu               160 drivers/acpi/processor_thermal.c static int cpufreq_get_max_state(unsigned int cpu)
cpu               165 drivers/acpi/processor_thermal.c static int cpufreq_get_cur_state(unsigned int cpu)
cpu               170 drivers/acpi/processor_thermal.c static int cpufreq_set_cur_state(unsigned int cpu, int state)
cpu                38 drivers/acpi/processor_throttling.c 	unsigned int cpu;		/* cpu nr */
cpu               212 drivers/acpi/processor_throttling.c 	unsigned int cpu ;
cpu               217 drivers/acpi/processor_throttling.c 	cpu = p_tstate->cpu;
cpu               218 drivers/acpi/processor_throttling.c 	pr = per_cpu(processors, cpu);
cpu               225 drivers/acpi/processor_throttling.c 				"unsupported on CPU %d\n", cpu));
cpu               251 drivers/acpi/processor_throttling.c 				cpu, target_state));
cpu               261 drivers/acpi/processor_throttling.c 				cpu, target_state));
cpu              1103 drivers/acpi/processor_throttling.c 		t_state.cpu = i;
cpu              1162 drivers/acpi/processor_throttling.c 		t_state.cpu = i;
cpu                40 drivers/base/arch_topology.c void topology_set_cpu_scale(unsigned int cpu, unsigned long capacity)
cpu                42 drivers/base/arch_topology.c 	per_cpu(cpu_scale, cpu) = capacity;
cpu                49 drivers/base/arch_topology.c 	struct cpu *cpu = container_of(dev, struct cpu, dev);
cpu                51 drivers/base/arch_topology.c 	return sprintf(buf, "%lu\n", topology_get_cpu_scale(cpu->dev.id));
cpu                62 drivers/base/arch_topology.c 	struct device *cpu;
cpu                65 drivers/base/arch_topology.c 		cpu = get_cpu_device(i);
cpu                66 drivers/base/arch_topology.c 		if (!cpu) {
cpu                71 drivers/base/arch_topology.c 		device_create_file(cpu, &dev_attr_cpu_capacity);
cpu               111 drivers/base/arch_topology.c 	int cpu;
cpu               117 drivers/base/arch_topology.c 	for_each_possible_cpu(cpu) {
cpu               119 drivers/base/arch_topology.c 			 cpu, raw_capacity[cpu]);
cpu               120 drivers/base/arch_topology.c 		capacity = (raw_capacity[cpu] << SCHED_CAPACITY_SHIFT)
cpu               122 drivers/base/arch_topology.c 		topology_set_cpu_scale(cpu, capacity);
cpu               124 drivers/base/arch_topology.c 			cpu, topology_get_cpu_scale(cpu));
cpu               128 drivers/base/arch_topology.c bool __init topology_parse_cpu_capacity(struct device_node *cpu_node, int cpu)
cpu               150 drivers/base/arch_topology.c 		raw_capacity[cpu] = cpu_capacity;
cpu               152 drivers/base/arch_topology.c 			cpu_node, raw_capacity[cpu]);
cpu               177 drivers/base/arch_topology.c 	int cpu;
cpu               191 drivers/base/arch_topology.c 	for_each_cpu(cpu, policy->related_cpus) {
cpu               192 drivers/base/arch_topology.c 		raw_capacity[cpu] = topology_get_cpu_scale(cpu) *
cpu               194 drivers/base/arch_topology.c 		capacity_scale = max(raw_capacity[cpu], capacity_scale);
cpu               254 drivers/base/arch_topology.c 	int cpu;
cpu               260 drivers/base/arch_topology.c 	cpu = of_cpu_node_to_id(cpu_node);
cpu               261 drivers/base/arch_topology.c 	if (cpu >= 0)
cpu               262 drivers/base/arch_topology.c 		topology_parse_cpu_capacity(cpu_node, cpu);
cpu               267 drivers/base/arch_topology.c 	return cpu;
cpu               276 drivers/base/arch_topology.c 	int cpu;
cpu               284 drivers/base/arch_topology.c 			cpu = get_cpu_for_node(t);
cpu               285 drivers/base/arch_topology.c 			if (cpu >= 0) {
cpu               286 drivers/base/arch_topology.c 				cpu_topology[cpu].package_id = package_id;
cpu               287 drivers/base/arch_topology.c 				cpu_topology[cpu].core_id = core_id;
cpu               288 drivers/base/arch_topology.c 				cpu_topology[cpu].thread_id = i;
cpu               300 drivers/base/arch_topology.c 	cpu = get_cpu_for_node(core);
cpu               301 drivers/base/arch_topology.c 	if (cpu >= 0) {
cpu               308 drivers/base/arch_topology.c 		cpu_topology[cpu].package_id = package_id;
cpu               309 drivers/base/arch_topology.c 		cpu_topology[cpu].core_id = core_id;
cpu               390 drivers/base/arch_topology.c 	int cpu;
cpu               416 drivers/base/arch_topology.c 	for_each_possible_cpu(cpu)
cpu               417 drivers/base/arch_topology.c 		if (cpu_topology[cpu].package_id == -1)
cpu               434 drivers/base/arch_topology.c const struct cpumask *cpu_coregroup_mask(int cpu)
cpu               436 drivers/base/arch_topology.c 	const cpumask_t *core_mask = cpumask_of_node(cpu_to_node(cpu));
cpu               439 drivers/base/arch_topology.c 	if (cpumask_subset(&cpu_topology[cpu].core_sibling, core_mask)) {
cpu               441 drivers/base/arch_topology.c 		core_mask = &cpu_topology[cpu].core_sibling;
cpu               443 drivers/base/arch_topology.c 	if (cpu_topology[cpu].llc_id != -1) {
cpu               444 drivers/base/arch_topology.c 		if (cpumask_subset(&cpu_topology[cpu].llc_sibling, core_mask))
cpu               445 drivers/base/arch_topology.c 			core_mask = &cpu_topology[cpu].llc_sibling;
cpu               454 drivers/base/arch_topology.c 	int cpu;
cpu               457 drivers/base/arch_topology.c 	for_each_online_cpu(cpu) {
cpu               458 drivers/base/arch_topology.c 		cpu_topo = &cpu_topology[cpu];
cpu               461 drivers/base/arch_topology.c 			cpumask_set_cpu(cpu, &cpuid_topo->llc_sibling);
cpu               469 drivers/base/arch_topology.c 		cpumask_set_cpu(cpu, &cpuid_topo->core_sibling);
cpu               475 drivers/base/arch_topology.c 		cpumask_set_cpu(cpu, &cpuid_topo->thread_sibling);
cpu               479 drivers/base/arch_topology.c static void clear_cpu_topology(int cpu)
cpu               481 drivers/base/arch_topology.c 	struct cpu_topology *cpu_topo = &cpu_topology[cpu];
cpu               484 drivers/base/arch_topology.c 	cpumask_set_cpu(cpu, &cpu_topo->llc_sibling);
cpu               487 drivers/base/arch_topology.c 	cpumask_set_cpu(cpu, &cpu_topo->core_sibling);
cpu               489 drivers/base/arch_topology.c 	cpumask_set_cpu(cpu, &cpu_topo->thread_sibling);
cpu               494 drivers/base/arch_topology.c 	unsigned int cpu;
cpu               496 drivers/base/arch_topology.c 	for_each_possible_cpu(cpu) {
cpu               497 drivers/base/arch_topology.c 		struct cpu_topology *cpu_topo = &cpu_topology[cpu];
cpu               504 drivers/base/arch_topology.c 		clear_cpu_topology(cpu);
cpu               508 drivers/base/arch_topology.c void remove_cpu_topology(unsigned int cpu)
cpu               512 drivers/base/arch_topology.c 	for_each_cpu(sibling, topology_core_cpumask(cpu))
cpu               513 drivers/base/arch_topology.c 		cpumask_clear_cpu(cpu, topology_core_cpumask(sibling));
cpu               514 drivers/base/arch_topology.c 	for_each_cpu(sibling, topology_sibling_cpumask(cpu))
cpu               515 drivers/base/arch_topology.c 		cpumask_clear_cpu(cpu, topology_sibling_cpumask(sibling));
cpu               516 drivers/base/arch_topology.c 	for_each_cpu(sibling, topology_llc_cpumask(cpu))
cpu               517 drivers/base/arch_topology.c 		cpumask_clear_cpu(cpu, topology_llc_cpumask(sibling));
cpu               519 drivers/base/arch_topology.c 	clear_cpu_topology(cpu);
cpu                25 drivers/base/cacheinfo.c #define ci_cacheinfo(cpu)	(&per_cpu(ci_cpu_cacheinfo, cpu))
cpu                26 drivers/base/cacheinfo.c #define cache_leaves(cpu)	(ci_cacheinfo(cpu)->num_leaves)
cpu                27 drivers/base/cacheinfo.c #define per_cpu_cacheinfo(cpu)	(ci_cacheinfo(cpu)->info_list)
cpu                29 drivers/base/cacheinfo.c struct cpu_cacheinfo *get_cpu_cacheinfo(unsigned int cpu)
cpu                31 drivers/base/cacheinfo.c 	return ci_cacheinfo(cpu);
cpu               156 drivers/base/cacheinfo.c static int cache_setup_of_node(unsigned int cpu)
cpu               160 drivers/base/cacheinfo.c 	struct device *cpu_dev = get_cpu_device(cpu);
cpu               161 drivers/base/cacheinfo.c 	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
cpu               170 drivers/base/cacheinfo.c 		pr_err("No cpu device for CPU %d\n", cpu);
cpu               175 drivers/base/cacheinfo.c 		pr_err("Failed to find cpu%d device node\n", cpu);
cpu               179 drivers/base/cacheinfo.c 	while (index < cache_leaves(cpu)) {
cpu               192 drivers/base/cacheinfo.c 	if (index != cache_leaves(cpu)) /* not all OF nodes populated */
cpu               198 drivers/base/cacheinfo.c static inline int cache_setup_of_node(unsigned int cpu) { return 0; }
cpu               211 drivers/base/cacheinfo.c int __weak cache_setup_acpi(unsigned int cpu)
cpu               218 drivers/base/cacheinfo.c static int cache_shared_cpu_map_setup(unsigned int cpu)
cpu               220 drivers/base/cacheinfo.c 	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
cpu               229 drivers/base/cacheinfo.c 		ret = cache_setup_of_node(cpu);
cpu               231 drivers/base/cacheinfo.c 		ret = cache_setup_acpi(cpu);
cpu               236 drivers/base/cacheinfo.c 	for (index = 0; index < cache_leaves(cpu); index++) {
cpu               244 drivers/base/cacheinfo.c 		cpumask_set_cpu(cpu, &this_leaf->shared_cpu_map);
cpu               248 drivers/base/cacheinfo.c 			if (i == cpu || !sib_cpu_ci->info_list)
cpu               252 drivers/base/cacheinfo.c 				cpumask_set_cpu(cpu, &sib_leaf->shared_cpu_map);
cpu               264 drivers/base/cacheinfo.c static void cache_shared_cpu_map_remove(unsigned int cpu)
cpu               266 drivers/base/cacheinfo.c 	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
cpu               270 drivers/base/cacheinfo.c 	for (index = 0; index < cache_leaves(cpu); index++) {
cpu               275 drivers/base/cacheinfo.c 			if (sibling == cpu) /* skip itself */
cpu               283 drivers/base/cacheinfo.c 			cpumask_clear_cpu(cpu, &sib_leaf->shared_cpu_map);
cpu               291 drivers/base/cacheinfo.c static void free_cache_attributes(unsigned int cpu)
cpu               293 drivers/base/cacheinfo.c 	if (!per_cpu_cacheinfo(cpu))
cpu               296 drivers/base/cacheinfo.c 	cache_shared_cpu_map_remove(cpu);
cpu               298 drivers/base/cacheinfo.c 	kfree(per_cpu_cacheinfo(cpu));
cpu               299 drivers/base/cacheinfo.c 	per_cpu_cacheinfo(cpu) = NULL;
cpu               302 drivers/base/cacheinfo.c int __weak init_cache_level(unsigned int cpu)
cpu               307 drivers/base/cacheinfo.c int __weak populate_cache_leaves(unsigned int cpu)
cpu               312 drivers/base/cacheinfo.c static int detect_cache_attributes(unsigned int cpu)
cpu               316 drivers/base/cacheinfo.c 	if (init_cache_level(cpu) || !cache_leaves(cpu))
cpu               319 drivers/base/cacheinfo.c 	per_cpu_cacheinfo(cpu) = kcalloc(cache_leaves(cpu),
cpu               321 drivers/base/cacheinfo.c 	if (per_cpu_cacheinfo(cpu) == NULL)
cpu               328 drivers/base/cacheinfo.c 	ret = populate_cache_leaves(cpu);
cpu               336 drivers/base/cacheinfo.c 	ret = cache_shared_cpu_map_setup(cpu);
cpu               338 drivers/base/cacheinfo.c 		pr_warn("Unable to detect cache hierarchy for CPU %d\n", cpu);
cpu               345 drivers/base/cacheinfo.c 	free_cache_attributes(cpu);
cpu               351 drivers/base/cacheinfo.c #define per_cpu_cache_dev(cpu)	(per_cpu(ci_cache_dev, cpu))
cpu               357 drivers/base/cacheinfo.c #define per_cpu_index_dev(cpu)	(per_cpu(ci_index_dev, cpu))
cpu               358 drivers/base/cacheinfo.c #define per_cache_index_dev(cpu, idx)	((per_cpu_index_dev(cpu))[idx])
cpu               560 drivers/base/cacheinfo.c static void cpu_cache_sysfs_exit(unsigned int cpu)
cpu               565 drivers/base/cacheinfo.c 	if (per_cpu_index_dev(cpu)) {
cpu               566 drivers/base/cacheinfo.c 		for (i = 0; i < cache_leaves(cpu); i++) {
cpu               567 drivers/base/cacheinfo.c 			ci_dev = per_cache_index_dev(cpu, i);
cpu               572 drivers/base/cacheinfo.c 		kfree(per_cpu_index_dev(cpu));
cpu               573 drivers/base/cacheinfo.c 		per_cpu_index_dev(cpu) = NULL;
cpu               575 drivers/base/cacheinfo.c 	device_unregister(per_cpu_cache_dev(cpu));
cpu               576 drivers/base/cacheinfo.c 	per_cpu_cache_dev(cpu) = NULL;
cpu               579 drivers/base/cacheinfo.c static int cpu_cache_sysfs_init(unsigned int cpu)
cpu               581 drivers/base/cacheinfo.c 	struct device *dev = get_cpu_device(cpu);
cpu               583 drivers/base/cacheinfo.c 	if (per_cpu_cacheinfo(cpu) == NULL)
cpu               586 drivers/base/cacheinfo.c 	per_cpu_cache_dev(cpu) = cpu_device_create(dev, NULL, NULL, "cache");
cpu               587 drivers/base/cacheinfo.c 	if (IS_ERR(per_cpu_cache_dev(cpu)))
cpu               588 drivers/base/cacheinfo.c 		return PTR_ERR(per_cpu_cache_dev(cpu));
cpu               591 drivers/base/cacheinfo.c 	per_cpu_index_dev(cpu) = kcalloc(cache_leaves(cpu),
cpu               593 drivers/base/cacheinfo.c 	if (unlikely(per_cpu_index_dev(cpu) == NULL))
cpu               599 drivers/base/cacheinfo.c 	cpu_cache_sysfs_exit(cpu);
cpu               603 drivers/base/cacheinfo.c static int cache_add_dev(unsigned int cpu)
cpu               609 drivers/base/cacheinfo.c 	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
cpu               612 drivers/base/cacheinfo.c 	rc = cpu_cache_sysfs_init(cpu);
cpu               616 drivers/base/cacheinfo.c 	parent = per_cpu_cache_dev(cpu);
cpu               617 drivers/base/cacheinfo.c 	for (i = 0; i < cache_leaves(cpu); i++) {
cpu               630 drivers/base/cacheinfo.c 		per_cache_index_dev(cpu, i) = ci_dev;
cpu               632 drivers/base/cacheinfo.c 	cpumask_set_cpu(cpu, &cache_dev_map);
cpu               636 drivers/base/cacheinfo.c 	cpu_cache_sysfs_exit(cpu);
cpu               640 drivers/base/cacheinfo.c static int cacheinfo_cpu_online(unsigned int cpu)
cpu               642 drivers/base/cacheinfo.c 	int rc = detect_cache_attributes(cpu);
cpu               646 drivers/base/cacheinfo.c 	rc = cache_add_dev(cpu);
cpu               648 drivers/base/cacheinfo.c 		free_cache_attributes(cpu);
cpu               652 drivers/base/cacheinfo.c static int cacheinfo_cpu_pre_down(unsigned int cpu)
cpu               654 drivers/base/cacheinfo.c 	if (cpumask_test_and_clear_cpu(cpu, &cache_dev_map))
cpu               655 drivers/base/cacheinfo.c 		cpu_cache_sysfs_exit(cpu);
cpu               657 drivers/base/cacheinfo.c 	free_cache_attributes(cpu);
cpu                38 drivers/base/cpu.c static void change_cpu_under_node(struct cpu *cpu,
cpu                41 drivers/base/cpu.c 	int cpuid = cpu->dev.id;
cpu                44 drivers/base/cpu.c 	cpu->node_id = to_nid;
cpu                49 drivers/base/cpu.c 	struct cpu *cpu = container_of(dev, struct cpu, dev);
cpu                65 drivers/base/cpu.c 		change_cpu_under_node(cpu, from_nid, to_nid);
cpu                75 drivers/base/cpu.c void unregister_cpu(struct cpu *cpu)
cpu                77 drivers/base/cpu.c 	int logical_cpu = cpu->dev.id;
cpu                81 drivers/base/cpu.c 	device_unregister(&cpu->dev);
cpu               145 drivers/base/cpu.c 	struct cpu *cpu = container_of(dev, struct cpu, dev);
cpu               150 drivers/base/cpu.c 	cpunum = cpu->dev.id;
cpu               366 drivers/base/cpu.c int register_cpu(struct cpu *cpu, int num)
cpu               370 drivers/base/cpu.c 	cpu->node_id = cpu_to_node(num);
cpu               371 drivers/base/cpu.c 	memset(&cpu->dev, 0x00, sizeof(struct device));
cpu               372 drivers/base/cpu.c 	cpu->dev.id = num;
cpu               373 drivers/base/cpu.c 	cpu->dev.bus = &cpu_subsys;
cpu               374 drivers/base/cpu.c 	cpu->dev.release = cpu_device_release;
cpu               375 drivers/base/cpu.c 	cpu->dev.offline_disabled = !cpu->hotpluggable;
cpu               376 drivers/base/cpu.c 	cpu->dev.offline = !cpu_online(num);
cpu               377 drivers/base/cpu.c 	cpu->dev.of_node = of_get_cpu_node(num, NULL);
cpu               379 drivers/base/cpu.c 	cpu->dev.bus->uevent = cpu_uevent;
cpu               381 drivers/base/cpu.c 	cpu->dev.groups = common_cpu_attr_groups;
cpu               382 drivers/base/cpu.c 	if (cpu->hotpluggable)
cpu               383 drivers/base/cpu.c 		cpu->dev.groups = hotplugable_cpu_attr_groups;
cpu               384 drivers/base/cpu.c 	error = device_register(&cpu->dev);
cpu               386 drivers/base/cpu.c 		put_device(&cpu->dev);
cpu               390 drivers/base/cpu.c 	per_cpu(cpu_sys_devices, num) = &cpu->dev;
cpu               392 drivers/base/cpu.c 	dev_pm_qos_expose_latency_limit(&cpu->dev,
cpu               398 drivers/base/cpu.c struct device *get_cpu_device(unsigned cpu)
cpu               400 drivers/base/cpu.c 	if (cpu < nr_cpu_ids && cpu_possible(cpu))
cpu               401 drivers/base/cpu.c 		return per_cpu(cpu_sys_devices, cpu);
cpu               496 drivers/base/cpu.c bool cpu_is_hotpluggable(unsigned cpu)
cpu               498 drivers/base/cpu.c 	struct device *dev = get_cpu_device(cpu);
cpu               499 drivers/base/cpu.c 	return dev && container_of(dev, struct cpu, dev)->hotpluggable;
cpu               504 drivers/base/cpu.c static DEFINE_PER_CPU(struct cpu, cpu_devices);
cpu               657 drivers/base/node.c int register_cpu_under_node(unsigned int cpu, unsigned int nid)
cpu               665 drivers/base/node.c 	obj = get_cpu_device(cpu);
cpu               730 drivers/base/node.c int unregister_cpu_under_node(unsigned int cpu, unsigned int nid)
cpu               737 drivers/base/node.c 	obj = get_cpu_device(cpu);
cpu               910 drivers/base/node.c 	int cpu;
cpu               919 drivers/base/node.c 	for_each_present_cpu(cpu) {
cpu               920 drivers/base/node.c 		if (cpu_to_node(cpu) == nid)
cpu               921 drivers/base/node.c 			register_cpu_under_node(cpu, nid);
cpu              1450 drivers/base/power/domain.c 				 int cpu, bool set, unsigned int depth)
cpu              1461 drivers/base/power/domain.c 		genpd_update_cpumask(master, cpu, set, depth + 1);
cpu              1466 drivers/base/power/domain.c 		cpumask_set_cpu(cpu, genpd->cpus);
cpu              1468 drivers/base/power/domain.c 		cpumask_clear_cpu(cpu, genpd->cpus);
cpu              1471 drivers/base/power/domain.c static void genpd_set_cpumask(struct generic_pm_domain *genpd, int cpu)
cpu              1473 drivers/base/power/domain.c 	if (cpu >= 0)
cpu              1474 drivers/base/power/domain.c 		genpd_update_cpumask(genpd, cpu, true, 0);
cpu              1477 drivers/base/power/domain.c static void genpd_clear_cpumask(struct generic_pm_domain *genpd, int cpu)
cpu              1479 drivers/base/power/domain.c 	if (cpu >= 0)
cpu              1480 drivers/base/power/domain.c 		genpd_update_cpumask(genpd, cpu, false, 0);
cpu              1485 drivers/base/power/domain.c 	int cpu;
cpu              1490 drivers/base/power/domain.c 	for_each_possible_cpu(cpu) {
cpu              1491 drivers/base/power/domain.c 		if (get_cpu_device(cpu) == dev)
cpu              1492 drivers/base/power/domain.c 			return cpu;
cpu              1513 drivers/base/power/domain.c 	gpd_data->cpu = genpd_get_cpu(genpd, base_dev);
cpu              1521 drivers/base/power/domain.c 	genpd_set_cpumask(genpd, gpd_data->cpu);
cpu              1581 drivers/base/power/domain.c 	genpd_clear_cpumask(genpd, gpd_data->cpu);
cpu               258 drivers/base/power/domain_governor.c 	int cpu, i;
cpu               273 drivers/base/power/domain_governor.c 	for_each_cpu_and(cpu, genpd->cpus, cpu_online_mask) {
cpu               274 drivers/base/power/domain_governor.c 		dev = per_cpu(cpuidle_devices, cpu);
cpu               108 drivers/base/test/test_async_driver_probe.c 	int err, nid, cpu;
cpu               112 drivers/base/test/test_async_driver_probe.c 	for_each_online_cpu(cpu) {
cpu               113 drivers/base/test/test_async_driver_probe.c 		nid = cpu_to_node(cpu);
cpu               147 drivers/base/test/test_async_driver_probe.c 	for_each_online_cpu(cpu) {
cpu               148 drivers/base/test/test_async_driver_probe.c 		nid = cpu_to_node(cpu);
cpu               177 drivers/base/test/test_async_driver_probe.c 	nid = cpu_to_node(cpu);
cpu               121 drivers/base/topology.c static int topology_add_dev(unsigned int cpu)
cpu               123 drivers/base/topology.c 	struct device *dev = get_cpu_device(cpu);
cpu               128 drivers/base/topology.c static int topology_remove_dev(unsigned int cpu)
cpu               130 drivers/base/topology.c 	struct device *dev = get_cpu_device(cpu);
cpu               282 drivers/bcma/driver_mips.c 	struct bcma_device *cpu, *pcie, *i2s;
cpu               291 drivers/bcma/driver_mips.c 	cpu = bcma_find_core(bus, BCMA_CORE_MIPS_74K);
cpu               294 drivers/bcma/driver_mips.c 	if (cpu && pcie && i2s &&
cpu               295 drivers/bcma/driver_mips.c 	    bcma_aread32(cpu, BCMA_MIPS_OOBSELINA74) == 0x08060504 &&
cpu               298 drivers/bcma/driver_mips.c 		bcma_awrite32(cpu, BCMA_MIPS_OOBSELINA74, 0x07060504);
cpu               508 drivers/block/drbd/drbd_main.c 		unsigned int cpu, min = ~0;
cpu               512 drivers/block/drbd/drbd_main.c 			for_each_cpu(cpu, resource->cpu_mask)
cpu               513 drivers/block/drbd/drbd_main.c 				resources_per_cpu[cpu]++;
cpu               516 drivers/block/drbd/drbd_main.c 		for_each_online_cpu(cpu) {
cpu               517 drivers/block/drbd/drbd_main.c 			if (resources_per_cpu[cpu] < min) {
cpu               518 drivers/block/drbd/drbd_main.c 				min = resources_per_cpu[cpu];
cpu               519 drivers/block/drbd/drbd_main.c 				min_index = cpu;
cpu              3886 drivers/block/mtip32xx/mtip32xx.c static void drop_cpu(int cpu)
cpu              3888 drivers/block/mtip32xx/mtip32xx.c 	cpu_use[cpu]--;
cpu              3893 drivers/block/mtip32xx/mtip32xx.c 	int cpu, least_used_cpu, least_cnt;
cpu              3899 drivers/block/mtip32xx/mtip32xx.c 	cpu = least_used_cpu;
cpu              3901 drivers/block/mtip32xx/mtip32xx.c 	for_each_cpu(cpu, node_mask) {
cpu              3902 drivers/block/mtip32xx/mtip32xx.c 		if (cpu_use[cpu] < least_cnt) {
cpu              3903 drivers/block/mtip32xx/mtip32xx.c 			least_used_cpu = cpu;
cpu              3904 drivers/block/mtip32xx/mtip32xx.c 			least_cnt = cpu_use[cpu];
cpu              4003 drivers/block/mtip32xx/mtip32xx.c 	int cpu, i = 0, j = 0;
cpu              4072 drivers/block/mtip32xx/mtip32xx.c 		for_each_cpu(cpu, node_mask)
cpu              4074 drivers/block/mtip32xx/mtip32xx.c 			snprintf(&cpu_list[j], 256 - j, "%d ", cpu);
cpu              4101 drivers/block/mtip32xx/mtip32xx.c 	for_each_present_cpu(cpu) {
cpu              4104 drivers/block/mtip32xx/mtip32xx.c 			if (dd->work[i].cpu_binding == cpu) {
cpu              4110 drivers/block/mtip32xx/mtip32xx.c 			dev_info(&pdev->dev, "CPU %d: WQs %s\n", cpu, cpu_list);
cpu               158 drivers/block/zram/zcomp.c int zcomp_cpu_up_prepare(unsigned int cpu, struct hlist_node *node)
cpu               163 drivers/block/zram/zcomp.c 	if (WARN_ON(*per_cpu_ptr(comp->stream, cpu)))
cpu               171 drivers/block/zram/zcomp.c 	*per_cpu_ptr(comp->stream, cpu) = zstrm;
cpu               175 drivers/block/zram/zcomp.c int zcomp_cpu_dead(unsigned int cpu, struct hlist_node *node)
cpu               180 drivers/block/zram/zcomp.c 	zstrm = *per_cpu_ptr(comp->stream, cpu);
cpu               183 drivers/block/zram/zcomp.c 	*per_cpu_ptr(comp->stream, cpu) = NULL;
cpu                22 drivers/block/zram/zcomp.h int zcomp_cpu_up_prepare(unsigned int cpu, struct hlist_node *node);
cpu                23 drivers/block/zram/zcomp.h int zcomp_cpu_dead(unsigned int cpu, struct hlist_node *node);
cpu               184 drivers/bus/arm-cci.c 	int port, cpu;
cpu               194 drivers/bus/arm-cci.c 	for_each_possible_cpu(cpu) {
cpu               196 drivers/bus/arm-cci.c 		cpun = of_get_cpu_node(cpu, NULL);
cpu               205 drivers/bus/arm-cci.c 		init_cpu_port(&cpu_port[cpu], port, cpu_logical_map(cpu));
cpu               208 drivers/bus/arm-cci.c 	for_each_possible_cpu(cpu) {
cpu               209 drivers/bus/arm-cci.c 		WARN(!cpu_port_is_valid(&cpu_port[cpu]),
cpu               211 drivers/bus/arm-cci.c 			cpu);
cpu               267 drivers/bus/arm-cci.c 	int cpu;
cpu               269 drivers/bus/arm-cci.c 	for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
cpu               270 drivers/bus/arm-cci.c 		is_valid = cpu_port_is_valid(&cpu_port[cpu]);
cpu               271 drivers/bus/arm-cci.c 		if (is_valid && cpu_port_match(&cpu_port[cpu], mpidr)) {
cpu               272 drivers/bus/arm-cci.c 			cci_port_control(cpu_port[cpu].port, false);
cpu                73 drivers/bus/mips_cdmm.c 	retval = add_uevent_var(env, "CDMM_CPU=%u", cdev->cpu);
cpu               100 drivers/bus/mips_cdmm.c CDMM_ATTR(cpu, "%u\n", dev->cpu);
cpu               200 drivers/bus/mips_cdmm.c 	_BUILD_RET_##_ret work_on_cpu(cdev->cpu,			\
cpu               291 drivers/bus/mips_cdmm.c 	unsigned int cpu;
cpu               296 drivers/bus/mips_cdmm.c 	cpu = smp_processor_id();
cpu               298 drivers/bus/mips_cdmm.c 	if (cpu == 0)
cpu               302 drivers/bus/mips_cdmm.c 	bus_p = per_cpu_ptr(&mips_cdmm_buses, cpu);
cpu               503 drivers/bus/mips_cdmm.c 	unsigned int cpu = smp_processor_id();
cpu               513 drivers/bus/mips_cdmm.c 	pr_info("cdmm%u discovery (%u blocks)\n", cpu, bus->drbs);
cpu               524 drivers/bus/mips_cdmm.c 			cpu, id, drb, drb * CDMM_DRB_SIZE,
cpu               532 drivers/bus/mips_cdmm.c 		dev->cpu = cpu;
cpu               539 drivers/bus/mips_cdmm.c 		dev->dev.parent = get_cpu_device(cpu);
cpu               544 drivers/bus/mips_cdmm.c 		dev_set_name(&dev->dev, "cdmm%u-%u", cpu, id);
cpu               583 drivers/bus/mips_cdmm.c 	unsigned int cpu = *(unsigned int *)data;			\
cpu               585 drivers/bus/mips_cdmm.c 	if (cdev->cpu != cpu || !dev->driver)				\
cpu               606 drivers/bus/mips_cdmm.c static int mips_cdmm_cpu_down_prep(unsigned int cpu)
cpu               612 drivers/bus/mips_cdmm.c 	ret = bus_for_each_dev(&mips_cdmm_bustype, NULL, &cpu,
cpu               638 drivers/bus/mips_cdmm.c static int mips_cdmm_cpu_online(unsigned int cpu)
cpu               655 drivers/bus/mips_cdmm.c 		ret = bus_for_each_dev(&mips_cdmm_bustype, NULL, &cpu,
cpu              2422 drivers/char/random.c 	int cpu;
cpu              2425 drivers/char/random.c 	for_each_possible_cpu (cpu) {
cpu              2428 drivers/char/random.c 		batched_entropy = per_cpu_ptr(&batched_entropy_u32, cpu);
cpu              2433 drivers/char/random.c 		batched_entropy = per_cpu_ptr(&batched_entropy_u64, cpu);
cpu                27 drivers/clk/imx/clk-cpu.c 	struct clk_cpu *cpu = to_clk_cpu(hw);
cpu                29 drivers/clk/imx/clk-cpu.c 	return clk_get_rate(cpu->div);
cpu                35 drivers/clk/imx/clk-cpu.c 	struct clk_cpu *cpu = to_clk_cpu(hw);
cpu                37 drivers/clk/imx/clk-cpu.c 	return clk_round_rate(cpu->pll, rate);
cpu                43 drivers/clk/imx/clk-cpu.c 	struct clk_cpu *cpu = to_clk_cpu(hw);
cpu                47 drivers/clk/imx/clk-cpu.c 	ret = clk_set_parent(cpu->mux, cpu->step);
cpu                52 drivers/clk/imx/clk-cpu.c 	ret = clk_set_rate(cpu->pll, rate);
cpu                54 drivers/clk/imx/clk-cpu.c 		clk_set_parent(cpu->mux, cpu->pll);
cpu                58 drivers/clk/imx/clk-cpu.c 	clk_set_parent(cpu->mux, cpu->pll);
cpu                61 drivers/clk/imx/clk-cpu.c 	clk_set_rate(cpu->div, rate);
cpu                76 drivers/clk/imx/clk-cpu.c 	struct clk_cpu *cpu;
cpu                81 drivers/clk/imx/clk-cpu.c 	cpu = kzalloc(sizeof(*cpu), GFP_KERNEL);
cpu                82 drivers/clk/imx/clk-cpu.c 	if (!cpu)
cpu                85 drivers/clk/imx/clk-cpu.c 	cpu->div = div;
cpu                86 drivers/clk/imx/clk-cpu.c 	cpu->mux = mux;
cpu                87 drivers/clk/imx/clk-cpu.c 	cpu->pll = pll;
cpu                88 drivers/clk/imx/clk-cpu.c 	cpu->step = step;
cpu                96 drivers/clk/imx/clk-cpu.c 	cpu->hw.init = &init;
cpu                97 drivers/clk/imx/clk-cpu.c 	hw = &cpu->hw;
cpu               101 drivers/clk/imx/clk-cpu.c 		kfree(cpu);
cpu                95 drivers/clk/imx/clk-imx25.c 	clk[cpu] = imx_clk_divider("cpu", "cpu_sel", ccm(CCM_CCTL), 30, 2);
cpu               256 drivers/clk/mvebu/ap-cpu-clk.c 		int cpu, err;
cpu               258 drivers/clk/mvebu/ap-cpu-clk.c 		err = of_property_read_u32(dn, "reg", &cpu);
cpu               263 drivers/clk/mvebu/ap-cpu-clk.c 		if (cpu & APN806_CLUSTER_NUM_MASK) {
cpu               288 drivers/clk/mvebu/ap-cpu-clk.c 		int cpu, err;
cpu               290 drivers/clk/mvebu/ap-cpu-clk.c 		err = of_property_read_u32(dn, "reg", &cpu);
cpu               294 drivers/clk/mvebu/ap-cpu-clk.c 		cluster_index = cpu & APN806_CLUSTER_NUM_MASK;
cpu               269 drivers/clk/mvebu/armada-37xx-periph.c static PERIPH_PM_CPU(cpu, 22, DIV_SEL0, 28);
cpu               288 drivers/clk/mvebu/armada-37xx-periph.c 	REF_CLK_PM_CPU(cpu),
cpu                35 drivers/clk/mvebu/clk-cpu.c 	int cpu;
cpu                55 drivers/clk/mvebu/clk-cpu.c 	div = (reg >> (cpuclk->cpu * 8)) & SYS_CTRL_CLK_DIVIDER_MASK;
cpu                84 drivers/clk/mvebu/clk-cpu.c 		& (~(SYS_CTRL_CLK_DIVIDER_MASK << (cpuclk->cpu * 8))))
cpu                85 drivers/clk/mvebu/clk-cpu.c 		| (div << (cpuclk->cpu * 8));
cpu                88 drivers/clk/mvebu/clk-cpu.c 	reload_mask = 1 << (20 + cpuclk->cpu);
cpu               148 drivers/clk/mvebu/clk-cpu.c 	return mvebu_pmsu_dfs_request(cpuclk->cpu);
cpu               199 drivers/clk/mvebu/clk-cpu.c 		int cpu, err;
cpu               204 drivers/clk/mvebu/clk-cpu.c 		err = of_property_read_u32(dn, "reg", &cpu);
cpu               208 drivers/clk/mvebu/clk-cpu.c 		sprintf(clk_name, "cpu%d", cpu);
cpu               210 drivers/clk/mvebu/clk-cpu.c 		cpuclk[cpu].parent_name = of_clk_get_parent_name(node, 0);
cpu               211 drivers/clk/mvebu/clk-cpu.c 		cpuclk[cpu].clk_name = clk_name;
cpu               212 drivers/clk/mvebu/clk-cpu.c 		cpuclk[cpu].cpu = cpu;
cpu               213 drivers/clk/mvebu/clk-cpu.c 		cpuclk[cpu].reg_base = clock_complex_base;
cpu               215 drivers/clk/mvebu/clk-cpu.c 			cpuclk[cpu].pmu_dfs = pmu_dfs_base + 4 * cpu;
cpu               216 drivers/clk/mvebu/clk-cpu.c 		cpuclk[cpu].hw.init = &init;
cpu               218 drivers/clk/mvebu/clk-cpu.c 		init.name = cpuclk[cpu].clk_name;
cpu               221 drivers/clk/mvebu/clk-cpu.c 		init.parent_names = &cpuclk[cpu].parent_name;
cpu               224 drivers/clk/mvebu/clk-cpu.c 		clk = clk_register(NULL, &cpuclk[cpu].hw);
cpu               227 drivers/clk/mvebu/clk-cpu.c 		clks[cpu] = clk;
cpu                94 drivers/clk/mxs/clk-imx23.c 	cpu, hbus, xbus, emi, uart,
cpu               123 drivers/clk/mxs/clk-imx23.c 	clks[cpu] = mxs_clk_mux("cpu", CLKSEQ, 7, 1, cpu_sels, ARRAY_SIZE(cpu_sels));
cpu               149 drivers/clk/mxs/clk-imx28.c 	cpu, hbus, xbus, emi, uart,
cpu               188 drivers/clk/mxs/clk-imx28.c 	clks[cpu] = mxs_clk_mux("cpu", CLKSEQ, 18, 1, cpu_sels, ARRAY_SIZE(cpu_sels));
cpu               295 drivers/clk/qcom/krait-cc.c 	int cpu;
cpu               321 drivers/clk/qcom/krait-cc.c 	for_each_possible_cpu(cpu) {
cpu               322 drivers/clk/qcom/krait-cc.c 		clk = krait_add_clks(dev, cpu, id->data);
cpu               325 drivers/clk/qcom/krait-cc.c 		clks[cpu] = clk;
cpu               340 drivers/clk/qcom/krait-cc.c 	for_each_online_cpu(cpu) {
cpu               342 drivers/clk/qcom/krait-cc.c 		WARN(clk_prepare_enable(clks[cpu]),
cpu               343 drivers/clk/qcom/krait-cc.c 		     "Unable to turn on CPU%d clock", cpu);
cpu               367 drivers/clk/qcom/krait-cc.c 	for_each_possible_cpu(cpu) {
cpu               368 drivers/clk/qcom/krait-cc.c 		clk = clks[cpu];
cpu               371 drivers/clk/qcom/krait-cc.c 			pr_info("CPU%d @ QSB rate. Forcing new rate.\n", cpu);
cpu               378 drivers/clk/qcom/krait-cc.c 		pr_info("CPU%d @ %lu KHz\n", cpu, clk_get_rate(clk) / 1000);
cpu               140 drivers/clk/sirf/clk-atlas6.c 	clk_register_clkdev(atlas6_clks[cpu], NULL, "cpu");
cpu               139 drivers/clk/sirf/clk-prima2.c 	clk_register_clkdev(prima2_clks[cpu], NULL, "cpu");
cpu              1092 drivers/clk/tegra/clk-tegra114.c static void tegra114_wait_cpu_in_reset(u32 cpu)
cpu              1099 drivers/clk/tegra/clk-tegra114.c 	} while (!(reg & (1 << cpu)));  /* check CPU been reset or not */
cpu              1102 drivers/clk/tegra/clk-tegra114.c static void tegra114_disable_cpu_clock(u32 cpu)
cpu              1188 drivers/clk/tegra/clk-tegra124.c static void tegra124_wait_cpu_in_reset(u32 cpu)
cpu              1195 drivers/clk/tegra/clk-tegra124.c 	} while (!(reg & (1 << cpu)));  /* check CPU been reset or not */
cpu              1198 drivers/clk/tegra/clk-tegra124.c static void tegra124_disable_cpu_clock(u32 cpu)
cpu               116 drivers/clk/tegra/clk-tegra20.c #define CPU_CLOCK(cpu)	(0x1 << (8 + cpu))
cpu               117 drivers/clk/tegra/clk-tegra20.c #define CPU_RESET(cpu)	(0x1111ul << (cpu))
cpu               912 drivers/clk/tegra/clk-tegra20.c static void tegra20_wait_cpu_in_reset(u32 cpu)
cpu               920 drivers/clk/tegra/clk-tegra20.c 	} while (!(reg & (1 << cpu)));	/* check CPU been reset or not */
cpu               925 drivers/clk/tegra/clk-tegra20.c static void tegra20_put_cpu_in_reset(u32 cpu)
cpu               927 drivers/clk/tegra/clk-tegra20.c 	writel(CPU_RESET(cpu),
cpu               932 drivers/clk/tegra/clk-tegra20.c static void tegra20_cpu_out_of_reset(u32 cpu)
cpu               934 drivers/clk/tegra/clk-tegra20.c 	writel(CPU_RESET(cpu),
cpu               939 drivers/clk/tegra/clk-tegra20.c static void tegra20_enable_cpu_clock(u32 cpu)
cpu               944 drivers/clk/tegra/clk-tegra20.c 	writel(reg & ~CPU_CLOCK(cpu),
cpu               950 drivers/clk/tegra/clk-tegra20.c static void tegra20_disable_cpu_clock(u32 cpu)
cpu               955 drivers/clk/tegra/clk-tegra20.c 	writel(reg | CPU_CLOCK(cpu),
cpu              3274 drivers/clk/tegra/clk-tegra210.c static void tegra210_wait_cpu_in_reset(u32 cpu)
cpu              3281 drivers/clk/tegra/clk-tegra210.c 	} while (!(reg & (1 << cpu)));  /* check CPU been reset or not */
cpu              3284 drivers/clk/tegra/clk-tegra210.c static void tegra210_disable_cpu_clock(u32 cpu)
cpu               117 drivers/clk/tegra/clk-tegra30.c #define CPU_CLOCK(cpu)	(0x1 << (8 + cpu))
cpu               118 drivers/clk/tegra/clk-tegra30.c #define CPU_RESET(cpu)	(0x1111ul << (cpu))
cpu              1086 drivers/clk/tegra/clk-tegra30.c static void tegra30_wait_cpu_in_reset(u32 cpu)
cpu              1094 drivers/clk/tegra/clk-tegra30.c 	} while (!(reg & (1 << cpu)));	/* check CPU been reset or not */
cpu              1099 drivers/clk/tegra/clk-tegra30.c static void tegra30_put_cpu_in_reset(u32 cpu)
cpu              1101 drivers/clk/tegra/clk-tegra30.c 	writel(CPU_RESET(cpu),
cpu              1106 drivers/clk/tegra/clk-tegra30.c static void tegra30_cpu_out_of_reset(u32 cpu)
cpu              1108 drivers/clk/tegra/clk-tegra30.c 	writel(CPU_RESET(cpu),
cpu              1113 drivers/clk/tegra/clk-tegra30.c static void tegra30_enable_cpu_clock(u32 cpu)
cpu              1117 drivers/clk/tegra/clk-tegra30.c 	writel(CPU_CLOCK(cpu),
cpu              1123 drivers/clk/tegra/clk-tegra30.c static void tegra30_disable_cpu_clock(u32 cpu)
cpu              1128 drivers/clk/tegra/clk-tegra30.c 	writel(reg | CPU_CLOCK(cpu),
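The Tegra clk entries above share one per-CPU bit scheme: bit (8 + cpu) in the CPU-complex clock register gates a core's clock, a 0x1111 pattern shifted by cpu toggles its reset lines, and the wait helpers poll a status register. A minimal sketch of that scheme; clk_base and the 0x4c/0x340/0x344 offsets are placeholders, not real Tegra register offsets.

#include <linux/bits.h>
#include <linux/io.h>
#include <linux/processor.h>

#define EXAMPLE_CPU_CLOCK(cpu)	(0x1 << (8 + (cpu)))	/* clock-stop bit for one core */
#define EXAMPLE_CPU_RESET(cpu)	(0x1111ul << (cpu))	/* reset bits for one core */

static void __iomem *clk_base;	/* mapped from the clock controller by the real driver */

static void example_enable_cpu_clock(u32 cpu)
{
	u32 reg = readl(clk_base + 0x4c);	/* placeholder CPU-complex clock register */

	writel(reg & ~EXAMPLE_CPU_CLOCK(cpu), clk_base + 0x4c);
}

static void example_put_cpu_in_reset(u32 cpu)
{
	writel(EXAMPLE_CPU_RESET(cpu), clk_base + 0x344);	/* placeholder reset-set register */
}

static void example_wait_cpu_in_reset(u32 cpu)
{
	u32 reg;

	do {
		reg = readl(clk_base + 0x340);	/* placeholder reset-status register */
		cpu_relax();
	} while (!(reg & BIT(cpu)));		/* spin until the core reports "in reset" */
}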
cpu               305 drivers/clocksource/arc_timer.c static int arc_timer_starting_cpu(unsigned int cpu)
cpu               316 drivers/clocksource/arc_timer.c static int arc_timer_dying_cpu(unsigned int cpu)
cpu               865 drivers/clocksource/arm_arch_timer.c static int arch_timer_starting_cpu(unsigned int cpu)
cpu              1011 drivers/clocksource/arm_arch_timer.c static int arch_timer_dying_cpu(unsigned int cpu)
cpu               165 drivers/clocksource/arm_global_timer.c static int gt_starting_cpu(unsigned int cpu)
cpu               177 drivers/clocksource/arm_global_timer.c 	clk->cpumask = cpumask_of(cpu);
cpu               186 drivers/clocksource/arm_global_timer.c static int gt_dying_cpu(unsigned int cpu)
cpu                16 drivers/clocksource/dummy_timer.c static int dummy_timer_starting_cpu(unsigned int cpu)
cpu                18 drivers/clocksource/dummy_timer.c 	struct clock_event_device *evt = per_cpu_ptr(&dummy_timer_evt, cpu);
cpu                25 drivers/clocksource/dummy_timer.c 	evt->cpumask	= cpumask_of(cpu);
cpu               240 drivers/clocksource/dw_apb_timer.c dw_apb_clockevent_init(int cpu, const char *name, unsigned rating,
cpu               260 drivers/clocksource/dw_apb_timer.c 	dw_ced->ced.cpumask = cpumask_of(cpu);
cpu               446 drivers/clocksource/exynos_mct.c static int exynos4_mct_starting_cpu(unsigned int cpu)
cpu               449 drivers/clocksource/exynos_mct.c 		per_cpu_ptr(&percpu_mct_tick, cpu);
cpu               452 drivers/clocksource/exynos_mct.c 	mevt->base = EXYNOS4_MCT_L_BASE(cpu);
cpu               453 drivers/clocksource/exynos_mct.c 	snprintf(mevt->name, sizeof(mevt->name), "mct_tick%d", cpu);
cpu               456 drivers/clocksource/exynos_mct.c 	evt->cpumask = cpumask_of(cpu);
cpu               473 drivers/clocksource/exynos_mct.c 		irq_force_affinity(evt->irq, cpumask_of(cpu));
cpu               484 drivers/clocksource/exynos_mct.c static int exynos4_mct_dying_cpu(unsigned int cpu)
cpu               487 drivers/clocksource/exynos_mct.c 		per_cpu_ptr(&percpu_mct_tick, cpu);
cpu               503 drivers/clocksource/exynos_mct.c 	int err, cpu;
cpu               528 drivers/clocksource/exynos_mct.c 		for_each_possible_cpu(cpu) {
cpu               529 drivers/clocksource/exynos_mct.c 			int mct_irq = mct_irqs[MCT_L0_IRQ + cpu];
cpu               531 drivers/clocksource/exynos_mct.c 				per_cpu_ptr(&percpu_mct_tick, cpu);
cpu               541 drivers/clocksource/exynos_mct.c 									cpu);
cpu               563 drivers/clocksource/exynos_mct.c 		for_each_possible_cpu(cpu) {
cpu               565 drivers/clocksource/exynos_mct.c 				per_cpu_ptr(&percpu_mct_tick, cpu);
cpu               105 drivers/clocksource/hyperv_timer.c void hv_stimer_init(unsigned int cpu)
cpu               117 drivers/clocksource/hyperv_timer.c 	ce = per_cpu_ptr(hv_clock_event, cpu);
cpu               120 drivers/clocksource/hyperv_timer.c 	ce->cpumask = cpumask_of(cpu);
cpu               136 drivers/clocksource/hyperv_timer.c void hv_stimer_cleanup(unsigned int cpu)
cpu               142 drivers/clocksource/hyperv_timer.c 		ce = per_cpu_ptr(hv_clock_event, cpu);
cpu               192 drivers/clocksource/hyperv_timer.c 	int	cpu;
cpu               196 drivers/clocksource/hyperv_timer.c 		for_each_present_cpu(cpu) {
cpu               197 drivers/clocksource/hyperv_timer.c 			ce = per_cpu_ptr(hv_clock_event, cpu);
cpu               198 drivers/clocksource/hyperv_timer.c 			clockevents_unbind_device(ce, cpu);
cpu               108 drivers/clocksource/jcore-pit.c static int jcore_pit_local_init(unsigned cpu)
cpu               113 drivers/clocksource/jcore-pit.c 	pr_info("Local J-Core PIT init on cpu %u\n", cpu);
cpu               139 drivers/clocksource/jcore-pit.c 	unsigned pit_irq, cpu;
cpu               218 drivers/clocksource/jcore-pit.c 	for_each_present_cpu(cpu) {
cpu               219 drivers/clocksource/jcore-pit.c 		struct jcore_pit *pit = per_cpu_ptr(jcore_pit_percpu, cpu);
cpu               221 drivers/clocksource/jcore-pit.c 		pit->base = of_iomap(node, cpu);
cpu               223 drivers/clocksource/jcore-pit.c 			pr_err("Unable to map PIT for cpu %u\n", cpu);
cpu               231 drivers/clocksource/jcore-pit.c 		pit->ced.cpumask = cpumask_of(cpu);
cpu                45 drivers/clocksource/mips-gic-timer.c 	int cpu = cpumask_first(evt->cpumask);
cpu                51 drivers/clocksource/mips-gic-timer.c 	if (cpu == raw_smp_processor_id()) {
cpu                54 drivers/clocksource/mips-gic-timer.c 		write_gic_vl_other(mips_cm_vp_id(cpu));
cpu                77 drivers/clocksource/mips-gic-timer.c static void gic_clockevent_cpu_init(unsigned int cpu,
cpu                86 drivers/clocksource/mips-gic-timer.c 	cd->cpumask		= cpumask_of(cpu);
cpu               106 drivers/clocksource/mips-gic-timer.c static int gic_starting_cpu(unsigned int cpu)
cpu               108 drivers/clocksource/mips-gic-timer.c 	gic_clockevent_cpu_init(cpu, this_cpu_ptr(&gic_clockevent_device));
cpu               123 drivers/clocksource/mips-gic-timer.c static int gic_dying_cpu(unsigned int cpu)
cpu               173 drivers/clocksource/timer-armada-370-xp.c static int armada_370_xp_timer_starting_cpu(unsigned int cpu)
cpu               175 drivers/clocksource/timer-armada-370-xp.c 	struct clock_event_device *evt = per_cpu_ptr(armada_370_xp_evt, cpu);
cpu               195 drivers/clocksource/timer-armada-370-xp.c 	evt->cpumask		= cpumask_of(cpu);
cpu               203 drivers/clocksource/timer-armada-370-xp.c static int armada_370_xp_timer_dying_cpu(unsigned int cpu)
cpu               205 drivers/clocksource/timer-armada-370-xp.c 	struct clock_event_device *evt = per_cpu_ptr(armada_370_xp_evt, cpu);
cpu                73 drivers/clocksource/timer-atlas7.c 	int cpu = smp_processor_id();
cpu                76 drivers/clocksource/timer-atlas7.c 	writel_relaxed(BIT(cpu), sirfsoc_timer_base + SIRFSOC_TIMER_INTR_STATUS);
cpu                79 drivers/clocksource/timer-atlas7.c 		sirfsoc_timer_count_disable(cpu);
cpu               103 drivers/clocksource/timer-atlas7.c 	int cpu = smp_processor_id();
cpu               106 drivers/clocksource/timer-atlas7.c 	sirfsoc_timer_count_disable(cpu);
cpu               109 drivers/clocksource/timer-atlas7.c 		4 * cpu);
cpu               111 drivers/clocksource/timer-atlas7.c 		4 * cpu);
cpu               114 drivers/clocksource/timer-atlas7.c 	sirfsoc_timer_count_enable(cpu);
cpu               174 drivers/clocksource/timer-atlas7.c static int sirfsoc_local_timer_starting_cpu(unsigned int cpu)
cpu               176 drivers/clocksource/timer-atlas7.c 	struct clock_event_device *ce = per_cpu_ptr(sirfsoc_clockevent, cpu);
cpu               179 drivers/clocksource/timer-atlas7.c 	if (cpu == 0)
cpu               197 drivers/clocksource/timer-atlas7.c 	ce->cpumask = cpumask_of(cpu);
cpu               201 drivers/clocksource/timer-atlas7.c 	irq_force_affinity(action->irq, cpumask_of(cpu));
cpu               207 drivers/clocksource/timer-atlas7.c static int sirfsoc_local_timer_dying_cpu(unsigned int cpu)
cpu               211 drivers/clocksource/timer-atlas7.c 	if (cpu == 0)
cpu                76 drivers/clocksource/timer-mp-csky.c static int csky_mptimer_starting_cpu(unsigned int cpu)
cpu                78 drivers/clocksource/timer-mp-csky.c 	struct timer_of *to = per_cpu_ptr(&csky_to, cpu);
cpu                80 drivers/clocksource/timer-mp-csky.c 	to->clkevt.cpumask = cpumask_of(cpu);
cpu                90 drivers/clocksource/timer-mp-csky.c static int csky_mptimer_dying_cpu(unsigned int cpu)
cpu               120 drivers/clocksource/timer-mp-csky.c 	int ret, cpu, cpu_rollback;
cpu               144 drivers/clocksource/timer-mp-csky.c 	for_each_possible_cpu(cpu) {
cpu               145 drivers/clocksource/timer-mp-csky.c 		to = per_cpu_ptr(&csky_to, cpu);
cpu               165 drivers/clocksource/timer-mp-csky.c 		if (cpu_rollback == cpu)
cpu               225 drivers/clocksource/timer-nps.c static int nps_timer_starting_cpu(unsigned int cpu)
cpu               237 drivers/clocksource/timer-nps.c static int nps_timer_dying_cpu(unsigned int cpu)
cpu                99 drivers/clocksource/timer-qcom.c static int msm_local_timer_starting_cpu(unsigned int cpu)
cpu               101 drivers/clocksource/timer-qcom.c 	struct clock_event_device *evt = per_cpu_ptr(msm_evt, cpu);
cpu               112 drivers/clocksource/timer-qcom.c 	evt->cpumask = cpumask_of(cpu);
cpu               129 drivers/clocksource/timer-qcom.c static int msm_local_timer_dying_cpu(unsigned int cpu)
cpu               131 drivers/clocksource/timer-qcom.c 	struct clock_event_device *evt = per_cpu_ptr(msm_evt, cpu);
cpu                57 drivers/clocksource/timer-riscv.c static int riscv_timer_starting_cpu(unsigned int cpu)
cpu                59 drivers/clocksource/timer-riscv.c 	struct clock_event_device *ce = per_cpu_ptr(&riscv_clock_event, cpu);
cpu                61 drivers/clocksource/timer-riscv.c 	ce->cpumask = cpumask_of(cpu);
cpu                68 drivers/clocksource/timer-riscv.c static int riscv_timer_dying_cpu(unsigned int cpu)
cpu               132 drivers/clocksource/timer-tegra.c static int tegra_timer_setup(unsigned int cpu)
cpu               134 drivers/clocksource/timer-tegra.c 	struct timer_of *to = per_cpu_ptr(&tegra_to, cpu);
cpu               139 drivers/clocksource/timer-tegra.c 	irq_force_affinity(to->clkevt.irq, cpumask_of(cpu));
cpu               157 drivers/clocksource/timer-tegra.c static int tegra_timer_stop(unsigned int cpu)
cpu               159 drivers/clocksource/timer-tegra.c 	struct timer_of *to = per_cpu_ptr(&tegra_to, cpu);
cpu               212 drivers/clocksource/timer-tegra.c static inline unsigned int tegra_base_for_cpu(int cpu, bool tegra20)
cpu               215 drivers/clocksource/timer-tegra.c 		switch (cpu) {
cpu               227 drivers/clocksource/timer-tegra.c 	return TIMER10_BASE + cpu * 8;
cpu               230 drivers/clocksource/timer-tegra.c static inline unsigned int tegra_irq_idx_for_cpu(int cpu, bool tegra20)
cpu               233 drivers/clocksource/timer-tegra.c 		return TIMER1_IRQ_IDX + cpu;
cpu               235 drivers/clocksource/timer-tegra.c 	return TIMER10_IRQ_IDX + cpu;
cpu               255 drivers/clocksource/timer-tegra.c 	int cpu, ret;
cpu               301 drivers/clocksource/timer-tegra.c 	for_each_possible_cpu(cpu) {
cpu               302 drivers/clocksource/timer-tegra.c 		struct timer_of *cpu_to = per_cpu_ptr(&tegra_to, cpu);
cpu               305 drivers/clocksource/timer-tegra.c 		unsigned int base = tegra_base_for_cpu(cpu, tegra20);
cpu               306 drivers/clocksource/timer-tegra.c 		unsigned int idx = tegra_irq_idx_for_cpu(cpu, tegra20);
cpu               310 drivers/clocksource/timer-tegra.c 			pr_err("failed to map irq for cpu%d\n", cpu);
cpu               317 drivers/clocksource/timer-tegra.c 		cpu_to->clkevt.cpumask = cpumask_of(cpu);
cpu               328 drivers/clocksource/timer-tegra.c 			       cpu, ret);
cpu               356 drivers/clocksource/timer-tegra.c 	for_each_possible_cpu(cpu) {
cpu               359 drivers/clocksource/timer-tegra.c 		cpu_to = per_cpu_ptr(&tegra_to, cpu);
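The clocksource entries above share one lifecycle: a *_starting_cpu callback configures and registers a per-CPU clock_event_device bound to cpumask_of(cpu), and a *_dying_cpu callback tears it down on hot-unplug, both wired through the CPU hotplug state machine. A minimal, hedged sketch of that wiring; the names, the 1 MHz rate, and the use of a dynamic hotplug state are illustrative, not taken from any of the drivers listed.

#include <linux/clockchips.h>
#include <linux/cpuhotplug.h>
#include <linux/cpumask.h>
#include <linux/percpu.h>

static DEFINE_PER_CPU(struct clock_event_device, example_evt);

static int example_set_next_event(unsigned long delta,
				  struct clock_event_device *evt)
{
	/* a real driver programs its per-CPU comparator here */
	return 0;
}

static int example_timer_starting_cpu(unsigned int cpu)
{
	struct clock_event_device *evt = per_cpu_ptr(&example_evt, cpu);

	evt->name		= "example_timer";
	evt->features		= CLOCK_EVT_FEAT_ONESHOT;
	evt->rating		= 300;
	evt->set_next_event	= example_set_next_event;
	evt->cpumask		= cpumask_of(cpu);	/* tick device for this CPU only */

	clockevents_config_and_register(evt, 1000000, 1, 0x7fffffff);
	return 0;
}

static int example_timer_dying_cpu(unsigned int cpu)
{
	/* a real driver masks its interrupt and stops the counter here */
	return 0;
}

static int __init example_timer_hotplug_init(void)
{
	int ret;

	/* a dynamic state keeps the sketch independent of a driver-specific CPUHP_AP_* entry */
	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "clockevents/example:starting",
				example_timer_starting_cpu, example_timer_dying_cpu);
	return ret < 0 ? ret : 0;
}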
cpu                49 drivers/connector/cn_proc.c 	((struct proc_event *)msg->data)->cpu = smp_processor_id();
cpu               325 drivers/connector/cn_proc.c 	ev->cpu = -1;
cpu                72 drivers/cpufreq/acpi-cpufreq.c static bool boost_state(unsigned int cpu)
cpu                79 drivers/cpufreq/acpi-cpufreq.c 		rdmsr_on_cpu(cpu, MSR_IA32_MISC_ENABLE, &lo, &hi);
cpu                84 drivers/cpufreq/acpi-cpufreq.c 		rdmsr_on_cpu(cpu, MSR_K7_HWCR, &lo, &hi);
cpu               179 drivers/cpufreq/acpi-cpufreq.c 	struct cpuinfo_x86 *cpu = &cpu_data(cpuid);
cpu               181 drivers/cpufreq/acpi-cpufreq.c 	return cpu_has(cpu, X86_FEATURE_EST);
cpu               186 drivers/cpufreq/acpi-cpufreq.c 	struct cpuinfo_x86 *cpu = &cpu_data(cpuid);
cpu               188 drivers/cpufreq/acpi-cpufreq.c 	return cpu_has(cpu, X86_FEATURE_HW_PSTATE);
cpu               357 drivers/cpufreq/acpi-cpufreq.c static unsigned int get_cur_freq_on_cpu(unsigned int cpu)
cpu               364 drivers/cpufreq/acpi-cpufreq.c 	pr_debug("%s (%d)\n", __func__, cpu);
cpu               366 drivers/cpufreq/acpi-cpufreq.c 	policy = cpufreq_cpu_get_raw(cpu);
cpu               375 drivers/cpufreq/acpi-cpufreq.c 	freq = extract_freq(policy, get_cur_val(cpumask_of(cpu), data));
cpu               437 drivers/cpufreq/acpi-cpufreq.c 		cpumask_of(policy->cpu) : policy->cpus;
cpu               444 drivers/cpufreq/acpi-cpufreq.c 			pr_debug("%s (%d)\n", __func__, policy->cpu);
cpu               490 drivers/cpufreq/acpi-cpufreq.c acpi_cpufreq_guess_freq(struct acpi_cpufreq_data *data, unsigned int cpu)
cpu               529 drivers/cpufreq/acpi-cpufreq.c static int cpufreq_boost_online(unsigned int cpu)
cpu               538 drivers/cpufreq/acpi-cpufreq.c static int cpufreq_boost_down_prep(unsigned int cpu)
cpu               632 drivers/cpufreq/acpi-cpufreq.c 	unsigned int cpu = policy->cpu;
cpu               635 drivers/cpufreq/acpi-cpufreq.c 	struct cpuinfo_x86 *c = &cpu_data(policy->cpu);
cpu               661 drivers/cpufreq/acpi-cpufreq.c 	perf = per_cpu_ptr(acpi_perf_data, cpu);
cpu               662 drivers/cpufreq/acpi-cpufreq.c 	data->acpi_perf_cpu = cpu;
cpu               668 drivers/cpufreq/acpi-cpufreq.c 	result = acpi_processor_register_performance(perf, cpu);
cpu               688 drivers/cpufreq/acpi-cpufreq.c 		cpumask_copy(policy->cpus, topology_core_cpumask(cpu));
cpu               691 drivers/cpufreq/acpi-cpufreq.c 	if (check_amd_hwpstate_cpu(cpu) && !acpi_pstate_strict) {
cpu               693 drivers/cpufreq/acpi-cpufreq.c 		cpumask_set_cpu(cpu, policy->cpus);
cpu               695 drivers/cpufreq/acpi-cpufreq.c 			     topology_sibling_cpumask(cpu));
cpu               728 drivers/cpufreq/acpi-cpufreq.c 		if (check_est_cpu(cpu)) {
cpu               734 drivers/cpufreq/acpi-cpufreq.c 		if (check_amd_hwpstate_cpu(cpu)) {
cpu               795 drivers/cpufreq/acpi-cpufreq.c 		policy->cur = acpi_cpufreq_guess_freq(data, policy->cpu);
cpu               807 drivers/cpufreq/acpi-cpufreq.c 	pr_debug("CPU%u - ACPI performance management activated.\n", cpu);
cpu               827 drivers/cpufreq/acpi-cpufreq.c 	acpi_processor_unregister_performance(cpu);
cpu               856 drivers/cpufreq/acpi-cpufreq.c 							      policy->cpu);
cpu                45 drivers/cpufreq/amd_freq_sensitivity.c 	struct cpu_data_t *data = &per_cpu(cpu_data, policy->cpu);
cpu                53 drivers/cpufreq/amd_freq_sensitivity.c 	rdmsr_on_cpu(policy->cpu, MSR_AMD64_FREQ_SENSITIVITY_ACTUAL,
cpu                55 drivers/cpufreq/amd_freq_sensitivity.c 	rdmsr_on_cpu(policy->cpu, MSR_AMD64_FREQ_SENSITIVITY_REFERENCE,
cpu                73 drivers/cpufreq/arm_big_little.c static inline int raw_cpu_to_cluster(int cpu)
cpu                75 drivers/cpufreq/arm_big_little.c 	return topology_physical_package_id(cpu);
cpu                78 drivers/cpufreq/arm_big_little.c static inline int cpu_to_cluster(int cpu)
cpu                81 drivers/cpufreq/arm_big_little.c 		MAX_CLUSTERS : raw_cpu_to_cluster(cpu);
cpu               103 drivers/cpufreq/arm_big_little.c static unsigned int clk_get_cpu_rate(unsigned int cpu)
cpu               105 drivers/cpufreq/arm_big_little.c 	u32 cur_cluster = per_cpu(physical_cluster, cpu);
cpu               112 drivers/cpufreq/arm_big_little.c 	pr_debug("%s: cpu: %d, cluster: %d, freq: %u\n", __func__, cpu,
cpu               118 drivers/cpufreq/arm_big_little.c static unsigned int bL_cpufreq_get_rate(unsigned int cpu)
cpu               122 drivers/cpufreq/arm_big_little.c 					cpu));
cpu               124 drivers/cpufreq/arm_big_little.c 		return per_cpu(cpu_last_req_freq, cpu);
cpu               126 drivers/cpufreq/arm_big_little.c 		return clk_get_cpu_rate(cpu);
cpu               131 drivers/cpufreq/arm_big_little.c bL_cpufreq_set_rate(u32 cpu, u32 old_cluster, u32 new_cluster, u32 rate)
cpu               140 drivers/cpufreq/arm_big_little.c 		prev_rate = per_cpu(cpu_last_req_freq, cpu);
cpu               141 drivers/cpufreq/arm_big_little.c 		per_cpu(cpu_last_req_freq, cpu) = rate;
cpu               142 drivers/cpufreq/arm_big_little.c 		per_cpu(physical_cluster, cpu) = new_cluster;
cpu               151 drivers/cpufreq/arm_big_little.c 			__func__, cpu, old_cluster, new_cluster, new_rate);
cpu               171 drivers/cpufreq/arm_big_little.c 			per_cpu(cpu_last_req_freq, cpu) = prev_rate;
cpu               172 drivers/cpufreq/arm_big_little.c 			per_cpu(physical_cluster, cpu) = old_cluster;
cpu               185 drivers/cpufreq/arm_big_little.c 				__func__, cpu, old_cluster, new_cluster);
cpu               188 drivers/cpufreq/arm_big_little.c 		bL_switch_request(cpu, new_cluster);
cpu               214 drivers/cpufreq/arm_big_little.c 	u32 cpu = policy->cpu, cur_cluster, new_cluster, actual_cluster;
cpu               218 drivers/cpufreq/arm_big_little.c 	cur_cluster = cpu_to_cluster(cpu);
cpu               219 drivers/cpufreq/arm_big_little.c 	new_cluster = actual_cluster = per_cpu(physical_cluster, cpu);
cpu               233 drivers/cpufreq/arm_big_little.c 	ret = bL_cpufreq_set_rate(cpu, actual_cluster, new_cluster, freqs_new);
cpu               459 drivers/cpufreq/arm_big_little.c 	u32 cur_cluster = cpu_to_cluster(policy->cpu);
cpu               463 drivers/cpufreq/arm_big_little.c 	cpu_dev = get_cpu_device(policy->cpu);
cpu               466 drivers/cpufreq/arm_big_little.c 				policy->cpu);
cpu               471 drivers/cpufreq/arm_big_little.c 		int cpu;
cpu               473 drivers/cpufreq/arm_big_little.c 		cpumask_copy(policy->cpus, topology_core_cpumask(policy->cpu));
cpu               475 drivers/cpufreq/arm_big_little.c 		for_each_cpu(cpu, policy->cpus)
cpu               476 drivers/cpufreq/arm_big_little.c 			per_cpu(physical_cluster, cpu) = cur_cluster;
cpu               479 drivers/cpufreq/arm_big_little.c 		per_cpu(physical_cluster, policy->cpu) = A15_CLUSTER;
cpu               493 drivers/cpufreq/arm_big_little.c 		per_cpu(cpu_last_req_freq, policy->cpu) = clk_get_cpu_rate(policy->cpu);
cpu               495 drivers/cpufreq/arm_big_little.c 	dev_info(cpu_dev, "%s: CPU %d initialized\n", __func__, policy->cpu);
cpu               502 drivers/cpufreq/arm_big_little.c 	int cur_cluster = cpu_to_cluster(policy->cpu);
cpu               509 drivers/cpufreq/arm_big_little.c 	cpu_dev = get_cpu_device(policy->cpu);
cpu               512 drivers/cpufreq/arm_big_little.c 				policy->cpu);
cpu               517 drivers/cpufreq/arm_big_little.c 	dev_dbg(cpu_dev, "%s: Exited, cpu: %d\n", __func__, policy->cpu);
cpu               524 drivers/cpufreq/arm_big_little.c 	int cur_cluster = cpu_to_cluster(policy->cpu);
cpu                41 drivers/cpufreq/armada-8k-cpufreq.c 	int cpu;
cpu                43 drivers/cpufreq/armada-8k-cpufreq.c 	for_each_possible_cpu(cpu) {
cpu                47 drivers/cpufreq/armada-8k-cpufreq.c 		cpu_dev = get_cpu_device(cpu);
cpu                49 drivers/cpufreq/armada-8k-cpufreq.c 			pr_warn("Failed to get cpu%d device\n", cpu);
cpu                55 drivers/cpufreq/armada-8k-cpufreq.c 			pr_warn("Cannot get clock for CPU %d\n", cpu);
cpu                58 drivers/cpufreq/armada-8k-cpufreq.c 				cpumask_set_cpu(cpu, cpumask);
cpu               125 drivers/cpufreq/armada-8k-cpufreq.c 	int ret = 0, opps_index = 0, cpu, nb_cpus;
cpu               148 drivers/cpufreq/armada-8k-cpufreq.c 	for_each_cpu(cpu, &cpus) {
cpu               153 drivers/cpufreq/armada-8k-cpufreq.c 		cpu_dev = get_cpu_device(cpu);
cpu               156 drivers/cpufreq/armada-8k-cpufreq.c 			pr_err("Cannot get CPU %d\n", cpu);
cpu               163 drivers/cpufreq/armada-8k-cpufreq.c 			pr_err("Cannot get clock for CPU %d\n", cpu);
cpu                87 drivers/cpufreq/bmips-cpufreq.c static unsigned int bmips_cpufreq_get(unsigned int cpu)
cpu               453 drivers/cpufreq/brcmstb-avs-cpufreq.c static unsigned int brcm_avs_cpufreq_get(unsigned int cpu)
cpu               455 drivers/cpufreq/brcmstb-avs-cpufreq.c 	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
cpu                61 drivers/cpufreq/cppc_cpufreq.c static unsigned int cppc_cpufreq_perf_to_khz(struct cppc_cpudata *cpu,
cpu               140 drivers/cpufreq/cppc_cpufreq.c static unsigned int cppc_cpufreq_perf_to_khz(struct cppc_cpudata *cpu,
cpu               144 drivers/cpufreq/cppc_cpufreq.c 	struct cppc_perf_caps *caps = &cpu->perf_caps;
cpu               159 drivers/cpufreq/cppc_cpufreq.c 		div = cpu->perf_caps.highest_perf;
cpu               164 drivers/cpufreq/cppc_cpufreq.c static unsigned int cppc_cpufreq_khz_to_perf(struct cppc_cpudata *cpu,
cpu               168 drivers/cpufreq/cppc_cpufreq.c 	struct cppc_perf_caps *caps = &cpu->perf_caps;
cpu               182 drivers/cpufreq/cppc_cpufreq.c 		mul = cpu->perf_caps.highest_perf;
cpu               193 drivers/cpufreq/cppc_cpufreq.c 	struct cppc_cpudata *cpu;
cpu               198 drivers/cpufreq/cppc_cpufreq.c 	cpu = all_cpu_data[policy->cpu];
cpu               200 drivers/cpufreq/cppc_cpufreq.c 	desired_perf = cppc_cpufreq_khz_to_perf(cpu, target_freq);
cpu               202 drivers/cpufreq/cppc_cpufreq.c 	if (desired_perf == cpu->perf_ctrls.desired_perf)
cpu               205 drivers/cpufreq/cppc_cpufreq.c 	cpu->perf_ctrls.desired_perf = desired_perf;
cpu               210 drivers/cpufreq/cppc_cpufreq.c 	ret = cppc_set_perf(cpu->cpu, &cpu->perf_ctrls);
cpu               215 drivers/cpufreq/cppc_cpufreq.c 				cpu->cpu, ret);
cpu               228 drivers/cpufreq/cppc_cpufreq.c 	int cpu_num = policy->cpu;
cpu               229 drivers/cpufreq/cppc_cpufreq.c 	struct cppc_cpudata *cpu = all_cpu_data[cpu_num];
cpu               232 drivers/cpufreq/cppc_cpufreq.c 	cpu->perf_ctrls.desired_perf = cpu->perf_caps.lowest_perf;
cpu               234 drivers/cpufreq/cppc_cpufreq.c 	ret = cppc_set_perf(cpu_num, &cpu->perf_ctrls);
cpu               237 drivers/cpufreq/cppc_cpufreq.c 				cpu->perf_caps.lowest_perf, cpu_num, ret);
cpu               249 drivers/cpufreq/cppc_cpufreq.c static unsigned int cppc_cpufreq_get_transition_delay_us(int cpu)
cpu               263 drivers/cpufreq/cppc_cpufreq.c 			delay_us = cppc_get_transition_latency(cpu) / NSEC_PER_USEC;
cpu               268 drivers/cpufreq/cppc_cpufreq.c 		delay_us = cppc_get_transition_latency(cpu) / NSEC_PER_USEC;
cpu               277 drivers/cpufreq/cppc_cpufreq.c static unsigned int cppc_cpufreq_get_transition_delay_us(int cpu)
cpu               279 drivers/cpufreq/cppc_cpufreq.c 	return cppc_get_transition_latency(cpu) / NSEC_PER_USEC;
cpu               285 drivers/cpufreq/cppc_cpufreq.c 	struct cppc_cpudata *cpu;
cpu               286 drivers/cpufreq/cppc_cpufreq.c 	unsigned int cpu_num = policy->cpu;
cpu               289 drivers/cpufreq/cppc_cpufreq.c 	cpu = all_cpu_data[policy->cpu];
cpu               291 drivers/cpufreq/cppc_cpufreq.c 	cpu->cpu = cpu_num;
cpu               292 drivers/cpufreq/cppc_cpufreq.c 	ret = cppc_get_perf_caps(policy->cpu, &cpu->perf_caps);
cpu               301 drivers/cpufreq/cppc_cpufreq.c 	cpu->perf_caps.lowest_freq *= 1000;
cpu               302 drivers/cpufreq/cppc_cpufreq.c 	cpu->perf_caps.nominal_freq *= 1000;
cpu               308 drivers/cpufreq/cppc_cpufreq.c 	policy->min = cppc_cpufreq_perf_to_khz(cpu, cpu->perf_caps.lowest_nonlinear_perf);
cpu               309 drivers/cpufreq/cppc_cpufreq.c 	policy->max = cppc_cpufreq_perf_to_khz(cpu, cpu->perf_caps.highest_perf);
cpu               316 drivers/cpufreq/cppc_cpufreq.c 	policy->cpuinfo.min_freq = cppc_cpufreq_perf_to_khz(cpu, cpu->perf_caps.lowest_perf);
cpu               317 drivers/cpufreq/cppc_cpufreq.c 	policy->cpuinfo.max_freq = cppc_cpufreq_perf_to_khz(cpu, cpu->perf_caps.highest_perf);
cpu               320 drivers/cpufreq/cppc_cpufreq.c 	policy->shared_type = cpu->shared_type;
cpu               325 drivers/cpufreq/cppc_cpufreq.c 		cpumask_copy(policy->cpus, cpu->shared_cpu_map);
cpu               328 drivers/cpufreq/cppc_cpufreq.c 			if (unlikely(i == policy->cpu))
cpu               331 drivers/cpufreq/cppc_cpufreq.c 			memcpy(&all_cpu_data[i]->perf_caps, &cpu->perf_caps,
cpu               332 drivers/cpufreq/cppc_cpufreq.c 			       sizeof(cpu->perf_caps));
cpu               340 drivers/cpufreq/cppc_cpufreq.c 	cpu->cur_policy = policy;
cpu               343 drivers/cpufreq/cppc_cpufreq.c 	policy->cur = cppc_cpufreq_perf_to_khz(cpu,
cpu               344 drivers/cpufreq/cppc_cpufreq.c 					cpu->perf_caps.highest_perf);
cpu               345 drivers/cpufreq/cppc_cpufreq.c 	cpu->perf_ctrls.desired_perf = cpu->perf_caps.highest_perf;
cpu               347 drivers/cpufreq/cppc_cpufreq.c 	ret = cppc_set_perf(cpu_num, &cpu->perf_ctrls);
cpu               350 drivers/cpufreq/cppc_cpufreq.c 				cpu->perf_caps.highest_perf, cpu_num, ret);
cpu               363 drivers/cpufreq/cppc_cpufreq.c static int cppc_get_rate_from_fbctrs(struct cppc_cpudata *cpu,
cpu               382 drivers/cpufreq/cppc_cpufreq.c 		delivered_perf = cpu->perf_ctrls.desired_perf;
cpu               384 drivers/cpufreq/cppc_cpufreq.c 	return cppc_cpufreq_perf_to_khz(cpu, delivered_perf);
cpu               390 drivers/cpufreq/cppc_cpufreq.c 	struct cppc_cpudata *cpu = all_cpu_data[cpunum];
cpu               406 drivers/cpufreq/cppc_cpufreq.c 	return cppc_get_rate_from_fbctrs(cpu, fb_ctrs_t0, fb_ctrs_t1);
cpu               422 drivers/cpufreq/cppc_cpufreq.c 	struct cppc_cpudata *cpu;
cpu               437 drivers/cpufreq/cppc_cpufreq.c 		cpu = all_cpu_data[i];
cpu               438 drivers/cpufreq/cppc_cpufreq.c 		if (!zalloc_cpumask_var(&cpu->shared_cpu_map, GFP_KERNEL))
cpu               458 drivers/cpufreq/cppc_cpufreq.c 		cpu = all_cpu_data[i];
cpu               459 drivers/cpufreq/cppc_cpufreq.c 		if (!cpu)
cpu               461 drivers/cpufreq/cppc_cpufreq.c 		free_cpumask_var(cpu->shared_cpu_map);
cpu               462 drivers/cpufreq/cppc_cpufreq.c 		kfree(cpu);
cpu               471 drivers/cpufreq/cppc_cpufreq.c 	struct cppc_cpudata *cpu;
cpu               477 drivers/cpufreq/cppc_cpufreq.c 		cpu = all_cpu_data[i];
cpu               478 drivers/cpufreq/cppc_cpufreq.c 		free_cpumask_var(cpu->shared_cpu_map);
cpu               479 drivers/cpufreq/cppc_cpufreq.c 		kfree(cpu);
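The cppc_cpufreq.c entries convert between abstract CPPC performance levels and kHz using the lowest/nominal points reported by firmware (scaled to kHz during init, per the *= 1000 lines above). A rough, hedged sketch of the below-nominal direction only; the real driver uses fixed-point helpers and handles the above-nominal ratio separately.

#include <acpi/cppc_acpi.h>

static unsigned int example_perf_to_khz(const struct cppc_perf_caps *caps,
					unsigned int perf)
{
	unsigned int span_khz  = caps->nominal_freq - caps->lowest_freq;
	unsigned int span_perf = caps->nominal_perf - caps->lowest_perf;

	if (!span_perf)
		return caps->nominal_freq;	/* degenerate table: avoid dividing by zero */

	/* linear interpolation between the firmware's lowest and nominal points */
	return caps->lowest_freq +
	       span_khz * (perf - caps->lowest_perf) / span_perf;
}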
cpu                63 drivers/cpufreq/cpufreq-dt.c 	int cpu = dev->id;
cpu                73 drivers/cpufreq/cpufreq-dt.c 	if (!cpu) {
cpu                87 drivers/cpufreq/cpufreq-dt.c 	dev_dbg(dev, "no regulator for cpu%d\n", cpu);
cpu               160 drivers/cpufreq/cpufreq-dt.c 	cpu_dev = get_cpu_device(policy->cpu);
cpu               162 drivers/cpufreq/cpufreq-dt.c 		pr_err("failed to get cpu%d device\n", policy->cpu);
cpu               198 drivers/cpufreq/cpufreq-dt.c 				policy->cpu, ret);
cpu               234 drivers/cpufreq/cpufreq-nforce2.c static unsigned int nforce2_get(unsigned int cpu)
cpu               236 drivers/cpufreq/cpufreq-nforce2.c 	if (cpu)
cpu               262 drivers/cpufreq/cpufreq-nforce2.c 	freqs.old = nforce2_get(policy->cpu);
cpu               313 drivers/cpufreq/cpufreq-nforce2.c 	if (policy->cpu != 0)
cpu               117 drivers/cpufreq/cpufreq.c static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall)
cpu               125 drivers/cpufreq/cpufreq.c 	busy_time = kcpustat_cpu(cpu).cpustat[CPUTIME_USER];
cpu               126 drivers/cpufreq/cpufreq.c 	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM];
cpu               127 drivers/cpufreq/cpufreq.c 	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ];
cpu               128 drivers/cpufreq/cpufreq.c 	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ];
cpu               129 drivers/cpufreq/cpufreq.c 	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL];
cpu               130 drivers/cpufreq/cpufreq.c 	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE];
cpu               139 drivers/cpufreq/cpufreq.c u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy)
cpu               141 drivers/cpufreq/cpufreq.c 	u64 idle_time = get_cpu_idle_time_us(cpu, io_busy ? wall : NULL);
cpu               144 drivers/cpufreq/cpufreq.c 		return get_cpu_idle_time_jiffy(cpu, wall);
cpu               146 drivers/cpufreq/cpufreq.c 		idle_time += get_cpu_iowait_time_us(cpu, wall);
cpu               180 drivers/cpufreq/cpufreq.c struct cpufreq_policy *cpufreq_cpu_get_raw(unsigned int cpu)
cpu               182 drivers/cpufreq/cpufreq.c 	struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
cpu               184 drivers/cpufreq/cpufreq.c 	return policy && cpumask_test_cpu(cpu, policy->cpus) ? policy : NULL;
cpu               188 drivers/cpufreq/cpufreq.c unsigned int cpufreq_generic_get(unsigned int cpu)
cpu               190 drivers/cpufreq/cpufreq.c 	struct cpufreq_policy *policy = cpufreq_cpu_get_raw(cpu);
cpu               194 drivers/cpufreq/cpufreq.c 		       __func__, policy ? "clk" : "policy", cpu);
cpu               213 drivers/cpufreq/cpufreq.c struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
cpu               218 drivers/cpufreq/cpufreq.c 	if (WARN_ON(cpu >= nr_cpu_ids))
cpu               226 drivers/cpufreq/cpufreq.c 		policy = cpufreq_cpu_get_raw(cpu);
cpu               275 drivers/cpufreq/cpufreq.c struct cpufreq_policy *cpufreq_cpu_acquire(unsigned int cpu)
cpu               277 drivers/cpufreq/cpufreq.c 	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
cpu               342 drivers/cpufreq/cpufreq.c 	int cpu;
cpu               378 drivers/cpufreq/cpufreq.c 		for_each_cpu(cpu, policy->cpus)
cpu               379 drivers/cpufreq/cpufreq.c 			trace_cpu_frequency(freqs->new, cpu);
cpu               496 drivers/cpufreq/cpufreq.c 			policy->cpu);
cpu               683 drivers/cpufreq/cpufreq.c __weak unsigned int arch_freq_get_on_cpu(int cpu)
cpu               693 drivers/cpufreq/cpufreq.c 	freq = arch_freq_get_on_cpu(policy->cpu);
cpu               698 drivers/cpufreq/cpufreq.c 		ret = sprintf(buf, "%u\n", cpufreq_driver->get(policy->cpu));
cpu               827 drivers/cpufreq/cpufreq.c 	unsigned int cpu;
cpu               829 drivers/cpufreq/cpufreq.c 	for_each_cpu(cpu, mask) {
cpu               832 drivers/cpufreq/cpufreq.c 		i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), "%u", cpu);
cpu               891 drivers/cpufreq/cpufreq.c 	ret = cpufreq_driver->bios_limit(policy->cpu, &limit);
cpu               963 drivers/cpufreq/cpufreq.c 	if (cpu_online(policy->cpu)) {
cpu               992 drivers/cpufreq/cpufreq.c static void add_cpu_dev_symlink(struct cpufreq_policy *policy, unsigned int cpu)
cpu               994 drivers/cpufreq/cpufreq.c 	struct device *dev = get_cpu_device(cpu);
cpu               999 drivers/cpufreq/cpufreq.c 	if (cpumask_test_and_set_cpu(cpu, policy->real_cpus))
cpu              1062 drivers/cpufreq/cpufreq.c 				 policy->governor->name, policy->cpu);
cpu              1090 drivers/cpufreq/cpufreq.c static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu)
cpu              1095 drivers/cpufreq/cpufreq.c 	if (cpumask_test_cpu(cpu, policy->cpus))
cpu              1102 drivers/cpufreq/cpufreq.c 	cpumask_set_cpu(cpu, policy->cpus);
cpu              1116 drivers/cpufreq/cpufreq.c 		pr_debug("updating policy for CPU %u\n", policy->cpu);
cpu              1128 drivers/cpufreq/cpufreq.c 	pr_debug("handle_update for cpu %u called\n", policy->cpu);
cpu              1174 drivers/cpufreq/cpufreq.c static struct cpufreq_policy *cpufreq_policy_alloc(unsigned int cpu)
cpu              1177 drivers/cpufreq/cpufreq.c 	struct device *dev = get_cpu_device(cpu);
cpu              1197 drivers/cpufreq/cpufreq.c 				   cpufreq_global_kobject, "policy%u", cpu);
cpu              1237 drivers/cpufreq/cpufreq.c 	policy->cpu = cpu;
cpu              1260 drivers/cpufreq/cpufreq.c 	int cpu;
cpu              1266 drivers/cpufreq/cpufreq.c 	for_each_cpu(cpu, policy->related_cpus)
cpu              1267 drivers/cpufreq/cpufreq.c 		per_cpu(cpufreq_cpu_data, cpu) = NULL;
cpu              1298 drivers/cpufreq/cpufreq.c static int cpufreq_online(unsigned int cpu)
cpu              1306 drivers/cpufreq/cpufreq.c 	pr_debug("%s: bringing CPU%u online\n", __func__, cpu);
cpu              1309 drivers/cpufreq/cpufreq.c 	policy = per_cpu(cpufreq_cpu_data, cpu);
cpu              1311 drivers/cpufreq/cpufreq.c 		WARN_ON(!cpumask_test_cpu(cpu, policy->related_cpus));
cpu              1313 drivers/cpufreq/cpufreq.c 			return cpufreq_add_policy_cpu(policy, cpu);
cpu              1318 drivers/cpufreq/cpufreq.c 		policy->cpu = cpu;
cpu              1323 drivers/cpufreq/cpufreq.c 		policy = cpufreq_policy_alloc(cpu);
cpu              1339 drivers/cpufreq/cpufreq.c 		cpumask_copy(policy->cpus, cpumask_of(cpu));
cpu              1411 drivers/cpufreq/cpufreq.c 		policy->cur = cpufreq_driver->get(policy->cpu);
cpu              1443 drivers/cpufreq/cpufreq.c 				__func__, policy->cpu, policy->cur);
cpu              1454 drivers/cpufreq/cpufreq.c 				__func__, policy->cpu, policy->cur);
cpu              1473 drivers/cpufreq/cpufreq.c 		       __func__, cpu, ret);
cpu              1515 drivers/cpufreq/cpufreq.c 	unsigned cpu = dev->id;
cpu              1518 drivers/cpufreq/cpufreq.c 	dev_dbg(dev, "%s: adding CPU%u\n", __func__, cpu);
cpu              1520 drivers/cpufreq/cpufreq.c 	if (cpu_online(cpu)) {
cpu              1521 drivers/cpufreq/cpufreq.c 		ret = cpufreq_online(cpu);
cpu              1527 drivers/cpufreq/cpufreq.c 	policy = per_cpu(cpufreq_cpu_data, cpu);
cpu              1529 drivers/cpufreq/cpufreq.c 		add_cpu_dev_symlink(policy, cpu);
cpu              1534 drivers/cpufreq/cpufreq.c static int cpufreq_offline(unsigned int cpu)
cpu              1539 drivers/cpufreq/cpufreq.c 	pr_debug("%s: unregistering CPU %u\n", __func__, cpu);
cpu              1541 drivers/cpufreq/cpufreq.c 	policy = cpufreq_cpu_get_raw(cpu);
cpu              1551 drivers/cpufreq/cpufreq.c 	cpumask_clear_cpu(cpu, policy->cpus);
cpu              1559 drivers/cpufreq/cpufreq.c 	} else if (cpu == policy->cpu) {
cpu              1561 drivers/cpufreq/cpufreq.c 		policy->cpu = cpumask_any(policy->cpus);
cpu              1609 drivers/cpufreq/cpufreq.c 	unsigned int cpu = dev->id;
cpu              1610 drivers/cpufreq/cpufreq.c 	struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
cpu              1615 drivers/cpufreq/cpufreq.c 	if (cpu_online(cpu))
cpu              1616 drivers/cpufreq/cpufreq.c 		cpufreq_offline(cpu);
cpu              1618 drivers/cpufreq/cpufreq.c 	cpumask_clear_cpu(cpu, policy->real_cpus);
cpu              1658 drivers/cpufreq/cpufreq.c 	new_freq = cpufreq_driver->get(policy->cpu);
cpu              1685 drivers/cpufreq/cpufreq.c unsigned int cpufreq_quick_get(unsigned int cpu)
cpu              1694 drivers/cpufreq/cpufreq.c 		ret_freq = cpufreq_driver->get(cpu);
cpu              1701 drivers/cpufreq/cpufreq.c 	policy = cpufreq_cpu_get(cpu);
cpu              1717 drivers/cpufreq/cpufreq.c unsigned int cpufreq_quick_get_max(unsigned int cpu)
cpu              1719 drivers/cpufreq/cpufreq.c 	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
cpu              1745 drivers/cpufreq/cpufreq.c unsigned int cpufreq_get(unsigned int cpu)
cpu              1747 drivers/cpufreq/cpufreq.c 	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
cpu              2043 drivers/cpufreq/cpufreq.c 		 __func__, policy->cpu, freqs->old, freqs->new);
cpu              2083 drivers/cpufreq/cpufreq.c 			 __func__, policy->cpu, freqs.old, freqs.new);
cpu              2127 drivers/cpufreq/cpufreq.c 		 policy->cpu, target_freq, relation, old_target_freq);
cpu              2205 drivers/cpufreq/cpufreq.c 	pr_debug("%s: for CPU %u\n", __func__, policy->cpu);
cpu              2223 drivers/cpufreq/cpufreq.c 	pr_debug("%s: for CPU %u\n", __func__, policy->cpu);
cpu              2241 drivers/cpufreq/cpufreq.c 	pr_debug("%s: for CPU %u\n", __func__, policy->cpu);
cpu              2263 drivers/cpufreq/cpufreq.c 	pr_debug("%s: for CPU %u\n", __func__, policy->cpu);
cpu              2274 drivers/cpufreq/cpufreq.c 	pr_debug("%s: for CPU %u\n", __func__, policy->cpu);
cpu              2342 drivers/cpufreq/cpufreq.c int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu)
cpu              2348 drivers/cpufreq/cpufreq.c 	cpu_policy = cpufreq_cpu_get(cpu);
cpu              2384 drivers/cpufreq/cpufreq.c 	new_data.cpu = policy->cpu;
cpu              2393 drivers/cpufreq/cpufreq.c 		 new_data.cpu, new_data.min, new_data.max);
cpu              2466 drivers/cpufreq/cpufreq.c void cpufreq_update_policy(unsigned int cpu)
cpu              2468 drivers/cpufreq/cpufreq.c 	struct cpufreq_policy *policy = cpufreq_cpu_acquire(cpu);
cpu              2495 drivers/cpufreq/cpufreq.c void cpufreq_update_limits(unsigned int cpu)
cpu              2498 drivers/cpufreq/cpufreq.c 		cpufreq_driver->update_limits(cpu);
cpu              2500 drivers/cpufreq/cpufreq.c 		cpufreq_update_policy(cpu);
cpu              2607 drivers/cpufreq/cpufreq.c static int cpuhp_cpufreq_online(unsigned int cpu)
cpu              2609 drivers/cpufreq/cpufreq.c 	cpufreq_online(cpu);
cpu              2614 drivers/cpufreq/cpufreq.c static int cpuhp_cpufreq_offline(unsigned int cpu)
cpu              2616 drivers/cpufreq/cpufreq.c 	cpufreq_offline(cpu);
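Most of the cpufreq.c entries above revolve around looking up the policy that manages a CPU. cpufreq_cpu_get() returns that policy with a reference held (or NULL if the CPU is unmanaged), and the caller must drop it with cpufreq_cpu_put(). A minimal usage sketch; example_report_freq() is a made-up caller.

#include <linux/cpufreq.h>
#include <linux/printk.h>

static void example_report_freq(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);

	if (!policy)
		return;				/* CPU not managed by any cpufreq driver */

	pr_info("cpu%u: cur=%u kHz, min=%u kHz, max=%u kHz\n",
		cpu, policy->cur, policy->min, policy->max);

	cpufreq_cpu_put(policy);		/* drop the reference taken by cpufreq_cpu_get() */
}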
cpu               326 drivers/cpufreq/cpufreq_governor.c 	int cpu;
cpu               331 drivers/cpufreq/cpufreq_governor.c 	for_each_cpu(cpu, policy->cpus) {
cpu               332 drivers/cpufreq/cpufreq_governor.c 		struct cpu_dbs_info *cdbs = &per_cpu(cpu_dbs, cpu);
cpu               334 drivers/cpufreq/cpufreq_governor.c 		cpufreq_add_update_util_hook(cpu, &cdbs->update_util,
cpu               358 drivers/cpufreq/cpufreq_ondemand.c 	int cpu;
cpu               364 drivers/cpufreq/cpufreq_ondemand.c 	cpu = get_cpu();
cpu               365 drivers/cpufreq/cpufreq_ondemand.c 	idle_time = get_cpu_idle_time_us(cpu, NULL);
cpu               415 drivers/cpufreq/cpufreq_ondemand.c 	unsigned int cpu;
cpu               422 drivers/cpufreq/cpufreq_ondemand.c 	for_each_online_cpu(cpu) {
cpu               428 drivers/cpufreq/cpufreq_ondemand.c 		if (cpumask_test_cpu(cpu, &done))
cpu               431 drivers/cpufreq/cpufreq_ondemand.c 		policy = cpufreq_cpu_get_raw(cpu);
cpu                33 drivers/cpufreq/cpufreq_userspace.c 	pr_debug("cpufreq_set for cpu %u, freq %u kHz\n", policy->cpu, freq);
cpu                36 drivers/cpufreq/cpufreq_userspace.c 	if (!per_cpu(cpu_is_managed, policy->cpu))
cpu                77 drivers/cpufreq/cpufreq_userspace.c 	pr_debug("started managing cpu %u\n", policy->cpu);
cpu                80 drivers/cpufreq/cpufreq_userspace.c 	per_cpu(cpu_is_managed, policy->cpu) = 1;
cpu                90 drivers/cpufreq/cpufreq_userspace.c 	pr_debug("managing cpu %u stopped\n", policy->cpu);
cpu                93 drivers/cpufreq/cpufreq_userspace.c 	per_cpu(cpu_is_managed, policy->cpu) = 0;
cpu               105 drivers/cpufreq/cpufreq_userspace.c 		 policy->cpu, policy->min, policy->max, policy->cur, *setspeed);
cpu                75 drivers/cpufreq/davinci-cpufreq.c 	if (policy->cpu != 0)
cpu                91 drivers/cpufreq/e_powersaver.c static unsigned int eps_get(unsigned int cpu)
cpu                96 drivers/cpufreq/e_powersaver.c 	if (cpu)
cpu                98 drivers/cpufreq/e_powersaver.c 	centaur = eps_cpu[cpu];
cpu               156 drivers/cpufreq/e_powersaver.c 	unsigned int cpu = policy->cpu;
cpu               160 drivers/cpufreq/e_powersaver.c 	if (unlikely(eps_cpu[cpu] == NULL))
cpu               162 drivers/cpufreq/e_powersaver.c 	centaur = eps_cpu[cpu];
cpu               191 drivers/cpufreq/e_powersaver.c 	if (policy->cpu != 0)
cpu               290 drivers/cpufreq/e_powersaver.c 		if (!acpi_processor_get_bios_limit(policy->cpu, &limit)) {
cpu               366 drivers/cpufreq/e_powersaver.c 	unsigned int cpu = policy->cpu;
cpu               369 drivers/cpufreq/e_powersaver.c 	kfree(eps_cpu[cpu]);
cpu               370 drivers/cpufreq/e_powersaver.c 	eps_cpu[cpu] = NULL;
cpu                77 drivers/cpufreq/elanfreq.c static unsigned int elanfreq_get_cpu_frequency(unsigned int cpu)
cpu                71 drivers/cpufreq/freq_table.c 					policy->min, policy->max, policy->cpu);
cpu                93 drivers/cpufreq/freq_table.c 				policy->min, policy->max, policy->cpu);
cpu               130 drivers/cpufreq/freq_table.c 					target_freq, relation, policy->cpu);
cpu               191 drivers/cpufreq/freq_table.c 			WARN(1, "Invalid frequency table: %d\n", policy->cpu);
cpu               202 drivers/cpufreq/gx-suspmod.c static unsigned int gx_get_cpuspeed(unsigned int cpu)
cpu               339 drivers/cpufreq/gx-suspmod.c 	policy->cpu = 0;
cpu               381 drivers/cpufreq/gx-suspmod.c 	policy->cpu = 0;
cpu               402 drivers/cpufreq/gx-suspmod.c 	if (!policy || policy->cpu != 0)
cpu               418 drivers/cpufreq/gx-suspmod.c 	policy->cpu = 0;
cpu                37 drivers/cpufreq/ia64-acpi-cpufreq.c 	unsigned int		cpu;
cpu               109 drivers/cpufreq/ia64-acpi-cpufreq.c 	unsigned int		cpu = req->cpu;
cpu               110 drivers/cpufreq/ia64-acpi-cpufreq.c 	struct cpufreq_acpi_io	*data = acpi_io_data[cpu];
cpu               115 drivers/cpufreq/ia64-acpi-cpufreq.c 	if (smp_processor_id() != cpu)
cpu               133 drivers/cpufreq/ia64-acpi-cpufreq.c 	unsigned int		cpu = req->cpu;
cpu               134 drivers/cpufreq/ia64-acpi-cpufreq.c 	struct cpufreq_acpi_io	*data = acpi_io_data[cpu];
cpu               139 drivers/cpufreq/ia64-acpi-cpufreq.c 	if (smp_processor_id() != cpu)
cpu               176 drivers/cpufreq/ia64-acpi-cpufreq.c 	unsigned int		cpu)
cpu               181 drivers/cpufreq/ia64-acpi-cpufreq.c 	req.cpu = cpu;
cpu               182 drivers/cpufreq/ia64-acpi-cpufreq.c 	ret = work_on_cpu(cpu, processor_get_freq, &req);
cpu               195 drivers/cpufreq/ia64-acpi-cpufreq.c 	req.cpu = policy->cpu;
cpu               198 drivers/cpufreq/ia64-acpi-cpufreq.c 	return work_on_cpu(req.cpu, processor_set_freq, &req);
cpu               206 drivers/cpufreq/ia64-acpi-cpufreq.c 	unsigned int		cpu = policy->cpu;
cpu               217 drivers/cpufreq/ia64-acpi-cpufreq.c 	acpi_io_data[cpu] = data;
cpu               219 drivers/cpufreq/ia64-acpi-cpufreq.c 	result = acpi_processor_register_performance(&data->acpi_data, cpu);
cpu               277 drivers/cpufreq/ia64-acpi-cpufreq.c 	pr_info("CPU%u - ACPI performance management activated\n", cpu);
cpu               296 drivers/cpufreq/ia64-acpi-cpufreq.c 	acpi_processor_unregister_performance(cpu);
cpu               299 drivers/cpufreq/ia64-acpi-cpufreq.c 	acpi_io_data[cpu] = NULL;
cpu               309 drivers/cpufreq/ia64-acpi-cpufreq.c 	struct cpufreq_acpi_io *data = acpi_io_data[policy->cpu];
cpu               314 drivers/cpufreq/ia64-acpi-cpufreq.c 		acpi_io_data[policy->cpu] = NULL;
cpu               315 drivers/cpufreq/ia64-acpi-cpufreq.c 		acpi_processor_unregister_performance(policy->cpu);
cpu               234 drivers/cpufreq/intel_pstate.c 	int cpu;
cpu               342 drivers/cpufreq/intel_pstate.c static void intel_pstate_set_itmt_prio(int cpu)
cpu               348 drivers/cpufreq/intel_pstate.c 	ret = cppc_get_perf_caps(cpu, &cppc_perf);
cpu               357 drivers/cpufreq/intel_pstate.c 	sched_set_itmt_core_prio(cppc_perf.highest_perf, cpu);
cpu               378 drivers/cpufreq/intel_pstate.c static int intel_pstate_get_cppc_guranteed(int cpu)
cpu               383 drivers/cpufreq/intel_pstate.c 	ret = cppc_get_perf_caps(cpu, &cppc_perf);
cpu               394 drivers/cpufreq/intel_pstate.c static void intel_pstate_set_itmt_prio(int cpu)
cpu               401 drivers/cpufreq/intel_pstate.c 	struct cpudata *cpu;
cpu               406 drivers/cpufreq/intel_pstate.c 		intel_pstate_set_itmt_prio(policy->cpu);
cpu               413 drivers/cpufreq/intel_pstate.c 	cpu = all_cpu_data[policy->cpu];
cpu               415 drivers/cpufreq/intel_pstate.c 	ret = acpi_processor_register_performance(&cpu->acpi_perf_data,
cpu               416 drivers/cpufreq/intel_pstate.c 						  policy->cpu);
cpu               425 drivers/cpufreq/intel_pstate.c 	if (cpu->acpi_perf_data.control_register.space_id !=
cpu               433 drivers/cpufreq/intel_pstate.c 	if (cpu->acpi_perf_data.state_count < 2)
cpu               436 drivers/cpufreq/intel_pstate.c 	pr_debug("CPU%u - ACPI _PSS perf data\n", policy->cpu);
cpu               437 drivers/cpufreq/intel_pstate.c 	for (i = 0; i < cpu->acpi_perf_data.state_count; i++) {
cpu               439 drivers/cpufreq/intel_pstate.c 			 (i == cpu->acpi_perf_data.state ? '*' : ' '), i,
cpu               440 drivers/cpufreq/intel_pstate.c 			 (u32) cpu->acpi_perf_data.states[i].core_frequency,
cpu               441 drivers/cpufreq/intel_pstate.c 			 (u32) cpu->acpi_perf_data.states[i].power,
cpu               442 drivers/cpufreq/intel_pstate.c 			 (u32) cpu->acpi_perf_data.states[i].control);
cpu               457 drivers/cpufreq/intel_pstate.c 		cpu->acpi_perf_data.states[0].core_frequency =
cpu               459 drivers/cpufreq/intel_pstate.c 	cpu->valid_pss_table = true;
cpu               465 drivers/cpufreq/intel_pstate.c 	cpu->valid_pss_table = false;
cpu               466 drivers/cpufreq/intel_pstate.c 	acpi_processor_unregister_performance(policy->cpu);
cpu               471 drivers/cpufreq/intel_pstate.c 	struct cpudata *cpu;
cpu               473 drivers/cpufreq/intel_pstate.c 	cpu = all_cpu_data[policy->cpu];
cpu               474 drivers/cpufreq/intel_pstate.c 	if (!cpu->valid_pss_table)
cpu               477 drivers/cpufreq/intel_pstate.c 	acpi_processor_unregister_performance(policy->cpu);
cpu               495 drivers/cpufreq/intel_pstate.c static int intel_pstate_get_cppc_guranteed(int cpu)
cpu               504 drivers/cpufreq/intel_pstate.c 	struct cpudata *cpu;
cpu               506 drivers/cpufreq/intel_pstate.c 	cpu = all_cpu_data[0];
cpu               510 drivers/cpufreq/intel_pstate.c 		 cpu->pstate.max_pstate == cpu->pstate.turbo_pstate);
cpu               515 drivers/cpufreq/intel_pstate.c 	struct cpudata *cpu = all_cpu_data[0];
cpu               516 drivers/cpufreq/intel_pstate.c 	int turbo_pstate = cpu->pstate.turbo_pstate;
cpu               519 drivers/cpufreq/intel_pstate.c 		(cpu->pstate.min_pstate * 100 / turbo_pstate) : 0;
cpu               530 drivers/cpufreq/intel_pstate.c 	ret = rdmsrl_on_cpu(cpu_data->cpu, MSR_IA32_ENERGY_PERF_BIAS, &epb);
cpu               547 drivers/cpufreq/intel_pstate.c 			epp = rdmsrl_on_cpu(cpu_data->cpu, MSR_HWP_REQUEST,
cpu               561 drivers/cpufreq/intel_pstate.c static int intel_pstate_set_epb(int cpu, s16 pref)
cpu               569 drivers/cpufreq/intel_pstate.c 	ret = rdmsrl_on_cpu(cpu, MSR_IA32_ENERGY_PERF_BIAS, &epb);
cpu               574 drivers/cpufreq/intel_pstate.c 	wrmsrl_on_cpu(cpu, MSR_IA32_ENERGY_PERF_BIAS, epb);
cpu               654 drivers/cpufreq/intel_pstate.c 		ret = rdmsrl_on_cpu(cpu_data->cpu, MSR_HWP_REQUEST, &value);
cpu               664 drivers/cpufreq/intel_pstate.c 		ret = wrmsrl_on_cpu(cpu_data->cpu, MSR_HWP_REQUEST, value);
cpu               668 drivers/cpufreq/intel_pstate.c 		ret = intel_pstate_set_epb(cpu_data->cpu, epp);
cpu               695 drivers/cpufreq/intel_pstate.c 	struct cpudata *cpu_data = all_cpu_data[policy->cpu];
cpu               714 drivers/cpufreq/intel_pstate.c 	struct cpudata *cpu_data = all_cpu_data[policy->cpu];
cpu               728 drivers/cpufreq/intel_pstate.c 	struct cpudata *cpu;
cpu               732 drivers/cpufreq/intel_pstate.c 	ratio = intel_pstate_get_cppc_guranteed(policy->cpu);
cpu               734 drivers/cpufreq/intel_pstate.c 		rdmsrl_on_cpu(policy->cpu, MSR_HWP_CAPABILITIES, &cap);
cpu               738 drivers/cpufreq/intel_pstate.c 	cpu = all_cpu_data[policy->cpu];
cpu               740 drivers/cpufreq/intel_pstate.c 	return sprintf(buf, "%d\n", ratio * cpu->pstate.scaling);
cpu               752 drivers/cpufreq/intel_pstate.c static void intel_pstate_get_hwp_max(unsigned int cpu, int *phy_max,
cpu               757 drivers/cpufreq/intel_pstate.c 	rdmsrl_on_cpu(cpu, MSR_HWP_CAPABILITIES, &cap);
cpu               758 drivers/cpufreq/intel_pstate.c 	WRITE_ONCE(all_cpu_data[cpu]->hwp_cap_cached, cap);
cpu               767 drivers/cpufreq/intel_pstate.c static void intel_pstate_hwp_set(unsigned int cpu)
cpu               769 drivers/cpufreq/intel_pstate.c 	struct cpudata *cpu_data = all_cpu_data[cpu];
cpu               780 drivers/cpufreq/intel_pstate.c 	rdmsrl_on_cpu(cpu, MSR_HWP_REQUEST, &value);
cpu               830 drivers/cpufreq/intel_pstate.c 		intel_pstate_set_epb(cpu, epp);
cpu               834 drivers/cpufreq/intel_pstate.c 	wrmsrl_on_cpu(cpu, MSR_HWP_REQUEST, value);
cpu               837 drivers/cpufreq/intel_pstate.c static void intel_pstate_hwp_force_min_perf(int cpu)
cpu               842 drivers/cpufreq/intel_pstate.c 	value = all_cpu_data[cpu]->hwp_req_cached;
cpu               844 drivers/cpufreq/intel_pstate.c 	min_perf = HWP_LOWEST_PERF(all_cpu_data[cpu]->hwp_cap_cached);
cpu               854 drivers/cpufreq/intel_pstate.c 	wrmsrl_on_cpu(cpu, MSR_HWP_REQUEST, value);
cpu               859 drivers/cpufreq/intel_pstate.c 	struct cpudata *cpu_data = all_cpu_data[policy->cpu];
cpu               878 drivers/cpufreq/intel_pstate.c 	if (policy->cpu == 0)
cpu               879 drivers/cpufreq/intel_pstate.c 		intel_pstate_hwp_enable(all_cpu_data[policy->cpu]);
cpu               881 drivers/cpufreq/intel_pstate.c 	all_cpu_data[policy->cpu]->epp_policy = 0;
cpu               882 drivers/cpufreq/intel_pstate.c 	intel_pstate_hwp_set(policy->cpu);
cpu               891 drivers/cpufreq/intel_pstate.c 	int cpu;
cpu               893 drivers/cpufreq/intel_pstate.c 	for_each_possible_cpu(cpu)
cpu               894 drivers/cpufreq/intel_pstate.c 		cpufreq_update_policy(cpu);
cpu               897 drivers/cpufreq/intel_pstate.c static void intel_pstate_update_max_freq(unsigned int cpu)
cpu               899 drivers/cpufreq/intel_pstate.c 	struct cpufreq_policy *policy = cpufreq_cpu_acquire(cpu);
cpu               905 drivers/cpufreq/intel_pstate.c 	cpudata = all_cpu_data[cpu];
cpu               914 drivers/cpufreq/intel_pstate.c static void intel_pstate_update_limits(unsigned int cpu)
cpu               925 drivers/cpufreq/intel_pstate.c 		for_each_possible_cpu(cpu)
cpu               926 drivers/cpufreq/intel_pstate.c 			intel_pstate_update_max_freq(cpu);
cpu               928 drivers/cpufreq/intel_pstate.c 		cpufreq_update_policy(cpu);
cpu               973 drivers/cpufreq/intel_pstate.c 	struct cpudata *cpu;
cpu               984 drivers/cpufreq/intel_pstate.c 	cpu = all_cpu_data[0];
cpu               986 drivers/cpufreq/intel_pstate.c 	total = cpu->pstate.turbo_pstate - cpu->pstate.min_pstate + 1;
cpu               987 drivers/cpufreq/intel_pstate.c 	no_turbo = cpu->pstate.max_pstate - cpu->pstate.min_pstate + 1;
cpu               999 drivers/cpufreq/intel_pstate.c 	struct cpudata *cpu;
cpu              1009 drivers/cpufreq/intel_pstate.c 	cpu = all_cpu_data[0];
cpu              1010 drivers/cpufreq/intel_pstate.c 	total = cpu->pstate.turbo_pstate - cpu->pstate.min_pstate + 1;
cpu              1070 drivers/cpufreq/intel_pstate.c 		struct cpudata *cpu = all_cpu_data[0];
cpu              1071 drivers/cpufreq/intel_pstate.c 		int pct = cpu->pstate.max_pstate * 100 / cpu->pstate.turbo_pstate;
cpu              1096 drivers/cpufreq/intel_pstate.c 		struct cpudata *cpu = all_cpu_data[i];
cpu              1111 drivers/cpufreq/intel_pstate.c 			turbo_max = cpu->pstate.turbo_pstate;
cpu              1121 drivers/cpufreq/intel_pstate.c 		freq *= cpu->pstate.scaling;
cpu              1282 drivers/cpufreq/intel_pstate.c 		wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, 0x00);
cpu              1284 drivers/cpufreq/intel_pstate.c 	wrmsrl_on_cpu(cpudata->cpu, MSR_PM_ENABLE, 0x1);
cpu              1293 drivers/cpufreq/intel_pstate.c static void intel_pstate_disable_ee(int cpu)
cpu              1298 drivers/cpufreq/intel_pstate.c 	ret = rdmsrl_on_cpu(cpu, MSR_IA32_POWER_CTL, &power_ctl);
cpu              1305 drivers/cpufreq/intel_pstate.c 		wrmsrl_on_cpu(cpu, MSR_IA32_POWER_CTL, power_ctl);
cpu              1534 drivers/cpufreq/intel_pstate.c static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate)
cpu              1536 drivers/cpufreq/intel_pstate.c 	trace_cpu_frequency(pstate * cpu->pstate.scaling, cpu->cpu);
cpu              1537 drivers/cpufreq/intel_pstate.c 	cpu->pstate.current_pstate = pstate;
cpu              1543 drivers/cpufreq/intel_pstate.c 	wrmsrl_on_cpu(cpu->cpu, MSR_IA32_PERF_CTL,
cpu              1544 drivers/cpufreq/intel_pstate.c 		      pstate_funcs.get_val(cpu, pstate));
cpu              1547 drivers/cpufreq/intel_pstate.c static void intel_pstate_set_min_pstate(struct cpudata *cpu)
cpu              1549 drivers/cpufreq/intel_pstate.c 	intel_pstate_set_pstate(cpu, cpu->pstate.min_pstate);
cpu              1552 drivers/cpufreq/intel_pstate.c static void intel_pstate_max_within_limits(struct cpudata *cpu)
cpu              1554 drivers/cpufreq/intel_pstate.c 	int pstate = max(cpu->pstate.min_pstate, cpu->max_perf_ratio);
cpu              1557 drivers/cpufreq/intel_pstate.c 	intel_pstate_set_pstate(cpu, pstate);
cpu              1560 drivers/cpufreq/intel_pstate.c static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
cpu              1562 drivers/cpufreq/intel_pstate.c 	cpu->pstate.min_pstate = pstate_funcs.get_min();
cpu              1563 drivers/cpufreq/intel_pstate.c 	cpu->pstate.max_pstate = pstate_funcs.get_max();
cpu              1564 drivers/cpufreq/intel_pstate.c 	cpu->pstate.max_pstate_physical = pstate_funcs.get_max_physical();
cpu              1565 drivers/cpufreq/intel_pstate.c 	cpu->pstate.turbo_pstate = pstate_funcs.get_turbo();
cpu              1566 drivers/cpufreq/intel_pstate.c 	cpu->pstate.scaling = pstate_funcs.get_scaling();
cpu              1567 drivers/cpufreq/intel_pstate.c 	cpu->pstate.max_freq = cpu->pstate.max_pstate * cpu->pstate.scaling;
cpu              1572 drivers/cpufreq/intel_pstate.c 		intel_pstate_get_hwp_max(cpu->cpu, &phy_max, &current_max);
cpu              1573 drivers/cpufreq/intel_pstate.c 		cpu->pstate.turbo_freq = phy_max * cpu->pstate.scaling;
cpu              1575 drivers/cpufreq/intel_pstate.c 		cpu->pstate.turbo_freq = cpu->pstate.turbo_pstate * cpu->pstate.scaling;
cpu              1579 drivers/cpufreq/intel_pstate.c 		cpu->aperf_mperf_shift = pstate_funcs.get_aperf_mperf_shift();
cpu              1582 drivers/cpufreq/intel_pstate.c 		pstate_funcs.get_vid(cpu);
cpu              1584 drivers/cpufreq/intel_pstate.c 	intel_pstate_set_min_pstate(cpu);
cpu              1595 drivers/cpufreq/intel_pstate.c static inline void intel_pstate_hwp_boost_up(struct cpudata *cpu)
cpu              1597 drivers/cpufreq/intel_pstate.c 	u64 hwp_req = READ_ONCE(cpu->hwp_req_cached);
cpu              1617 drivers/cpufreq/intel_pstate.c 	if (max_limit == min_limit || cpu->hwp_boost_min >= max_limit)
cpu              1620 drivers/cpufreq/intel_pstate.c 	if (!cpu->hwp_boost_min)
cpu              1621 drivers/cpufreq/intel_pstate.c 		cpu->hwp_boost_min = min_limit;
cpu              1624 drivers/cpufreq/intel_pstate.c 	boost_level1 = (HWP_GUARANTEED_PERF(cpu->hwp_cap_cached) + min_limit) >> 1;
cpu              1626 drivers/cpufreq/intel_pstate.c 	if (cpu->hwp_boost_min < boost_level1)
cpu              1627 drivers/cpufreq/intel_pstate.c 		cpu->hwp_boost_min = boost_level1;
cpu              1628 drivers/cpufreq/intel_pstate.c 	else if (cpu->hwp_boost_min < HWP_GUARANTEED_PERF(cpu->hwp_cap_cached))
cpu              1629 drivers/cpufreq/intel_pstate.c 		cpu->hwp_boost_min = HWP_GUARANTEED_PERF(cpu->hwp_cap_cached);
cpu              1630 drivers/cpufreq/intel_pstate.c 	else if (cpu->hwp_boost_min == HWP_GUARANTEED_PERF(cpu->hwp_cap_cached) &&
cpu              1631 drivers/cpufreq/intel_pstate.c 		 max_limit != HWP_GUARANTEED_PERF(cpu->hwp_cap_cached))
cpu              1632 drivers/cpufreq/intel_pstate.c 		cpu->hwp_boost_min = max_limit;
cpu              1636 drivers/cpufreq/intel_pstate.c 	hwp_req = (hwp_req & ~GENMASK_ULL(7, 0)) | cpu->hwp_boost_min;
cpu              1638 drivers/cpufreq/intel_pstate.c 	cpu->last_update = cpu->sample.time;
cpu              1641 drivers/cpufreq/intel_pstate.c static inline void intel_pstate_hwp_boost_down(struct cpudata *cpu)
cpu              1643 drivers/cpufreq/intel_pstate.c 	if (cpu->hwp_boost_min) {
cpu              1647 drivers/cpufreq/intel_pstate.c 		expired = time_after64(cpu->sample.time, cpu->last_update +
cpu              1650 drivers/cpufreq/intel_pstate.c 			wrmsrl(MSR_HWP_REQUEST, cpu->hwp_req_cached);
cpu              1651 drivers/cpufreq/intel_pstate.c 			cpu->hwp_boost_min = 0;
cpu              1654 drivers/cpufreq/intel_pstate.c 	cpu->last_update = cpu->sample.time;
cpu              1657 drivers/cpufreq/intel_pstate.c static inline void intel_pstate_update_util_hwp_local(struct cpudata *cpu,
cpu              1660 drivers/cpufreq/intel_pstate.c 	cpu->sample.time = time;
cpu              1662 drivers/cpufreq/intel_pstate.c 	if (cpu->sched_flags & SCHED_CPUFREQ_IOWAIT) {
cpu              1665 drivers/cpufreq/intel_pstate.c 		cpu->sched_flags = 0;
cpu              1673 drivers/cpufreq/intel_pstate.c 		if (time_before64(time, cpu->last_io_update + 2 * TICK_NSEC))
cpu              1676 drivers/cpufreq/intel_pstate.c 		cpu->last_io_update = time;
cpu              1679 drivers/cpufreq/intel_pstate.c 			intel_pstate_hwp_boost_up(cpu);
cpu              1682 drivers/cpufreq/intel_pstate.c 		intel_pstate_hwp_boost_down(cpu);
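
The intel_pstate_hwp_boost_up()/intel_pstate_hwp_boost_down() lines above (1617-1632) show a staged ramp for the boosted HWP minimum: each qualifying I/O wakeup lifts it from the floor to the midpoint with the guaranteed level, then to guaranteed, then to the maximum, and the boost is dropped again after a quiet period. A minimal userspace sketch of that ramp; the struct and field names are mine, not the driver's, and the limit values are invented:

#include <stdio.h>

struct hwp_limits { int min, guaranteed, max, boost_min; };

static void boost_up(struct hwp_limits *l)
{
	int level1 = (l->guaranteed + l->min) / 2;

	if (l->max == l->min || l->boost_min >= l->max)
		return;                         /* nothing left to boost */
	if (!l->boost_min)
		l->boost_min = l->min;          /* first wakeup starts at the floor */

	if (l->boost_min < level1)
		l->boost_min = level1;          /* halfway to guaranteed */
	else if (l->boost_min < l->guaranteed)
		l->boost_min = l->guaranteed;   /* then the guaranteed level */
	else if (l->boost_min == l->guaranteed && l->max != l->guaranteed)
		l->boost_min = l->max;          /* finally the full limit */
}

int main(void)
{
	struct hwp_limits l = { .min = 8, .guaranteed = 24, .max = 39, .boost_min = 0 };

	for (int i = 1; i <= 4; i++) {
		boost_up(&l);
		printf("wakeup %d: boost_min=%d\n", i, l.boost_min);
	}
	return 0;
}
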
cpu              1689 drivers/cpufreq/intel_pstate.c 	struct cpudata *cpu = container_of(data, struct cpudata, update_util);
cpu              1691 drivers/cpufreq/intel_pstate.c 	cpu->sched_flags |= flags;
cpu              1693 drivers/cpufreq/intel_pstate.c 	if (smp_processor_id() == cpu->cpu)
cpu              1694 drivers/cpufreq/intel_pstate.c 		intel_pstate_update_util_hwp_local(cpu, time);
cpu              1697 drivers/cpufreq/intel_pstate.c static inline void intel_pstate_calc_avg_perf(struct cpudata *cpu)
cpu              1699 drivers/cpufreq/intel_pstate.c 	struct sample *sample = &cpu->sample;
cpu              1704 drivers/cpufreq/intel_pstate.c static inline bool intel_pstate_sample(struct cpudata *cpu, u64 time)
cpu              1714 drivers/cpufreq/intel_pstate.c 	if (cpu->prev_mperf == mperf || cpu->prev_tsc == tsc) {
cpu              1720 drivers/cpufreq/intel_pstate.c 	cpu->last_sample_time = cpu->sample.time;
cpu              1721 drivers/cpufreq/intel_pstate.c 	cpu->sample.time = time;
cpu              1722 drivers/cpufreq/intel_pstate.c 	cpu->sample.aperf = aperf;
cpu              1723 drivers/cpufreq/intel_pstate.c 	cpu->sample.mperf = mperf;
cpu              1724 drivers/cpufreq/intel_pstate.c 	cpu->sample.tsc =  tsc;
cpu              1725 drivers/cpufreq/intel_pstate.c 	cpu->sample.aperf -= cpu->prev_aperf;
cpu              1726 drivers/cpufreq/intel_pstate.c 	cpu->sample.mperf -= cpu->prev_mperf;
cpu              1727 drivers/cpufreq/intel_pstate.c 	cpu->sample.tsc -= cpu->prev_tsc;
cpu              1729 drivers/cpufreq/intel_pstate.c 	cpu->prev_aperf = aperf;
cpu              1730 drivers/cpufreq/intel_pstate.c 	cpu->prev_mperf = mperf;
cpu              1731 drivers/cpufreq/intel_pstate.c 	cpu->prev_tsc = tsc;
cpu              1739 drivers/cpufreq/intel_pstate.c 	if (cpu->last_sample_time) {
cpu              1740 drivers/cpufreq/intel_pstate.c 		intel_pstate_calc_avg_perf(cpu);
cpu              1746 drivers/cpufreq/intel_pstate.c static inline int32_t get_avg_frequency(struct cpudata *cpu)
cpu              1748 drivers/cpufreq/intel_pstate.c 	return mul_ext_fp(cpu->sample.core_avg_perf, cpu_khz);
cpu              1751 drivers/cpufreq/intel_pstate.c static inline int32_t get_avg_pstate(struct cpudata *cpu)
cpu              1753 drivers/cpufreq/intel_pstate.c 	return mul_ext_fp(cpu->pstate.max_pstate_physical,
cpu              1754 drivers/cpufreq/intel_pstate.c 			  cpu->sample.core_avg_perf);
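
intel_pstate_sample() and the two helpers above keep APERF/MPERF/TSC deltas between samples and turn them into an average performance ratio. A sketch of that bookkeeping using plain doubles instead of the driver's fixed-point helpers; the counter values are made up, and the aperf/mperf ratio is my reading of intel_pstate_calc_avg_perf(), whose body is not shown in the listing:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t prev_aperf = 1000000, prev_mperf = 1200000;
	uint64_t aperf = 1900000, mperf = 1800000;   /* new raw counter reads */
	unsigned int cpu_khz = 2400000;              /* base clock in kHz     */
	int max_pstate_physical = 39;

	/* per-sample deltas, then the average performance ratio */
	double core_avg_perf = (double)(aperf - prev_aperf) /
			       (double)(mperf - prev_mperf);

	printf("avg frequency: %.0f kHz\n", core_avg_perf * cpu_khz);
	printf("avg pstate:    %.1f\n", core_avg_perf * max_pstate_physical);
	return 0;
}
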
cpu              1757 drivers/cpufreq/intel_pstate.c static inline int32_t get_target_pstate(struct cpudata *cpu)
cpu              1759 drivers/cpufreq/intel_pstate.c 	struct sample *sample = &cpu->sample;
cpu              1763 drivers/cpufreq/intel_pstate.c 	busy_frac = div_fp(sample->mperf << cpu->aperf_mperf_shift,
cpu              1766 drivers/cpufreq/intel_pstate.c 	if (busy_frac < cpu->iowait_boost)
cpu              1767 drivers/cpufreq/intel_pstate.c 		busy_frac = cpu->iowait_boost;
cpu              1772 drivers/cpufreq/intel_pstate.c 			cpu->pstate.max_pstate : cpu->pstate.turbo_pstate;
cpu              1775 drivers/cpufreq/intel_pstate.c 	if (target < cpu->pstate.min_pstate)
cpu              1776 drivers/cpufreq/intel_pstate.c 		target = cpu->pstate.min_pstate;
cpu              1785 drivers/cpufreq/intel_pstate.c 	avg_pstate = get_avg_pstate(cpu);
cpu              1792 drivers/cpufreq/intel_pstate.c static int intel_pstate_prepare_request(struct cpudata *cpu, int pstate)
cpu              1794 drivers/cpufreq/intel_pstate.c 	int min_pstate = max(cpu->pstate.min_pstate, cpu->min_perf_ratio);
cpu              1795 drivers/cpufreq/intel_pstate.c 	int max_pstate = max(min_pstate, cpu->max_perf_ratio);
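
get_target_pstate() and intel_pstate_prepare_request() above pick a pstate from the busy fraction (mperf/tsc, floored by the iowait boost) and clamp it into the policy's performance-ratio window. A simplified sketch with doubles instead of fixed point and invented numbers; the driver's extra headroom and averaging steps are not visible in the listing and are omitted here:

#include <stdio.h>

static int clamp_int(int v, int lo, int hi)
{
	return v < lo ? lo : (v > hi ? hi : v);
}

int main(void)
{
	double mperf_delta = 600000, tsc_delta = 1000000;  /* ~60% busy          */
	double iowait_boost = 0.125;                       /* ONE_EIGHTH_FP-like */
	int turbo_pstate = 39, max_pstate = 24, min_pstate = 8;
	int turbo_disabled = 0;
	int min_perf_ratio = 10, max_perf_ratio = 32;      /* policy limits      */

	double busy_frac = mperf_delta / tsc_delta;
	if (busy_frac < iowait_boost)
		busy_frac = iowait_boost;

	int target = (int)((turbo_disabled ? max_pstate : turbo_pstate) * busy_frac);
	if (target < min_pstate)
		target = min_pstate;

	/* prepare_request: keep the result inside the policy's perf ratios */
	target = clamp_int(target, min_perf_ratio, max_perf_ratio);

	printf("busy=%.0f%% -> target pstate %d\n", busy_frac * 100, target);
	return 0;
}
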
cpu              1800 drivers/cpufreq/intel_pstate.c static void intel_pstate_update_pstate(struct cpudata *cpu, int pstate)
cpu              1802 drivers/cpufreq/intel_pstate.c 	if (pstate == cpu->pstate.current_pstate)
cpu              1805 drivers/cpufreq/intel_pstate.c 	cpu->pstate.current_pstate = pstate;
cpu              1806 drivers/cpufreq/intel_pstate.c 	wrmsrl(MSR_IA32_PERF_CTL, pstate_funcs.get_val(cpu, pstate));
cpu              1809 drivers/cpufreq/intel_pstate.c static void intel_pstate_adjust_pstate(struct cpudata *cpu)
cpu              1811 drivers/cpufreq/intel_pstate.c 	int from = cpu->pstate.current_pstate;
cpu              1817 drivers/cpufreq/intel_pstate.c 	target_pstate = get_target_pstate(cpu);
cpu              1818 drivers/cpufreq/intel_pstate.c 	target_pstate = intel_pstate_prepare_request(cpu, target_pstate);
cpu              1819 drivers/cpufreq/intel_pstate.c 	trace_cpu_frequency(target_pstate * cpu->pstate.scaling, cpu->cpu);
cpu              1820 drivers/cpufreq/intel_pstate.c 	intel_pstate_update_pstate(cpu, target_pstate);
cpu              1822 drivers/cpufreq/intel_pstate.c 	sample = &cpu->sample;
cpu              1826 drivers/cpufreq/intel_pstate.c 		cpu->pstate.current_pstate,
cpu              1830 drivers/cpufreq/intel_pstate.c 		get_avg_frequency(cpu),
cpu              1831 drivers/cpufreq/intel_pstate.c 		fp_toint(cpu->iowait_boost * 100));
cpu              1837 drivers/cpufreq/intel_pstate.c 	struct cpudata *cpu = container_of(data, struct cpudata, update_util);
cpu              1841 drivers/cpufreq/intel_pstate.c 	if (smp_processor_id() != cpu->cpu)
cpu              1844 drivers/cpufreq/intel_pstate.c 	delta_ns = time - cpu->last_update;
cpu              1848 drivers/cpufreq/intel_pstate.c 			cpu->iowait_boost = ONE_EIGHTH_FP;
cpu              1849 drivers/cpufreq/intel_pstate.c 		} else if (cpu->iowait_boost >= ONE_EIGHTH_FP) {
cpu              1850 drivers/cpufreq/intel_pstate.c 			cpu->iowait_boost <<= 1;
cpu              1851 drivers/cpufreq/intel_pstate.c 			if (cpu->iowait_boost > int_tofp(1))
cpu              1852 drivers/cpufreq/intel_pstate.c 				cpu->iowait_boost = int_tofp(1);
cpu              1854 drivers/cpufreq/intel_pstate.c 			cpu->iowait_boost = ONE_EIGHTH_FP;
cpu              1856 drivers/cpufreq/intel_pstate.c 	} else if (cpu->iowait_boost) {
cpu              1859 drivers/cpufreq/intel_pstate.c 			cpu->iowait_boost = 0;
cpu              1861 drivers/cpufreq/intel_pstate.c 			cpu->iowait_boost >>= 1;
cpu              1863 drivers/cpufreq/intel_pstate.c 	cpu->last_update = time;
cpu              1864 drivers/cpufreq/intel_pstate.c 	delta_ns = time - cpu->sample.time;
cpu              1868 drivers/cpufreq/intel_pstate.c 	if (intel_pstate_sample(cpu, time))
cpu              1869 drivers/cpufreq/intel_pstate.c 		intel_pstate_adjust_pstate(cpu);
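
The update_util callback above (lines 1844-1861) grows the iowait boost on back-to-back I/O wakeups and decays it once they stop. A sketch of that doubling/halving, with the TICK_NSEC staleness check reduced to a boolean and ONE_EIGHTH_FP modelled as 0.125:

#include <stdio.h>

#define BOOST_MIN 0.125

static double update_boost(double boost, int iowait, int was_idle_long)
{
	if (iowait) {
		if (was_idle_long || boost < BOOST_MIN)
			return BOOST_MIN;                 /* (re)start at 1/8 */
		boost *= 2;                               /* back-to-back wakeup */
		return boost > 1.0 ? 1.0 : boost;         /* saturate at 1.0  */
	}
	if (boost)
		return was_idle_long ? 0.0 : boost / 2;   /* decay or drop    */
	return 0.0;
}

int main(void)
{
	double boost = 0.0;

	for (int i = 0; i < 4; i++)
		printf("wakeup %d: boost=%.3f\n", i, boost = update_boost(boost, 1, 0));
	for (int i = 0; i < 4; i++)
		printf("quiet  %d: boost=%.3f\n", i, boost = update_boost(boost, 0, 0));
	return 0;
}
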
cpu              1961 drivers/cpufreq/intel_pstate.c 	struct cpudata *cpu;
cpu              1963 drivers/cpufreq/intel_pstate.c 	cpu = all_cpu_data[cpunum];
cpu              1965 drivers/cpufreq/intel_pstate.c 	if (!cpu) {
cpu              1966 drivers/cpufreq/intel_pstate.c 		cpu = kzalloc(sizeof(*cpu), GFP_KERNEL);
cpu              1967 drivers/cpufreq/intel_pstate.c 		if (!cpu)
cpu              1970 drivers/cpufreq/intel_pstate.c 		all_cpu_data[cpunum] = cpu;
cpu              1972 drivers/cpufreq/intel_pstate.c 		cpu->epp_default = -EINVAL;
cpu              1973 drivers/cpufreq/intel_pstate.c 		cpu->epp_powersave = -EINVAL;
cpu              1974 drivers/cpufreq/intel_pstate.c 		cpu->epp_saved = -EINVAL;
cpu              1977 drivers/cpufreq/intel_pstate.c 	cpu = all_cpu_data[cpunum];
cpu              1979 drivers/cpufreq/intel_pstate.c 	cpu->cpu = cpunum;
cpu              1988 drivers/cpufreq/intel_pstate.c 		intel_pstate_hwp_enable(cpu);
cpu              1995 drivers/cpufreq/intel_pstate.c 	intel_pstate_get_cpu_pstates(cpu);
cpu              2004 drivers/cpufreq/intel_pstate.c 	struct cpudata *cpu = all_cpu_data[cpu_num];
cpu              2009 drivers/cpufreq/intel_pstate.c 	if (cpu->update_util_set)
cpu              2013 drivers/cpufreq/intel_pstate.c 	cpu->sample.time = 0;
cpu              2014 drivers/cpufreq/intel_pstate.c 	cpufreq_add_update_util_hook(cpu_num, &cpu->update_util,
cpu              2018 drivers/cpufreq/intel_pstate.c 	cpu->update_util_set = true;
cpu              2021 drivers/cpufreq/intel_pstate.c static void intel_pstate_clear_update_util_hook(unsigned int cpu)
cpu              2023 drivers/cpufreq/intel_pstate.c 	struct cpudata *cpu_data = all_cpu_data[cpu];
cpu              2028 drivers/cpufreq/intel_pstate.c 	cpufreq_remove_update_util_hook(cpu);
cpu              2033 drivers/cpufreq/intel_pstate.c static int intel_pstate_get_max_freq(struct cpudata *cpu)
cpu              2036 drivers/cpufreq/intel_pstate.c 			cpu->pstate.max_freq : cpu->pstate.turbo_freq;
cpu              2039 drivers/cpufreq/intel_pstate.c static void intel_pstate_update_perf_limits(struct cpudata *cpu,
cpu              2043 drivers/cpufreq/intel_pstate.c 	int max_freq = intel_pstate_get_max_freq(cpu);
cpu              2053 drivers/cpufreq/intel_pstate.c 		intel_pstate_get_hwp_max(cpu->cpu, &turbo_max, &max_state);
cpu              2056 drivers/cpufreq/intel_pstate.c 			cpu->pstate.max_pstate : cpu->pstate.turbo_pstate;
cpu              2057 drivers/cpufreq/intel_pstate.c 		turbo_max = cpu->pstate.turbo_pstate;
cpu              2070 drivers/cpufreq/intel_pstate.c 		 cpu->cpu, max_state, min_policy_perf, max_policy_perf);
cpu              2074 drivers/cpufreq/intel_pstate.c 		cpu->min_perf_ratio = min_policy_perf;
cpu              2075 drivers/cpufreq/intel_pstate.c 		cpu->max_perf_ratio = max_policy_perf;
cpu              2084 drivers/cpufreq/intel_pstate.c 		pr_debug("cpu:%d global_min:%d global_max:%d\n", cpu->cpu,
cpu              2087 drivers/cpufreq/intel_pstate.c 		cpu->min_perf_ratio = max(min_policy_perf, global_min);
cpu              2088 drivers/cpufreq/intel_pstate.c 		cpu->min_perf_ratio = min(cpu->min_perf_ratio, max_policy_perf);
cpu              2089 drivers/cpufreq/intel_pstate.c 		cpu->max_perf_ratio = min(max_policy_perf, global_max);
cpu              2090 drivers/cpufreq/intel_pstate.c 		cpu->max_perf_ratio = max(min_policy_perf, cpu->max_perf_ratio);
cpu              2093 drivers/cpufreq/intel_pstate.c 		cpu->min_perf_ratio = min(cpu->min_perf_ratio,
cpu              2094 drivers/cpufreq/intel_pstate.c 					  cpu->max_perf_ratio);
cpu              2097 drivers/cpufreq/intel_pstate.c 	pr_debug("cpu:%d max_perf_ratio:%d min_perf_ratio:%d\n", cpu->cpu,
cpu              2098 drivers/cpufreq/intel_pstate.c 		 cpu->max_perf_ratio,
cpu              2099 drivers/cpufreq/intel_pstate.c 		 cpu->min_perf_ratio);
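
intel_pstate_update_perf_limits() above merges the per-policy performance levels with the driver-global limits so that the min/max pair stays ordered. A sketch of that merging with invented numbers:

#include <stdio.h>

static int max_i(int a, int b) { return a > b ? a : b; }
static int min_i(int a, int b) { return a < b ? a : b; }

int main(void)
{
	int min_policy_perf = 12, max_policy_perf = 30;  /* from policy->min/max  */
	int global_min = 8, global_max = 26;             /* from global limits    */

	int min_perf_ratio = max_i(min_policy_perf, global_min);
	min_perf_ratio = min_i(min_perf_ratio, max_policy_perf);
	int max_perf_ratio = min_i(max_policy_perf, global_max);
	max_perf_ratio = max_i(min_policy_perf, max_perf_ratio);

	/* keep the pair ordered, as in the final min() shown above */
	min_perf_ratio = min_i(min_perf_ratio, max_perf_ratio);

	printf("min_perf_ratio=%d max_perf_ratio=%d\n", min_perf_ratio, max_perf_ratio);
	return 0;
}
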
cpu              2104 drivers/cpufreq/intel_pstate.c 	struct cpudata *cpu;
cpu              2112 drivers/cpufreq/intel_pstate.c 	cpu = all_cpu_data[policy->cpu];
cpu              2113 drivers/cpufreq/intel_pstate.c 	cpu->policy = policy->policy;
cpu              2117 drivers/cpufreq/intel_pstate.c 	intel_pstate_update_perf_limits(cpu, policy->min, policy->max);
cpu              2119 drivers/cpufreq/intel_pstate.c 	if (cpu->policy == CPUFREQ_POLICY_PERFORMANCE) {
cpu              2124 drivers/cpufreq/intel_pstate.c 		intel_pstate_clear_update_util_hook(policy->cpu);
cpu              2125 drivers/cpufreq/intel_pstate.c 		intel_pstate_max_within_limits(cpu);
cpu              2127 drivers/cpufreq/intel_pstate.c 		intel_pstate_set_update_util_hook(policy->cpu);
cpu              2137 drivers/cpufreq/intel_pstate.c 			intel_pstate_clear_update_util_hook(policy->cpu);
cpu              2138 drivers/cpufreq/intel_pstate.c 		intel_pstate_hwp_set(policy->cpu);
cpu              2146 drivers/cpufreq/intel_pstate.c static void intel_pstate_adjust_policy_max(struct cpudata *cpu,
cpu              2150 drivers/cpufreq/intel_pstate.c 	    cpu->pstate.max_pstate_physical > cpu->pstate.max_pstate &&
cpu              2152 drivers/cpufreq/intel_pstate.c 	    policy->max > cpu->pstate.max_freq) {
cpu              2160 drivers/cpufreq/intel_pstate.c 	struct cpudata *cpu = all_cpu_data[policy->cpu];
cpu              2164 drivers/cpufreq/intel_pstate.c 				     intel_pstate_get_max_freq(cpu));
cpu              2166 drivers/cpufreq/intel_pstate.c 	intel_pstate_adjust_policy_max(cpu, policy);
cpu              2173 drivers/cpufreq/intel_pstate.c 	intel_pstate_set_min_pstate(all_cpu_data[policy->cpu]);
cpu              2178 drivers/cpufreq/intel_pstate.c 	pr_debug("CPU %d exiting\n", policy->cpu);
cpu              2180 drivers/cpufreq/intel_pstate.c 	intel_pstate_clear_update_util_hook(policy->cpu);
cpu              2183 drivers/cpufreq/intel_pstate.c 		intel_pstate_hwp_force_min_perf(policy->cpu);
cpu              2200 drivers/cpufreq/intel_pstate.c 	struct cpudata *cpu;
cpu              2203 drivers/cpufreq/intel_pstate.c 	rc = intel_pstate_init_cpu(policy->cpu);
cpu              2207 drivers/cpufreq/intel_pstate.c 	cpu = all_cpu_data[policy->cpu];
cpu              2209 drivers/cpufreq/intel_pstate.c 	cpu->max_perf_ratio = 0xFF;
cpu              2210 drivers/cpufreq/intel_pstate.c 	cpu->min_perf_ratio = 0;
cpu              2212 drivers/cpufreq/intel_pstate.c 	policy->min = cpu->pstate.min_pstate * cpu->pstate.scaling;
cpu              2213 drivers/cpufreq/intel_pstate.c 	policy->max = cpu->pstate.turbo_pstate * cpu->pstate.scaling;
cpu              2216 drivers/cpufreq/intel_pstate.c 	policy->cpuinfo.min_freq = cpu->pstate.min_pstate * cpu->pstate.scaling;
cpu              2220 drivers/cpufreq/intel_pstate.c 			cpu->pstate.max_pstate : cpu->pstate.turbo_pstate;
cpu              2221 drivers/cpufreq/intel_pstate.c 	policy->cpuinfo.max_freq *= cpu->pstate.scaling;
cpu              2227 drivers/cpufreq/intel_pstate.c 			cpu->pstate.max_freq : cpu->pstate.turbo_freq;
cpu              2269 drivers/cpufreq/intel_pstate.c 	struct cpudata *cpu = all_cpu_data[policy->cpu];
cpu              2273 drivers/cpufreq/intel_pstate.c 				     intel_pstate_get_max_freq(cpu));
cpu              2275 drivers/cpufreq/intel_pstate.c 	intel_pstate_adjust_policy_max(cpu, policy);
cpu              2277 drivers/cpufreq/intel_pstate.c 	intel_pstate_update_perf_limits(cpu, policy->min, policy->max);
cpu              2298 drivers/cpufreq/intel_pstate.c static void intel_cpufreq_trace(struct cpudata *cpu, unsigned int trace_type, int old_pstate)
cpu              2305 drivers/cpufreq/intel_pstate.c 	if (!intel_pstate_sample(cpu, ktime_get()))
cpu              2308 drivers/cpufreq/intel_pstate.c 	sample = &cpu->sample;
cpu              2312 drivers/cpufreq/intel_pstate.c 		cpu->pstate.current_pstate,
cpu              2316 drivers/cpufreq/intel_pstate.c 		get_avg_frequency(cpu),
cpu              2317 drivers/cpufreq/intel_pstate.c 		fp_toint(cpu->iowait_boost * 100));
cpu              2324 drivers/cpufreq/intel_pstate.c 	struct cpudata *cpu = all_cpu_data[policy->cpu];
cpu              2336 drivers/cpufreq/intel_pstate.c 		target_pstate = DIV_ROUND_UP(freqs.new, cpu->pstate.scaling);
cpu              2339 drivers/cpufreq/intel_pstate.c 		target_pstate = freqs.new / cpu->pstate.scaling;
cpu              2342 drivers/cpufreq/intel_pstate.c 		target_pstate = DIV_ROUND_CLOSEST(freqs.new, cpu->pstate.scaling);
cpu              2345 drivers/cpufreq/intel_pstate.c 	target_pstate = intel_pstate_prepare_request(cpu, target_pstate);
cpu              2346 drivers/cpufreq/intel_pstate.c 	old_pstate = cpu->pstate.current_pstate;
cpu              2347 drivers/cpufreq/intel_pstate.c 	if (target_pstate != cpu->pstate.current_pstate) {
cpu              2348 drivers/cpufreq/intel_pstate.c 		cpu->pstate.current_pstate = target_pstate;
cpu              2349 drivers/cpufreq/intel_pstate.c 		wrmsrl_on_cpu(policy->cpu, MSR_IA32_PERF_CTL,
cpu              2350 drivers/cpufreq/intel_pstate.c 			      pstate_funcs.get_val(cpu, target_pstate));
cpu              2352 drivers/cpufreq/intel_pstate.c 	freqs.new = target_pstate * cpu->pstate.scaling;
cpu              2353 drivers/cpufreq/intel_pstate.c 	intel_cpufreq_trace(cpu, INTEL_PSTATE_TRACE_TARGET, old_pstate);
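
intel_cpufreq_target() above chooses a rounding mode per cpufreq relation when converting the requested frequency into a pstate: round up for a lower bound, round down for an upper bound, otherwise round to nearest. A sketch of that conversion; the 100 MHz-per-pstate scaling step is only an example value:

#include <stdio.h>

enum relation { RELATION_L, RELATION_H, RELATION_C };

static unsigned int freq_to_pstate(unsigned int freq, unsigned int scaling,
				   enum relation rel)
{
	switch (rel) {
	case RELATION_L:
		return (freq + scaling - 1) / scaling;   /* DIV_ROUND_UP      */
	case RELATION_H:
		return freq / scaling;                   /* round down        */
	default:
		return (freq + scaling / 2) / scaling;   /* DIV_ROUND_CLOSEST */
	}
}

int main(void)
{
	unsigned int scaling = 100000;   /* kHz per pstate step, illustrative */

	printf("L: %u  H: %u  C: %u\n",
	       freq_to_pstate(2150000, scaling, RELATION_L),
	       freq_to_pstate(2150000, scaling, RELATION_H),
	       freq_to_pstate(2150000, scaling, RELATION_C));
	return 0;
}
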
cpu              2362 drivers/cpufreq/intel_pstate.c 	struct cpudata *cpu = all_cpu_data[policy->cpu];
cpu              2367 drivers/cpufreq/intel_pstate.c 	target_pstate = DIV_ROUND_UP(target_freq, cpu->pstate.scaling);
cpu              2368 drivers/cpufreq/intel_pstate.c 	target_pstate = intel_pstate_prepare_request(cpu, target_pstate);
cpu              2369 drivers/cpufreq/intel_pstate.c 	old_pstate = cpu->pstate.current_pstate;
cpu              2370 drivers/cpufreq/intel_pstate.c 	intel_pstate_update_pstate(cpu, target_pstate);
cpu              2371 drivers/cpufreq/intel_pstate.c 	intel_cpufreq_trace(cpu, INTEL_PSTATE_TRACE_FAST_SWITCH, old_pstate);
cpu              2372 drivers/cpufreq/intel_pstate.c 	return target_pstate * cpu->pstate.scaling;
cpu              2379 drivers/cpufreq/intel_pstate.c 	struct cpudata *cpu;
cpu              2382 drivers/cpufreq/intel_pstate.c 	dev = get_cpu_device(policy->cpu);
cpu              2401 drivers/cpufreq/intel_pstate.c 	cpu = all_cpu_data[policy->cpu];
cpu              2404 drivers/cpufreq/intel_pstate.c 		intel_pstate_get_hwp_max(policy->cpu, &turbo_max, &max_state);
cpu              2406 drivers/cpufreq/intel_pstate.c 		turbo_max = cpu->pstate.turbo_pstate;
cpu              2409 drivers/cpufreq/intel_pstate.c 	min_freq *= cpu->pstate.scaling;
cpu              2411 drivers/cpufreq/intel_pstate.c 	max_freq *= cpu->pstate.scaling;
cpu              2470 drivers/cpufreq/intel_pstate.c 	unsigned int cpu;
cpu              2473 drivers/cpufreq/intel_pstate.c 	for_each_online_cpu(cpu) {
cpu              2474 drivers/cpufreq/intel_pstate.c 		if (all_cpu_data[cpu]) {
cpu              2476 drivers/cpufreq/intel_pstate.c 				intel_pstate_clear_update_util_hook(cpu);
cpu              2478 drivers/cpufreq/intel_pstate.c 			kfree(all_cpu_data[cpu]);
cpu              2479 drivers/cpufreq/intel_pstate.c 			all_cpu_data[cpu] = NULL;
cpu                45 drivers/cpufreq/kirkwood-cpufreq.c static unsigned int kirkwood_cpufreq_get_cpu_frequency(unsigned int cpu)
cpu               661 drivers/cpufreq/longhaul.c static unsigned int longhaul_get(unsigned int cpu)
cpu               663 drivers/cpufreq/longhaul.c 	if (cpu)
cpu                60 drivers/cpufreq/longrun.c 	policy->cpu = 0;
cpu               130 drivers/cpufreq/longrun.c 	policy->cpu = 0;
cpu               136 drivers/cpufreq/longrun.c static unsigned int longrun_get(unsigned int cpu)
cpu               140 drivers/cpufreq/longrun.c 	if (cpu)
cpu               257 drivers/cpufreq/longrun.c 	if (policy->cpu != 0)
cpu                52 drivers/cpufreq/loongson1-cpufreq.c 	struct device *cpu_dev = get_cpu_device(policy->cpu);
cpu                81 drivers/cpufreq/loongson1-cpufreq.c 	struct device *cpu_dev = get_cpu_device(policy->cpu);
cpu               136 drivers/cpufreq/maple-cpufreq.c static unsigned int maple_cpufreq_get_speed(unsigned int cpu)
cpu                49 drivers/cpufreq/mediatek-cpufreq.c static struct mtk_cpu_dvfs_info *mtk_cpu_dvfs_info_lookup(int cpu)
cpu                54 drivers/cpufreq/mediatek-cpufreq.c 		if (cpumask_test_cpu(cpu, &info->cpus))
cpu               228 drivers/cpufreq/mediatek-cpufreq.c 		       policy->cpu, freq_hz);
cpu               243 drivers/cpufreq/mediatek-cpufreq.c 			       policy->cpu);
cpu               253 drivers/cpufreq/mediatek-cpufreq.c 		       policy->cpu);
cpu               263 drivers/cpufreq/mediatek-cpufreq.c 		       policy->cpu);
cpu               273 drivers/cpufreq/mediatek-cpufreq.c 		       policy->cpu);
cpu               287 drivers/cpufreq/mediatek-cpufreq.c 			       policy->cpu);
cpu               300 drivers/cpufreq/mediatek-cpufreq.c static int mtk_cpu_dvfs_info_init(struct mtk_cpu_dvfs_info *info, int cpu)
cpu               311 drivers/cpufreq/mediatek-cpufreq.c 	cpu_dev = get_cpu_device(cpu);
cpu               313 drivers/cpufreq/mediatek-cpufreq.c 		pr_err("failed to get cpu%d device\n", cpu);
cpu               320 drivers/cpufreq/mediatek-cpufreq.c 			pr_warn("cpu clk for cpu%d not ready, retry.\n", cpu);
cpu               322 drivers/cpufreq/mediatek-cpufreq.c 			pr_err("failed to get cpu clk for cpu%d\n", cpu);
cpu               332 drivers/cpufreq/mediatek-cpufreq.c 				cpu);
cpu               335 drivers/cpufreq/mediatek-cpufreq.c 			       cpu);
cpu               345 drivers/cpufreq/mediatek-cpufreq.c 				cpu);
cpu               348 drivers/cpufreq/mediatek-cpufreq.c 			       cpu);
cpu               361 drivers/cpufreq/mediatek-cpufreq.c 		       cpu);
cpu               367 drivers/cpufreq/mediatek-cpufreq.c 		pr_warn("no OPP table for cpu%d\n", cpu);
cpu               375 drivers/cpufreq/mediatek-cpufreq.c 		pr_err("failed to get intermediate opp for cpu%d\n", cpu);
cpu               432 drivers/cpufreq/mediatek-cpufreq.c 	info = mtk_cpu_dvfs_info_lookup(policy->cpu);
cpu               435 drivers/cpufreq/mediatek-cpufreq.c 		       policy->cpu);
cpu               442 drivers/cpufreq/mediatek-cpufreq.c 		       policy->cpu, ret);
cpu               481 drivers/cpufreq/mediatek-cpufreq.c 	int cpu, ret;
cpu               483 drivers/cpufreq/mediatek-cpufreq.c 	for_each_possible_cpu(cpu) {
cpu               484 drivers/cpufreq/mediatek-cpufreq.c 		info = mtk_cpu_dvfs_info_lookup(cpu);
cpu               494 drivers/cpufreq/mediatek-cpufreq.c 		ret = mtk_cpu_dvfs_info_init(info, cpu);
cpu               498 drivers/cpufreq/mediatek-cpufreq.c 				cpu);
cpu                30 drivers/cpufreq/mvebu-cpufreq.c 	int ret, cpu;
cpu                62 drivers/cpufreq/mvebu-cpufreq.c 	for_each_possible_cpu(cpu) {
cpu                67 drivers/cpufreq/mvebu-cpufreq.c 		cpu_dev = get_cpu_device(cpu);
cpu                69 drivers/cpufreq/mvebu-cpufreq.c 			pr_err("Cannot get CPU %d\n", cpu);
cpu                75 drivers/cpufreq/mvebu-cpufreq.c 			pr_err("Cannot get clock for CPU %d\n", cpu);
cpu               124 drivers/cpufreq/omap-cpufreq.c 				__func__, policy->cpu, result);
cpu                50 drivers/cpufreq/p4-clockmod.c static unsigned int cpufreq_p4_get(unsigned int cpu);
cpu                52 drivers/cpufreq/p4-clockmod.c static int cpufreq_p4_setdc(unsigned int cpu, unsigned int newstate)
cpu                59 drivers/cpufreq/p4-clockmod.c 	rdmsr_on_cpu(cpu, MSR_IA32_THERM_STATUS, &l, &h);
cpu                62 drivers/cpufreq/p4-clockmod.c 		pr_debug("CPU#%d currently thermal throttled\n", cpu);
cpu                64 drivers/cpufreq/p4-clockmod.c 	if (has_N44_O17_errata[cpu] &&
cpu                68 drivers/cpufreq/p4-clockmod.c 	rdmsr_on_cpu(cpu, MSR_IA32_THERM_CONTROL, &l, &h);
cpu                70 drivers/cpufreq/p4-clockmod.c 		pr_debug("CPU#%d disabling modulation\n", cpu);
cpu                71 drivers/cpufreq/p4-clockmod.c 		wrmsr_on_cpu(cpu, MSR_IA32_THERM_CONTROL, l & ~(1<<4), h);
cpu                74 drivers/cpufreq/p4-clockmod.c 			cpu, ((125 * newstate) / 10));
cpu                82 drivers/cpufreq/p4-clockmod.c 		wrmsr_on_cpu(cpu, MSR_IA32_THERM_CONTROL, l, h);
cpu               157 drivers/cpufreq/p4-clockmod.c 	struct cpuinfo_x86 *c = &cpu_data(policy->cpu);
cpu               162 drivers/cpufreq/p4-clockmod.c 	cpumask_copy(policy->cpus, topology_sibling_cpumask(policy->cpu));
cpu               172 drivers/cpufreq/p4-clockmod.c 		has_N44_O17_errata[policy->cpu] = 1;
cpu               179 drivers/cpufreq/p4-clockmod.c 		cpufreq_p4_setdc(policy->cpu, DC_DISABLE);
cpu               189 drivers/cpufreq/p4-clockmod.c 		if ((i < 2) && (has_N44_O17_errata[policy->cpu]))
cpu               206 drivers/cpufreq/p4-clockmod.c static unsigned int cpufreq_p4_get(unsigned int cpu)
cpu               210 drivers/cpufreq/p4-clockmod.c 	rdmsr_on_cpu(cpu, MSR_IA32_THERM_CONTROL, &l, &h);
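
cpufreq_p4_setdc() above programs IA32_THERM_CONTROL: one bit enables clock modulation and a small field carries the duty-cycle step, each step being 12.5% (hence the "125 * newstate / 10" debug print), while DC_DISABLE simply clears the enable bit. The exact bit positions in this sketch are my reading of the driver, not something visible in the listing:

#include <stdint.h>
#include <stdio.h>

#define DC_DISABLE 0

static uint32_t apply_dc(uint32_t msr, unsigned int newstate)
{
	if (newstate == DC_DISABLE)
		return msr & ~(1u << 4);        /* turn modulation off (assumed bit 4) */

	msr &= ~0xeu;                           /* clear duty-cycle field (assumed bits 3..1) */
	return msr | (1u << 4) | ((newstate & 0x7u) << 1);
}

int main(void)
{
	unsigned int newstate = 4;              /* 4/8 of full speed */

	printf("duty cycle %u%% -> MSR low word 0x%x\n",
	       125 * newstate / 10, apply_dc(0, newstate));
	return 0;
}
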
cpu                69 drivers/cpufreq/pasemi-cpufreq.c static int get_cur_astate(int cpu)
cpu                74 drivers/cpufreq/pasemi-cpufreq.c 	ret = (ret >> (cpu * 4)) & 0x7;
cpu                94 drivers/cpufreq/pasemi-cpufreq.c static void set_astate(int cpu, unsigned int astate)
cpu               104 drivers/cpufreq/pasemi-cpufreq.c 	out_le32(sdcasr_mapbase + SDCASR_REG + SDCASR_REG_STRIDE*cpu, astate);
cpu               114 drivers/cpufreq/pasemi-cpufreq.c void restore_astate(int cpu)
cpu               116 drivers/cpufreq/pasemi-cpufreq.c 	set_astate(cpu, current_astate);
cpu               130 drivers/cpufreq/pasemi-cpufreq.c 	struct device_node *cpu, *dn;
cpu               133 drivers/cpufreq/pasemi-cpufreq.c 	cpu = of_get_cpu_node(policy->cpu, NULL);
cpu               134 drivers/cpufreq/pasemi-cpufreq.c 	if (!cpu)
cpu               137 drivers/cpufreq/pasemi-cpufreq.c 	max_freqp = of_get_property(cpu, "clock-frequency", NULL);
cpu               138 drivers/cpufreq/pasemi-cpufreq.c 	of_node_put(cpu);
cpu               181 drivers/cpufreq/pasemi-cpufreq.c 	pr_debug("init cpufreq on CPU %d\n", policy->cpu);
cpu               191 drivers/cpufreq/pasemi-cpufreq.c 	cur_astate = get_cur_astate(policy->cpu);
cpu               229 drivers/cpufreq/pasemi-cpufreq.c 		 policy->cpu,
cpu               140 drivers/cpufreq/pcc-cpufreq.c static unsigned int pcc_get_freq(unsigned int cpu)
cpu               151 drivers/cpufreq/pcc-cpufreq.c 	pr_debug("get: get_freq for CPU %d\n", cpu);
cpu               152 drivers/cpufreq/pcc-cpufreq.c 	pcc_cpu_data = per_cpu_ptr(pcc_cpu_info, cpu);
cpu               170 drivers/cpufreq/pcc-cpufreq.c 			cpu, status);
cpu               179 drivers/cpufreq/pcc-cpufreq.c 		cpu, (pcch_virt_addr + pcc_cpu_data->output_offset),
cpu               185 drivers/cpufreq/pcc-cpufreq.c 			" capped at %d\n", cpu, curr_freq);
cpu               205 drivers/cpufreq/pcc-cpufreq.c 	int cpu;
cpu               207 drivers/cpufreq/pcc-cpufreq.c 	cpu = policy->cpu;
cpu               208 drivers/cpufreq/pcc-cpufreq.c 	pcc_cpu_data = per_cpu_ptr(pcc_cpu_info, cpu);
cpu               212 drivers/cpufreq/pcc-cpufreq.c 		cpu, target_freq,
cpu               239 drivers/cpufreq/pcc-cpufreq.c 			cpu, status);
cpu               243 drivers/cpufreq/pcc-cpufreq.c 	pr_debug("target: was SUCCESSFUL for cpu %d\n", cpu);
cpu               248 drivers/cpufreq/pcc-cpufreq.c static int pcc_get_offset(int cpu)
cpu               257 drivers/cpufreq/pcc-cpufreq.c 	pr = per_cpu(processors, cpu);
cpu               258 drivers/cpufreq/pcc-cpufreq.c 	pcc_cpu_data = per_cpu_ptr(pcc_cpu_info, cpu);
cpu               294 drivers/cpufreq/pcc-cpufreq.c 		cpu, pcc_cpu_data->input_offset, pcc_cpu_data->output_offset);
cpu               539 drivers/cpufreq/pcc-cpufreq.c 	unsigned int cpu = policy->cpu;
cpu               547 drivers/cpufreq/pcc-cpufreq.c 	result = pcc_get_offset(cpu);
cpu               357 drivers/cpufreq/pmac32-cpufreq.c static unsigned int pmac_cpufreq_get_speed(unsigned int cpu)
cpu               317 drivers/cpufreq/pmac64-cpufreq.c static unsigned int g5_cpufreq_get_speed(unsigned int cpu)
cpu               159 drivers/cpufreq/powernow-k6.c 	if (policy->cpu != 0)
cpu               243 drivers/cpufreq/powernow-k6.c static unsigned int powernow_k6_get(unsigned int cpu)
cpu               553 drivers/cpufreq/powernow-k7.c static unsigned int powernow_get(unsigned int cpu)
cpu               558 drivers/cpufreq/powernow-k7.c 	if (cpu)
cpu               598 drivers/cpufreq/powernow-k7.c 	if (policy->cpu != 0)
cpu               609 drivers/cpufreq/powernow-k8.c 	if (cpumask_first(topology_core_cpumask(data->cpu)) == data->cpu)
cpu               732 drivers/cpufreq/powernow-k8.c 	if (acpi_processor_register_performance(&data->acpi_data, data->cpu)) {
cpu               771 drivers/cpufreq/powernow-k8.c 	if (cpumask_first(topology_core_cpumask(data->cpu)) == data->cpu)
cpu               789 drivers/cpufreq/powernow-k8.c 	acpi_processor_unregister_performance(data->cpu);
cpu               857 drivers/cpufreq/powernow-k8.c 		acpi_processor_unregister_performance(data->cpu);
cpu               935 drivers/cpufreq/powernow-k8.c 	struct powernow_k8_data *data = per_cpu(powernow_data, pol->cpu);
cpu               952 drivers/cpufreq/powernow-k8.c 		pol->cpu, data->powernow_table[newstate].frequency, pol->min,
cpu               991 drivers/cpufreq/powernow-k8.c 	return work_on_cpu(pol->cpu, powernowk8_target_fn, &pta);
cpu              1029 drivers/cpufreq/powernow-k8.c 	int rc, cpu;
cpu              1031 drivers/cpufreq/powernow-k8.c 	smp_call_function_single(pol->cpu, check_supported_cpu, &rc, 1);
cpu              1039 drivers/cpufreq/powernow-k8.c 	data->cpu = pol->cpu;
cpu              1050 drivers/cpufreq/powernow-k8.c 		if (pol->cpu != 0) {
cpu              1068 drivers/cpufreq/powernow-k8.c 	smp_call_function_single(data->cpu, powernowk8_cpu_init_on_cpu,
cpu              1074 drivers/cpufreq/powernow-k8.c 	cpumask_copy(pol->cpus, topology_core_cpumask(pol->cpu));
cpu              1082 drivers/cpufreq/powernow-k8.c 	for_each_cpu(cpu, pol->cpus)
cpu              1083 drivers/cpufreq/powernow-k8.c 		per_cpu(powernow_data, cpu) = data;
cpu              1097 drivers/cpufreq/powernow-k8.c 	struct powernow_k8_data *data = per_cpu(powernow_data, pol->cpu);
cpu              1098 drivers/cpufreq/powernow-k8.c 	int cpu;
cpu              1107 drivers/cpufreq/powernow-k8.c 	for_each_cpu(cpu, pol->cpus)
cpu              1108 drivers/cpufreq/powernow-k8.c 		per_cpu(powernow_data, cpu) = NULL;
cpu              1121 drivers/cpufreq/powernow-k8.c static unsigned int powernowk8_get(unsigned int cpu)
cpu              1123 drivers/cpufreq/powernow-k8.c 	struct powernow_k8_data *data = per_cpu(powernow_data, cpu);
cpu              1130 drivers/cpufreq/powernow-k8.c 	smp_call_function_single(cpu, query_values_on_cpu, &err, true);
cpu                 7 drivers/cpufreq/powernow-k8.h 	unsigned int cpu;
cpu               398 drivers/cpufreq/powernv-cpufreq.c 	struct chip *chip = per_cpu(chip_info, policy->cpu);		\
cpu               504 drivers/cpufreq/powernv-cpufreq.c static unsigned int powernv_cpufreq_get(unsigned int cpu)
cpu               508 drivers/cpufreq/powernv-cpufreq.c 	smp_call_function_any(cpu_sibling_mask(cpu), powernv_read_cpu_freq,
cpu               556 drivers/cpufreq/powernv-cpufreq.c 	unsigned int cpu = smp_processor_id();
cpu               573 drivers/cpufreq/powernv-cpufreq.c 				     cpu, chip->id, pmsr_pmax,
cpu               832 drivers/cpufreq/powernv-cpufreq.c 	base = cpu_first_thread_sibling(policy->cpu);
cpu               844 drivers/cpufreq/powernv-cpufreq.c 				policy->cpu);
cpu               886 drivers/cpufreq/powernv-cpufreq.c 	int cpu;
cpu               890 drivers/cpufreq/powernv-cpufreq.c 	for_each_online_cpu(cpu) {
cpu               891 drivers/cpufreq/powernv-cpufreq.c 		cpufreq_get_policy(&cpu_policy, cpu);
cpu               905 drivers/cpufreq/powernv-cpufreq.c 	unsigned int cpu;
cpu               917 drivers/cpufreq/powernv-cpufreq.c 	for_each_cpu(cpu, &mask) {
cpu               921 drivers/cpufreq/powernv-cpufreq.c 		cpufreq_get_policy(&policy, cpu);
cpu              1010 drivers/cpufreq/powernv-cpufreq.c 	smp_call_function_single(policy->cpu, set_pstate, &freq_data, 1);
cpu              1045 drivers/cpufreq/powernv-cpufreq.c 	unsigned int cpu, i;
cpu              1053 drivers/cpufreq/powernv-cpufreq.c 	for_each_possible_cpu(cpu) {
cpu              1054 drivers/cpufreq/powernv-cpufreq.c 		unsigned int id = cpu_to_chip_id(cpu);
cpu              1072 drivers/cpufreq/powernv-cpufreq.c 		for_each_cpu(cpu, &chips[i].mask)
cpu              1073 drivers/cpufreq/powernv-cpufreq.c 			per_cpu(chip_info, cpu) =  &chips[i];
cpu                37 drivers/cpufreq/ppc_cbe_cpufreq.c static int set_pmode(unsigned int cpu, unsigned int slow_mode)
cpu                42 drivers/cpufreq/ppc_cbe_cpufreq.c 		rc = cbe_cpufreq_set_pmode_pmi(cpu, slow_mode);
cpu                44 drivers/cpufreq/ppc_cbe_cpufreq.c 		rc = cbe_cpufreq_set_pmode(cpu, slow_mode);
cpu                46 drivers/cpufreq/ppc_cbe_cpufreq.c 	pr_debug("register contains slow mode %d\n", cbe_cpufreq_get_pmode(cpu));
cpu                61 drivers/cpufreq/ppc_cbe_cpufreq.c 	struct device_node *cpu;
cpu                63 drivers/cpufreq/ppc_cbe_cpufreq.c 	cpu = of_get_cpu_node(policy->cpu, NULL);
cpu                65 drivers/cpufreq/ppc_cbe_cpufreq.c 	if (!cpu)
cpu                68 drivers/cpufreq/ppc_cbe_cpufreq.c 	pr_debug("init cpufreq on CPU %d\n", policy->cpu);
cpu                73 drivers/cpufreq/ppc_cbe_cpufreq.c 	if (!cbe_get_cpu_pmd_regs(policy->cpu) ||
cpu                74 drivers/cpufreq/ppc_cbe_cpufreq.c 	    !cbe_get_cpu_mic_tm_regs(policy->cpu)) {
cpu                76 drivers/cpufreq/ppc_cbe_cpufreq.c 		of_node_put(cpu);
cpu                80 drivers/cpufreq/ppc_cbe_cpufreq.c 	max_freqp = of_get_property(cpu, "clock-frequency", NULL);
cpu                82 drivers/cpufreq/ppc_cbe_cpufreq.c 	of_node_put(cpu);
cpu               103 drivers/cpufreq/ppc_cbe_cpufreq.c 	cur_pmode = cbe_cpufreq_get_pmode(policy->cpu);
cpu               109 drivers/cpufreq/ppc_cbe_cpufreq.c 	cpumask_copy(policy->cpus, cpu_sibling_mask(policy->cpu));
cpu               128 drivers/cpufreq/ppc_cbe_cpufreq.c 		 policy->cpu,
cpu               132 drivers/cpufreq/ppc_cbe_cpufreq.c 	return set_pmode(policy->cpu, cbe_pmode_new);
cpu                16 drivers/cpufreq/ppc_cbe_cpufreq.h int cbe_cpufreq_set_pmode(int cpu, unsigned int pmode);
cpu                17 drivers/cpufreq/ppc_cbe_cpufreq.h int cbe_cpufreq_get_pmode(int cpu);
cpu                19 drivers/cpufreq/ppc_cbe_cpufreq.h int cbe_cpufreq_set_pmode_pmi(int cpu, unsigned int pmode);
cpu                40 drivers/cpufreq/ppc_cbe_cpufreq_pervasive.c int cbe_cpufreq_set_pmode(int cpu, unsigned int pmode)
cpu                52 drivers/cpufreq/ppc_cbe_cpufreq_pervasive.c 	mic_tm_regs = cbe_get_cpu_mic_tm_regs(cpu);
cpu                53 drivers/cpufreq/ppc_cbe_cpufreq_pervasive.c 	pmd_regs = cbe_get_cpu_pmd_regs(cpu);
cpu                92 drivers/cpufreq/ppc_cbe_cpufreq_pervasive.c int cbe_cpufreq_get_pmode(int cpu)
cpu                97 drivers/cpufreq/ppc_cbe_cpufreq_pervasive.c 	pmd_regs = cbe_get_cpu_pmd_regs(cpu);
cpu                35 drivers/cpufreq/ppc_cbe_cpufreq_pmi.c int cbe_cpufreq_set_pmode_pmi(int cpu, unsigned int pmode)
cpu                43 drivers/cpufreq/ppc_cbe_cpufreq_pmi.c 	pmi_msg.data1 =	cbe_cpu_to_node(cpu);
cpu                70 drivers/cpufreq/ppc_cbe_cpufreq_pmi.c 	int cpu, ret;
cpu                77 drivers/cpufreq/ppc_cbe_cpufreq_pmi.c 	cpu = cbe_node_to_cpu(node);
cpu                81 drivers/cpufreq/ppc_cbe_cpufreq_pmi.c 	policy = cpufreq_cpu_get(cpu);
cpu                83 drivers/cpufreq/ppc_cbe_cpufreq_pmi.c 		pr_warn("cpufreq policy not found cpu%d\n", cpu);
cpu               181 drivers/cpufreq/pxa2xx-cpufreq.c static unsigned int pxa_cpufreq_get(unsigned int cpu)
cpu               151 drivers/cpufreq/pxa3xx-cpufreq.c static unsigned int pxa3xx_cpufreq_get(unsigned int cpu)
cpu               161 drivers/cpufreq/pxa3xx-cpufreq.c 	if (policy->cpu != 0)
cpu                47 drivers/cpufreq/qcom-cpufreq-hw.c static unsigned int qcom_cpufreq_hw_get(unsigned int cpu)
cpu                53 drivers/cpufreq/qcom-cpufreq-hw.c 	policy = cpufreq_cpu_get_raw(cpu);
cpu               156 drivers/cpufreq/qcom-cpufreq-hw.c 	int cpu, ret;
cpu               158 drivers/cpufreq/qcom-cpufreq-hw.c 	for_each_possible_cpu(cpu) {
cpu               159 drivers/cpufreq/qcom-cpufreq-hw.c 		cpu_np = of_cpu_device_node_get(cpu);
cpu               171 drivers/cpufreq/qcom-cpufreq-hw.c 			cpumask_set_cpu(cpu, m);
cpu               185 drivers/cpufreq/qcom-cpufreq-hw.c 	cpu_dev = get_cpu_device(policy->cpu);
cpu               188 drivers/cpufreq/qcom-cpufreq-hw.c 		       policy->cpu);
cpu               192 drivers/cpufreq/qcom-cpufreq-hw.c 	cpu_np = of_cpu_device_node_get(policy->cpu);
cpu               253 drivers/cpufreq/qcom-cpufreq-hw.c 	struct device *cpu_dev = get_cpu_device(policy->cpu);
cpu               144 drivers/cpufreq/qcom-cpufreq-nvmem.c 	unsigned cpu;
cpu               208 drivers/cpufreq/qcom-cpufreq-nvmem.c 	for_each_possible_cpu(cpu) {
cpu               209 drivers/cpufreq/qcom-cpufreq-nvmem.c 		cpu_dev = get_cpu_device(cpu);
cpu               216 drivers/cpufreq/qcom-cpufreq-nvmem.c 			drv->opp_tables[cpu] =
cpu               219 drivers/cpufreq/qcom-cpufreq-nvmem.c 			if (IS_ERR(drv->opp_tables[cpu])) {
cpu               220 drivers/cpufreq/qcom-cpufreq-nvmem.c 				ret = PTR_ERR(drv->opp_tables[cpu]);
cpu               228 drivers/cpufreq/qcom-cpufreq-nvmem.c 			drv->genpd_opp_tables[cpu] =
cpu               232 drivers/cpufreq/qcom-cpufreq-nvmem.c 			if (IS_ERR(drv->genpd_opp_tables[cpu])) {
cpu               233 drivers/cpufreq/qcom-cpufreq-nvmem.c 				ret = PTR_ERR(drv->genpd_opp_tables[cpu]);
cpu               254 drivers/cpufreq/qcom-cpufreq-nvmem.c 	for_each_possible_cpu(cpu) {
cpu               255 drivers/cpufreq/qcom-cpufreq-nvmem.c 		if (IS_ERR_OR_NULL(drv->genpd_opp_tables[cpu]))
cpu               257 drivers/cpufreq/qcom-cpufreq-nvmem.c 		dev_pm_opp_detach_genpd(drv->genpd_opp_tables[cpu]);
cpu               261 drivers/cpufreq/qcom-cpufreq-nvmem.c 	for_each_possible_cpu(cpu) {
cpu               262 drivers/cpufreq/qcom-cpufreq-nvmem.c 		if (IS_ERR_OR_NULL(drv->opp_tables[cpu]))
cpu               264 drivers/cpufreq/qcom-cpufreq-nvmem.c 		dev_pm_opp_put_supported_hw(drv->opp_tables[cpu]);
cpu               276 drivers/cpufreq/qcom-cpufreq-nvmem.c 	unsigned int cpu;
cpu               280 drivers/cpufreq/qcom-cpufreq-nvmem.c 	for_each_possible_cpu(cpu) {
cpu               281 drivers/cpufreq/qcom-cpufreq-nvmem.c 		if (drv->opp_tables[cpu])
cpu               282 drivers/cpufreq/qcom-cpufreq-nvmem.c 			dev_pm_opp_put_supported_hw(drv->opp_tables[cpu]);
cpu               283 drivers/cpufreq/qcom-cpufreq-nvmem.c 		if (drv->genpd_opp_tables[cpu])
cpu               284 drivers/cpufreq/qcom-cpufreq-nvmem.c 			dev_pm_opp_detach_genpd(drv->genpd_opp_tables[cpu]);
cpu                73 drivers/cpufreq/qoriq-cpufreq.c static struct clk *cpu_to_clk(int cpu)
cpu                78 drivers/cpufreq/qoriq-cpufreq.c 	if (!cpu_present(cpu))
cpu                81 drivers/cpufreq/qoriq-cpufreq.c 	np = of_get_cpu_node(cpu, NULL);
cpu               169 drivers/cpufreq/qoriq-cpufreq.c 	unsigned int cpu = policy->cpu;
cpu               172 drivers/cpufreq/qoriq-cpufreq.c 	np = of_get_cpu_node(cpu, NULL);
cpu                87 drivers/cpufreq/s3c2416-cpufreq.c static unsigned int s3c2416_cpufreq_get_speed(unsigned int cpu)
cpu                91 drivers/cpufreq/s3c2416-cpufreq.c 	if (cpu != 0)
cpu               341 drivers/cpufreq/s3c2416-cpufreq.c 	if (policy->cpu != 0)
cpu               317 drivers/cpufreq/s3c24xx-cpufreq.c 		tmp_policy.cpu = policy->cpu;
cpu               149 drivers/cpufreq/s3c64xx-cpufreq.c 	if (policy->cpu != 0)
cpu               519 drivers/cpufreq/s5pv210-cpufreq.c 	if (policy->cpu != 0) {
cpu                37 drivers/cpufreq/sc520_freq.c static unsigned int sc520_freq_get_cpu_frequency(unsigned int cpu)
cpu                29 drivers/cpufreq/scmi-cpufreq.c static unsigned int scmi_cpufreq_get_rate(unsigned int cpu)
cpu                31 drivers/cpufreq/scmi-cpufreq.c 	struct cpufreq_policy *policy = cpufreq_cpu_get_raw(cpu);
cpu                82 drivers/cpufreq/scmi-cpufreq.c 	int cpu, domain, tdomain;
cpu                89 drivers/cpufreq/scmi-cpufreq.c 	for_each_possible_cpu(cpu) {
cpu                90 drivers/cpufreq/scmi-cpufreq.c 		if (cpu == cpu_dev->id)
cpu                93 drivers/cpufreq/scmi-cpufreq.c 		tcpu_dev = get_cpu_device(cpu);
cpu                99 drivers/cpufreq/scmi-cpufreq.c 			cpumask_set_cpu(cpu, cpumask);
cpu               106 drivers/cpufreq/scmi-cpufreq.c scmi_get_cpu_power(unsigned long *power, unsigned long *KHz, int cpu)
cpu               108 drivers/cpufreq/scmi-cpufreq.c 	struct device *cpu_dev = get_cpu_device(cpu);
cpu               113 drivers/cpufreq/scmi-cpufreq.c 		pr_err("failed to get cpu%d device\n", cpu);
cpu               142 drivers/cpufreq/scmi-cpufreq.c 	cpu_dev = get_cpu_device(policy->cpu);
cpu               144 drivers/cpufreq/scmi-cpufreq.c 		pr_err("failed to get cpu%d device\n", policy->cpu);
cpu                40 drivers/cpufreq/scpi-cpufreq.c static unsigned int scpi_cpufreq_get_rate(unsigned int cpu)
cpu                42 drivers/cpufreq/scpi-cpufreq.c 	struct cpufreq_policy *policy = cpufreq_cpu_get_raw(cpu);
cpu                74 drivers/cpufreq/scpi-cpufreq.c 	int cpu, domain, tdomain;
cpu                81 drivers/cpufreq/scpi-cpufreq.c 	for_each_possible_cpu(cpu) {
cpu                82 drivers/cpufreq/scpi-cpufreq.c 		if (cpu == cpu_dev->id)
cpu                85 drivers/cpufreq/scpi-cpufreq.c 		tcpu_dev = get_cpu_device(cpu);
cpu                91 drivers/cpufreq/scpi-cpufreq.c 			cpumask_set_cpu(cpu, cpumask);
cpu               105 drivers/cpufreq/scpi-cpufreq.c 	cpu_dev = get_cpu_device(policy->cpu);
cpu               107 drivers/cpufreq/scpi-cpufreq.c 		pr_err("failed to get cpu%d device\n", policy->cpu);
cpu                54 drivers/cpufreq/sfi-cpufreq.c 	rdmsr_on_cpu(policy->cpu, MSR_IA32_PERF_CTL, &lo, &hi);
cpu                58 drivers/cpufreq/sfi-cpufreq.c 	wrmsr_on_cpu(policy->cpu, MSR_IA32_PERF_CTL, lo, hi);
cpu                38 drivers/cpufreq/sh-cpufreq.c static unsigned int sh_cpufreq_get(unsigned int cpu)
cpu                40 drivers/cpufreq/sh-cpufreq.c 	return (clk_get_rate(&per_cpu(sh_cpuclk, cpu)) + 500) / 1000;
cpu                47 drivers/cpufreq/sh-cpufreq.c 	int cpu = policy->cpu;
cpu                48 drivers/cpufreq/sh-cpufreq.c 	struct clk *cpuclk = &per_cpu(sh_cpuclk, cpu);
cpu                53 drivers/cpufreq/sh-cpufreq.c 	if (smp_processor_id() != cpu)
cpu                56 drivers/cpufreq/sh-cpufreq.c 	dev = get_cpu_device(cpu);
cpu                66 drivers/cpufreq/sh-cpufreq.c 	freqs.old	= sh_cpufreq_get(cpu);
cpu                87 drivers/cpufreq/sh-cpufreq.c 	return work_on_cpu(policy->cpu, __sh_cpufreq_target, &data);
cpu                92 drivers/cpufreq/sh-cpufreq.c 	struct clk *cpuclk = &per_cpu(sh_cpuclk, policy->cpu);
cpu               110 drivers/cpufreq/sh-cpufreq.c 	unsigned int cpu = policy->cpu;
cpu               111 drivers/cpufreq/sh-cpufreq.c 	struct clk *cpuclk = &per_cpu(sh_cpuclk, cpu);
cpu               115 drivers/cpufreq/sh-cpufreq.c 	dev = get_cpu_device(cpu);
cpu               141 drivers/cpufreq/sh-cpufreq.c 	unsigned int cpu = policy->cpu;
cpu               142 drivers/cpufreq/sh-cpufreq.c 	struct clk *cpuclk = &per_cpu(sh_cpuclk, cpu);
cpu               151 drivers/cpufreq/sh-cpufreq.c 	struct device *dev = get_cpu_device(policy->cpu);
cpu               234 drivers/cpufreq/sparc-us2e-cpufreq.c static unsigned int us2e_freq_get(unsigned int cpu)
cpu               238 drivers/cpufreq/sparc-us2e-cpufreq.c 	clock_tick = sparc64_get_clock_tick(cpu) / 1000;
cpu               239 drivers/cpufreq/sparc-us2e-cpufreq.c 	if (smp_call_function_single(cpu, __us2e_freq_get, &estar, 1))
cpu               247 drivers/cpufreq/sparc-us2e-cpufreq.c 	unsigned int cpu = smp_processor_id();
cpu               252 drivers/cpufreq/sparc-us2e-cpufreq.c 	new_freq = clock_tick = sparc64_get_clock_tick(cpu) / 1000;
cpu               269 drivers/cpufreq/sparc-us2e-cpufreq.c 	unsigned int cpu = policy->cpu;
cpu               271 drivers/cpufreq/sparc-us2e-cpufreq.c 	return smp_call_function_single(cpu, __us2e_freq_target, &index, 1);
cpu               276 drivers/cpufreq/sparc-us2e-cpufreq.c 	unsigned int cpu = policy->cpu;
cpu               277 drivers/cpufreq/sparc-us2e-cpufreq.c 	unsigned long clock_tick = sparc64_get_clock_tick(cpu) / 1000;
cpu               279 drivers/cpufreq/sparc-us2e-cpufreq.c 		&us2e_freq_table[cpu].table[0];
cpu                64 drivers/cpufreq/sparc-us3-cpufreq.c static unsigned long get_current_freq(unsigned int cpu, unsigned long safari_cfg)
cpu                66 drivers/cpufreq/sparc-us3-cpufreq.c 	unsigned long clock_tick = sparc64_get_clock_tick(cpu) / 1000;
cpu                86 drivers/cpufreq/sparc-us3-cpufreq.c static unsigned int us3_freq_get(unsigned int cpu)
cpu                90 drivers/cpufreq/sparc-us3-cpufreq.c 	if (smp_call_function_single(cpu, read_safari_cfg, &reg, 1))
cpu                92 drivers/cpufreq/sparc-us3-cpufreq.c 	return get_current_freq(cpu, reg);
cpu                97 drivers/cpufreq/sparc-us3-cpufreq.c 	unsigned int cpu = policy->cpu;
cpu               100 drivers/cpufreq/sparc-us3-cpufreq.c 	new_freq = sparc64_get_clock_tick(cpu) / 1000;
cpu               119 drivers/cpufreq/sparc-us3-cpufreq.c 	return smp_call_function_single(cpu, update_safari_cfg, &new_bits, 1);
cpu               124 drivers/cpufreq/sparc-us3-cpufreq.c 	unsigned int cpu = policy->cpu;
cpu               125 drivers/cpufreq/sparc-us3-cpufreq.c 	unsigned long clock_tick = sparc64_get_clock_tick(cpu) / 1000;
cpu               127 drivers/cpufreq/sparc-us3-cpufreq.c 		&us3_freq_table[cpu].table[0];
cpu               236 drivers/cpufreq/speedstep-centrino.c 	struct cpuinfo_x86 *cpu = &cpu_data(policy->cpu);
cpu               240 drivers/cpufreq/speedstep-centrino.c 		if (centrino_verify_cpu_id(cpu, model->cpu_id) &&
cpu               242 drivers/cpufreq/speedstep-centrino.c 		     strcmp(cpu->x86_model_id, model->model_name) == 0))
cpu               249 drivers/cpufreq/speedstep-centrino.c 		       cpu->x86_model_id);
cpu               256 drivers/cpufreq/speedstep-centrino.c 		       cpu->x86_model_id);
cpu               261 drivers/cpufreq/speedstep-centrino.c 	per_cpu(centrino_model, policy->cpu) = model;
cpu               287 drivers/cpufreq/speedstep-centrino.c static unsigned extract_clock(unsigned msr, unsigned int cpu, int failsafe)
cpu               296 drivers/cpufreq/speedstep-centrino.c 	if ((per_cpu(centrino_cpu, cpu) == &cpu_ids[CPU_BANIAS]) ||
cpu               297 drivers/cpufreq/speedstep-centrino.c 	    (per_cpu(centrino_cpu, cpu) == &cpu_ids[CPU_DOTHAN_A1]) ||
cpu               298 drivers/cpufreq/speedstep-centrino.c 	    (per_cpu(centrino_cpu, cpu) == &cpu_ids[CPU_DOTHAN_B0])) {
cpu               303 drivers/cpufreq/speedstep-centrino.c 	if ((!per_cpu(centrino_model, cpu)) ||
cpu               304 drivers/cpufreq/speedstep-centrino.c 	    (!per_cpu(centrino_model, cpu)->op_points))
cpu               309 drivers/cpufreq/speedstep-centrino.c 		per_cpu(centrino_model, cpu)->op_points[i].frequency
cpu               312 drivers/cpufreq/speedstep-centrino.c 		if (msr == per_cpu(centrino_model, cpu)->op_points[i].driver_data)
cpu               313 drivers/cpufreq/speedstep-centrino.c 			return per_cpu(centrino_model, cpu)->
cpu               317 drivers/cpufreq/speedstep-centrino.c 		return per_cpu(centrino_model, cpu)->op_points[i-1].frequency;
cpu               323 drivers/cpufreq/speedstep-centrino.c static unsigned int get_cur_freq(unsigned int cpu)
cpu               328 drivers/cpufreq/speedstep-centrino.c 	rdmsr_on_cpu(cpu, MSR_IA32_PERF_STATUS, &l, &h);
cpu               329 drivers/cpufreq/speedstep-centrino.c 	clock_freq = extract_clock(l, cpu, 0);
cpu               338 drivers/cpufreq/speedstep-centrino.c 		rdmsr_on_cpu(cpu, MSR_IA32_PERF_CTL, &l, &h);
cpu               339 drivers/cpufreq/speedstep-centrino.c 		clock_freq = extract_clock(l, cpu, 1);
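
extract_clock() above resolves a PERF_STATUS/PERF_CTL MSR value by matching it against the per-CPU operating-point table, falling back to the last entry on the failsafe pass. A sketch with an invented table; the MSR encodings and frequencies are illustrative only:

#include <stdio.h>

struct op_point { unsigned int driver_data; unsigned int frequency; /* kHz */ };

static unsigned int extract_clock(unsigned int msr, const struct op_point *op,
				  int n, int failsafe)
{
	msr &= 0xffff;                          /* only the low word encodes the state */
	for (int i = 0; i < n; i++)
		if (msr == op[i].driver_data)
			return op[i].frequency;
	return failsafe ? op[n - 1].frequency : 0;
}

int main(void)
{
	const struct op_point ops[] = {
		{ 0x0b13, 1100000 }, { 0x0913, 900000 }, { 0x0613, 600000 },
	};

	printf("%u kHz\n", extract_clock(0x0913, ops, 3, 0));
	printf("%u kHz (failsafe)\n", extract_clock(0xdead, ops, 3, 1));
	return 0;
}
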
cpu               347 drivers/cpufreq/speedstep-centrino.c 	struct cpuinfo_x86 *cpu = &cpu_data(policy->cpu);
cpu               352 drivers/cpufreq/speedstep-centrino.c 	if (cpu->x86_vendor != X86_VENDOR_INTEL ||
cpu               353 drivers/cpufreq/speedstep-centrino.c 	    !cpu_has(cpu, X86_FEATURE_EST))
cpu               356 drivers/cpufreq/speedstep-centrino.c 	if (cpu_has(cpu, X86_FEATURE_CONSTANT_TSC))
cpu               359 drivers/cpufreq/speedstep-centrino.c 	if (policy->cpu != 0)
cpu               363 drivers/cpufreq/speedstep-centrino.c 		if (centrino_verify_cpu_id(cpu, &cpu_ids[i]))
cpu               367 drivers/cpufreq/speedstep-centrino.c 		per_cpu(centrino_cpu, policy->cpu) = &cpu_ids[i];
cpu               369 drivers/cpufreq/speedstep-centrino.c 	if (!per_cpu(centrino_cpu, policy->cpu)) {
cpu               398 drivers/cpufreq/speedstep-centrino.c 	policy->freq_table = per_cpu(centrino_model, policy->cpu)->op_points;
cpu               405 drivers/cpufreq/speedstep-centrino.c 	unsigned int cpu = policy->cpu;
cpu               407 drivers/cpufreq/speedstep-centrino.c 	if (!per_cpu(centrino_model, cpu))
cpu               410 drivers/cpufreq/speedstep-centrino.c 	per_cpu(centrino_model, cpu) = NULL;
cpu               424 drivers/cpufreq/speedstep-centrino.c 	unsigned int	msr, oldmsr = 0, h = 0, cpu = policy->cpu;
cpu               433 drivers/cpufreq/speedstep-centrino.c 	if (unlikely(per_cpu(centrino_model, cpu) == NULL)) {
cpu               439 drivers/cpufreq/speedstep-centrino.c 	op_points = &per_cpu(centrino_model, cpu)->op_points[index];
cpu               241 drivers/cpufreq/speedstep-ich.c static unsigned int speedstep_get(unsigned int cpu)
cpu               246 drivers/cpufreq/speedstep-ich.c 	BUG_ON(smp_call_function_single(cpu, get_freq_data, &speed, 1));
cpu               296 drivers/cpufreq/speedstep-ich.c 	cpumask_copy(policy->cpus, topology_sibling_cpumask(policy->cpu));
cpu               235 drivers/cpufreq/speedstep-smi.c 	if (policy->cpu != 0)
cpu               272 drivers/cpufreq/speedstep-smi.c static unsigned int speedstep_get(unsigned int cpu)
cpu               274 drivers/cpufreq/speedstep-smi.c 	if (cpu)
cpu                50 drivers/cpufreq/sti-cpufreq.c 	struct device *cpu;
cpu                56 drivers/cpufreq/sti-cpufreq.c 	struct device_node *np = ddata.cpu->of_node;
cpu                57 drivers/cpufreq/sti-cpufreq.c 	struct device *dev = ddata.cpu;
cpu                82 drivers/cpufreq/sti-cpufreq.c 	struct device *dev = ddata.cpu;
cpu               113 drivers/cpufreq/sti-cpufreq.c 	struct device *dev = ddata.cpu;
cpu               152 drivers/cpufreq/sti-cpufreq.c 	struct device *dev = ddata.cpu;
cpu               238 drivers/cpufreq/sti-cpufreq.c 	struct device *dev = ddata.cpu;
cpu               264 drivers/cpufreq/sti-cpufreq.c 	ddata.cpu = get_cpu_device(0);
cpu               265 drivers/cpufreq/sti-cpufreq.c 	if (!ddata.cpu) {
cpu               266 drivers/cpufreq/sti-cpufreq.c 		dev_err(ddata.cpu, "Failed to get device for CPU0\n");
cpu               270 drivers/cpufreq/sti-cpufreq.c 	if (!of_get_property(ddata.cpu->of_node, "operating-points-v2", NULL)) {
cpu               271 drivers/cpufreq/sti-cpufreq.c 		dev_err(ddata.cpu, "OPP-v2 not supported\n");
cpu               284 drivers/cpufreq/sti-cpufreq.c 	dev_err(ddata.cpu, "Not doing voltage scaling\n");
cpu                91 drivers/cpufreq/sun50i-cpufreq-nvmem.c 	unsigned int cpu;
cpu               106 drivers/cpufreq/sun50i-cpufreq-nvmem.c 	for_each_possible_cpu(cpu) {
cpu               107 drivers/cpufreq/sun50i-cpufreq-nvmem.c 		struct device *cpu_dev = get_cpu_device(cpu);
cpu               114 drivers/cpufreq/sun50i-cpufreq-nvmem.c 		opp_tables[cpu] = dev_pm_opp_set_prop_name(cpu_dev, name);
cpu               115 drivers/cpufreq/sun50i-cpufreq-nvmem.c 		if (IS_ERR(opp_tables[cpu])) {
cpu               116 drivers/cpufreq/sun50i-cpufreq-nvmem.c 			ret = PTR_ERR(opp_tables[cpu]);
cpu               133 drivers/cpufreq/sun50i-cpufreq-nvmem.c 	for_each_possible_cpu(cpu) {
cpu               134 drivers/cpufreq/sun50i-cpufreq-nvmem.c 		if (IS_ERR_OR_NULL(opp_tables[cpu]))
cpu               136 drivers/cpufreq/sun50i-cpufreq-nvmem.c 		dev_pm_opp_put_prop_name(opp_tables[cpu]);
cpu               146 drivers/cpufreq/sun50i-cpufreq-nvmem.c 	unsigned int cpu;
cpu               150 drivers/cpufreq/sun50i-cpufreq-nvmem.c 	for_each_possible_cpu(cpu)
cpu               151 drivers/cpufreq/sun50i-cpufreq-nvmem.c 		dev_pm_opp_put_prop_name(opp_tables[cpu]);
cpu                65 drivers/cpufreq/tegra186-cpufreq.c 			if (info->cpus[core] == policy->cpu)
cpu                27 drivers/cpufreq/unicore2-cpufreq.c 	if (policy->cpu)
cpu                53 drivers/cpufreq/unicore2-cpufreq.c 	if (policy->cpu != 0)
cpu               314 drivers/cpuidle/coupled.c 	int cpu = (unsigned long)info;
cpu               315 drivers/cpuidle/coupled.c 	cpumask_set_cpu(cpu, &cpuidle_coupled_poked);
cpu               316 drivers/cpuidle/coupled.c 	cpumask_clear_cpu(cpu, &cpuidle_coupled_poke_pending);
cpu               331 drivers/cpuidle/coupled.c static void cpuidle_coupled_poke(int cpu)
cpu               333 drivers/cpuidle/coupled.c 	call_single_data_t *csd = &per_cpu(cpuidle_coupled_poke_cb, cpu);
cpu               335 drivers/cpuidle/coupled.c 	if (!cpumask_test_and_set_cpu(cpu, &cpuidle_coupled_poke_pending))
cpu               336 drivers/cpuidle/coupled.c 		smp_call_function_single_async(cpu, csd);
cpu               349 drivers/cpuidle/coupled.c 	int cpu;
cpu               351 drivers/cpuidle/coupled.c 	for_each_cpu(cpu, &coupled->coupled_cpus)
cpu               352 drivers/cpuidle/coupled.c 		if (cpu != this_cpu && cpu_online(cpu))
cpu               353 drivers/cpuidle/coupled.c 			cpuidle_coupled_poke(cpu);
cpu               365 drivers/cpuidle/coupled.c static int cpuidle_coupled_set_waiting(int cpu,
cpu               368 drivers/cpuidle/coupled.c 	coupled->requested_state[cpu] = next_state;
cpu               384 drivers/cpuidle/coupled.c static void cpuidle_coupled_set_not_waiting(int cpu,
cpu               395 drivers/cpuidle/coupled.c 	coupled->requested_state[cpu] = CPUIDLE_COUPLED_NOT_IDLE;
cpu               407 drivers/cpuidle/coupled.c static void cpuidle_coupled_set_done(int cpu, struct cpuidle_coupled *coupled)
cpu               409 drivers/cpuidle/coupled.c 	cpuidle_coupled_set_not_waiting(cpu, coupled);
cpu               426 drivers/cpuidle/coupled.c static int cpuidle_coupled_clear_pokes(int cpu)
cpu               428 drivers/cpuidle/coupled.c 	if (!cpumask_test_cpu(cpu, &cpuidle_coupled_poke_pending))
cpu               432 drivers/cpuidle/coupled.c 	while (cpumask_test_cpu(cpu, &cpuidle_coupled_poke_pending))
cpu               480 drivers/cpuidle/coupled.c 		cpuidle_coupled_clear_pokes(dev->cpu);
cpu               494 drivers/cpuidle/coupled.c 	cpumask_clear_cpu(dev->cpu, &cpuidle_coupled_poked);
cpu               496 drivers/cpuidle/coupled.c 	w = cpuidle_coupled_set_waiting(dev->cpu, coupled, next_state);
cpu               505 drivers/cpuidle/coupled.c 		cpumask_set_cpu(dev->cpu, &cpuidle_coupled_poked);
cpu               506 drivers/cpuidle/coupled.c 		cpuidle_coupled_poke_others(dev->cpu, coupled);
cpu               519 drivers/cpuidle/coupled.c 			!cpumask_test_cpu(dev->cpu, &cpuidle_coupled_poked)) {
cpu               520 drivers/cpuidle/coupled.c 		if (cpuidle_coupled_clear_pokes(dev->cpu))
cpu               524 drivers/cpuidle/coupled.c 			cpuidle_coupled_set_not_waiting(dev->cpu, coupled);
cpu               529 drivers/cpuidle/coupled.c 			cpuidle_coupled_set_not_waiting(dev->cpu, coupled);
cpu               538 drivers/cpuidle/coupled.c 	cpuidle_coupled_clear_pokes(dev->cpu);
cpu               540 drivers/cpuidle/coupled.c 		cpuidle_coupled_set_not_waiting(dev->cpu, coupled);
cpu               585 drivers/cpuidle/coupled.c 		cpuidle_coupled_set_done(dev->cpu, coupled);
cpu               596 drivers/cpuidle/coupled.c 	cpuidle_coupled_set_done(dev->cpu, coupled);
cpu               643 drivers/cpuidle/coupled.c 	int cpu;
cpu               651 drivers/cpuidle/coupled.c 	for_each_cpu(cpu, &dev->coupled_cpus) {
cpu               652 drivers/cpuidle/coupled.c 		other_dev = per_cpu(cpuidle_devices, cpu);
cpu               675 drivers/cpuidle/coupled.c 	csd = &per_cpu(cpuidle_coupled_poke_cb, dev->cpu);
cpu               677 drivers/cpuidle/coupled.c 	csd->info = (void *)(unsigned long)dev->cpu;
cpu               711 drivers/cpuidle/coupled.c 	int cpu = get_cpu();
cpu               715 drivers/cpuidle/coupled.c 	cpuidle_coupled_poke_others(cpu, coupled);
cpu               730 drivers/cpuidle/coupled.c 	int cpu = get_cpu();
cpu               739 drivers/cpuidle/coupled.c 	cpuidle_coupled_poke_others(cpu, coupled);
cpu               743 drivers/cpuidle/coupled.c static int coupled_cpu_online(unsigned int cpu)
cpu               749 drivers/cpuidle/coupled.c 	dev = per_cpu(cpuidle_devices, cpu);
cpu               759 drivers/cpuidle/coupled.c static int coupled_cpu_up_prepare(unsigned int cpu)
cpu               765 drivers/cpuidle/coupled.c 	dev = per_cpu(cpuidle_devices, cpu);
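Note: the coupled.c entries above all orbit one idiom: each CPU owns a call_single_data_t in per-CPU storage, and peer CPUs are "poked" with an asynchronous cross-CPU call whose pending state is tracked in a cpumask so that only one IPI is ever in flight per target. A minimal sketch of that generic pattern follows; the demo_* names are illustrative and are not part of coupled.c.

    #include <linux/smp.h>
    #include <linux/percpu.h>
    #include <linux/cpumask.h>

    static DEFINE_PER_CPU(call_single_data_t, demo_poke_cb);
    static cpumask_t demo_poke_pending;

    /* Runs on the poked CPU in IPI context: acknowledge the poke. */
    static void demo_poke_handler(void *info)
    {
    	int cpu = (unsigned long)info;

    	cpumask_clear_cpu(cpu, &demo_poke_pending);
    }

    /* Set up each CPU's CSD once, before any pokes are sent. */
    static void demo_poke_init(int cpu)
    {
    	call_single_data_t *csd = &per_cpu(demo_poke_cb, cpu);

    	csd->func = demo_poke_handler;
    	csd->info = (void *)(unsigned long)cpu;
    }

    /* Send at most one asynchronous IPI per outstanding poke. */
    static void demo_poke(int cpu)
    {
    	if (!cpumask_test_and_set_cpu(cpu, &demo_poke_pending))
    		smp_call_function_single_async(cpu, &per_cpu(demo_poke_cb, cpu));
    }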
cpu                77 drivers/cpuidle/cpuidle-arm.c static int __init arm_idle_init_cpu(int cpu)
cpu                86 drivers/cpuidle/cpuidle-arm.c 	drv->cpumask = (struct cpumask *)cpumask_of(cpu);
cpu               105 drivers/cpuidle/cpuidle-arm.c 	ret = arm_cpuidle_init(cpu);
cpu               118 drivers/cpuidle/cpuidle-arm.c 			pr_err("CPU %d failed to init idle CPU ops\n", cpu);
cpu               143 drivers/cpuidle/cpuidle-arm.c 	int cpu, ret;
cpu               147 drivers/cpuidle/cpuidle-arm.c 	for_each_possible_cpu(cpu) {
cpu               148 drivers/cpuidle/cpuidle-arm.c 		ret = arm_idle_init_cpu(cpu);
cpu               156 drivers/cpuidle/cpuidle-arm.c 	while (--cpu >= 0) {
cpu               157 drivers/cpuidle/cpuidle-arm.c 		dev = per_cpu(cpuidle_devices, cpu);
cpu               105 drivers/cpuidle/cpuidle-big_little.c 	unsigned int cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
cpu               107 drivers/cpuidle/cpuidle-big_little.c 	mcpm_set_entry_vector(cpu, cluster, cpu_resume);
cpu               141 drivers/cpuidle/cpuidle-big_little.c 	int cpu;
cpu               147 drivers/cpuidle/cpuidle-big_little.c 	for_each_possible_cpu(cpu)
cpu               148 drivers/cpuidle/cpuidle-big_little.c 		if (smp_cpuid_part(cpu) == part_id)
cpu               149 drivers/cpuidle/cpuidle-big_little.c 			cpumask_set_cpu(cpu, cpumask);
cpu                36 drivers/cpuidle/cpuidle-cps.c 	if (cpus_are_siblings(0, dev->cpu) && (index > STATE_NC_WAIT))
cpu               104 drivers/cpuidle/cpuidle-cps.c 	int cpu;
cpu               107 drivers/cpuidle/cpuidle-cps.c 	for_each_possible_cpu(cpu) {
cpu               108 drivers/cpuidle/cpuidle-cps.c 		device = &per_cpu(cpuidle_dev, cpu);
cpu               117 drivers/cpuidle/cpuidle-cps.c 	int err, cpu, i;
cpu               158 drivers/cpuidle/cpuidle-cps.c 	for_each_possible_cpu(cpu) {
cpu               159 drivers/cpuidle/cpuidle-cps.c 		device = &per_cpu(cpuidle_dev, cpu);
cpu               160 drivers/cpuidle/cpuidle-cps.c 		device->cpu = cpu;
cpu               162 drivers/cpuidle/cpuidle-cps.c 		cpumask_copy(&device->coupled_cpus, &cpu_sibling_map[cpu]);
cpu               168 drivers/cpuidle/cpuidle-cps.c 			       cpu);
cpu                43 drivers/cpuidle/cpuidle-exynos.c 	ret = dev->cpu ? exynos_cpuidle_pdata->cpu1_powerdown()
cpu                65 drivers/cpuidle/cpuidle-exynos.c 	if (num_online_cpus() > 1 || dev->cpu != 0)
cpu                53 drivers/cpuidle/cpuidle-haltpoll.c static int haltpoll_cpu_online(unsigned int cpu)
cpu                57 drivers/cpuidle/cpuidle-haltpoll.c 	dev = per_cpu_ptr(haltpoll_cpuidle_devices, cpu);
cpu                59 drivers/cpuidle/cpuidle-haltpoll.c 		dev->cpu = cpu;
cpu                61 drivers/cpuidle/cpuidle-haltpoll.c 			pr_notice("cpuidle_register_device %d failed!\n", cpu);
cpu                64 drivers/cpuidle/cpuidle-haltpoll.c 		arch_haltpoll_enable(cpu);
cpu                70 drivers/cpuidle/cpuidle-haltpoll.c static int haltpoll_cpu_offline(unsigned int cpu)
cpu                74 drivers/cpuidle/cpuidle-haltpoll.c 	dev = per_cpu_ptr(haltpoll_cpuidle_devices, cpu);
cpu                76 drivers/cpuidle/cpuidle-haltpoll.c 		arch_haltpoll_disable(cpu);
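Note: cpuidle-haltpoll.c above (and, further down, cpuidle-powernv.c, cpuidle-pseries.c and the coresight drivers) hang their per-CPU setup and teardown off CPU hotplug callbacks. A bare-bones sketch of that registration, with hypothetical demo_* callbacks, looks roughly like this:

    #include <linux/cpuhotplug.h>
    #include <linux/module.h>

    static int demo_cpu_online(unsigned int cpu)
    {
    	/* Called for each CPU as it comes online; set up the per-CPU
    	 * resource here and return 0 on success. */
    	return 0;
    }

    static int demo_cpu_offline(unsigned int cpu)
    {
    	/* Called before the CPU goes away; undo demo_cpu_online(). */
    	return 0;
    }

    static enum cpuhp_state demo_hp_state;

    static int __init demo_init(void)
    {
    	int ret;

    	/* A dynamic state also invokes the online callback immediately
    	 * for every CPU that is already up. */
    	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "demo/cpu:online",
    				demo_cpu_online, demo_cpu_offline);
    	if (ret < 0)
    		return ret;

    	demo_hp_state = ret;
    	return 0;
    }

    static void __exit demo_exit(void)
    {
    	cpuhp_remove_state(demo_hp_state);
    }

    module_init(demo_init);
    module_exit(demo_exit);
    MODULE_LICENSE("GPL");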
cpu               164 drivers/cpuidle/cpuidle-powernv.c static int powernv_cpuidle_cpu_online(unsigned int cpu)
cpu               166 drivers/cpuidle/cpuidle-powernv.c 	struct cpuidle_device *dev = per_cpu(cpuidle_devices, cpu);
cpu               176 drivers/cpuidle/cpuidle-powernv.c static int powernv_cpuidle_cpu_dead(unsigned int cpu)
cpu               178 drivers/cpuidle/cpuidle-powernv.c 	struct cpuidle_device *dev = per_cpu(cpuidle_devices, cpu);
cpu                76 drivers/cpuidle/cpuidle-psci.c static int __init psci_dt_cpu_init_idle(struct device_node *cpu_node, int cpu)
cpu               108 drivers/cpuidle/cpuidle-psci.c 	per_cpu(psci_power_state, cpu) = psci_states;
cpu               116 drivers/cpuidle/cpuidle-psci.c static __init int psci_cpu_init_idle(unsigned int cpu)
cpu               128 drivers/cpuidle/cpuidle-psci.c 	cpu_node = of_cpu_device_node_get(cpu);
cpu               132 drivers/cpuidle/cpuidle-psci.c 	ret = psci_dt_cpu_init_idle(cpu_node, cpu);
cpu               139 drivers/cpuidle/cpuidle-psci.c static int __init psci_idle_init_cpu(int cpu)
cpu               146 drivers/cpuidle/cpuidle-psci.c 	cpu_node = of_cpu_device_node_get(cpu);
cpu               166 drivers/cpuidle/cpuidle-psci.c 	drv->cpumask = (struct cpumask *)cpumask_of(cpu);
cpu               188 drivers/cpuidle/cpuidle-psci.c 	ret = psci_cpu_init_idle(cpu);
cpu               190 drivers/cpuidle/cpuidle-psci.c 		pr_err("CPU %d failed to PSCI idle\n", cpu);
cpu               214 drivers/cpuidle/cpuidle-psci.c 	int cpu, ret;
cpu               218 drivers/cpuidle/cpuidle-psci.c 	for_each_possible_cpu(cpu) {
cpu               219 drivers/cpuidle/cpuidle-psci.c 		ret = psci_idle_init_cpu(cpu);
cpu               227 drivers/cpuidle/cpuidle-psci.c 	while (--cpu >= 0) {
cpu               228 drivers/cpuidle/cpuidle-psci.c 		dev = per_cpu(cpuidle_devices, cpu);
cpu               191 drivers/cpuidle/cpuidle-pseries.c static int pseries_cpuidle_cpu_online(unsigned int cpu)
cpu               193 drivers/cpuidle/cpuidle-pseries.c 	struct cpuidle_device *dev = per_cpu(cpuidle_devices, cpu);
cpu               203 drivers/cpuidle/cpuidle-pseries.c static int pseries_cpuidle_cpu_dead(unsigned int cpu)
cpu               205 drivers/cpuidle/cpuidle-pseries.c 	struct cpuidle_device *dev = per_cpu(cpuidle_devices, cpu);
cpu               225 drivers/cpuidle/cpuidle.c 	trace_cpu_idle_rcuidle(index, dev->cpu);
cpu               234 drivers/cpuidle/cpuidle.c 	trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, dev->cpu);
cpu               549 drivers/cpuidle/cpuidle.c 	per_cpu(cpuidle_devices, dev->cpu) = NULL;
cpu               577 drivers/cpuidle/cpuidle.c 	per_cpu(cpuidle_devices, dev->cpu) = dev;
cpu               668 drivers/cpuidle/cpuidle.c 	int cpu;
cpu               671 drivers/cpuidle/cpuidle.c 	for_each_cpu(cpu, drv->cpumask) {
cpu               672 drivers/cpuidle/cpuidle.c 		device = &per_cpu(cpuidle_dev, cpu);
cpu               694 drivers/cpuidle/cpuidle.c 	int ret, cpu;
cpu               703 drivers/cpuidle/cpuidle.c 	for_each_cpu(cpu, drv->cpumask) {
cpu               704 drivers/cpuidle/cpuidle.c 		device = &per_cpu(cpuidle_dev, cpu);
cpu               705 drivers/cpuidle/cpuidle.c 		device->cpu = cpu;
cpu               720 drivers/cpuidle/cpuidle.c 		pr_err("Failed to register cpuidle device for cpu%d\n", cpu);
cpu                35 drivers/cpuidle/driver.c static struct cpuidle_driver *__cpuidle_get_cpu_driver(int cpu)
cpu                37 drivers/cpuidle/driver.c 	return per_cpu(cpuidle_drivers, cpu);
cpu                50 drivers/cpuidle/driver.c 	int cpu;
cpu                52 drivers/cpuidle/driver.c 	for_each_cpu(cpu, drv->cpumask) {
cpu                54 drivers/cpuidle/driver.c 		if (drv != __cpuidle_get_cpu_driver(cpu))
cpu                57 drivers/cpuidle/driver.c 		per_cpu(cpuidle_drivers, cpu) = NULL;
cpu                70 drivers/cpuidle/driver.c 	int cpu;
cpu                72 drivers/cpuidle/driver.c 	for_each_cpu(cpu, drv->cpumask) {
cpu                75 drivers/cpuidle/driver.c 		old_drv = __cpuidle_get_cpu_driver(cpu);
cpu                80 drivers/cpuidle/driver.c 	for_each_cpu(cpu, drv->cpumask)
cpu                81 drivers/cpuidle/driver.c 		per_cpu(cpuidle_drivers, cpu) = drv;
cpu                97 drivers/cpuidle/driver.c static inline struct cpuidle_driver *__cpuidle_get_cpu_driver(int cpu)
cpu               315 drivers/cpuidle/driver.c 	int cpu;
cpu               317 drivers/cpuidle/driver.c 	cpu = get_cpu();
cpu               318 drivers/cpuidle/driver.c 	drv = __cpuidle_get_cpu_driver(cpu);
cpu               337 drivers/cpuidle/driver.c 	return __cpuidle_get_cpu_driver(dev->cpu);
cpu               100 drivers/cpuidle/dt_idle_states.c 	int cpu;
cpu               111 drivers/cpuidle/dt_idle_states.c 	for (cpu = cpumask_next(cpumask_first(cpumask), cpumask);
cpu               112 drivers/cpuidle/dt_idle_states.c 	     cpu < nr_cpu_ids; cpu = cpumask_next(cpu, cpumask)) {
cpu               113 drivers/cpuidle/dt_idle_states.c 		cpu_node = of_cpu_device_node_get(cpu);
cpu               110 drivers/cpuidle/governor.c int cpuidle_governor_latency_req(unsigned int cpu)
cpu               113 drivers/cpuidle/governor.c 	struct device *device = get_cpu_device(cpu);
cpu                52 drivers/cpuidle/governors/haltpoll.c 	int latency_req = cpuidle_governor_latency_req(dev->cpu);
cpu                73 drivers/cpuidle/governors/ladder.c 	int latency_req = cpuidle_governor_latency_req(dev->cpu);
cpu               138 drivers/cpuidle/governors/ladder.c 	struct ladder_device *ldev = &per_cpu(ladder_devices, dev->cpu);
cpu               279 drivers/cpuidle/governors/menu.c 	int latency_req = cpuidle_governor_latency_req(dev->cpu);
cpu               295 drivers/cpuidle/governors/menu.c 	nr_iowaiters = nr_iowait_cpu(dev->cpu);
cpu               558 drivers/cpuidle/governors/menu.c 	struct menu_device *data = &per_cpu(menu_devices, dev->cpu);
cpu               119 drivers/cpuidle/governors/teo.c 	struct teo_cpu *cpu_data = per_cpu_ptr(&teo_cpus, dev->cpu);
cpu               234 drivers/cpuidle/governors/teo.c 	struct teo_cpu *cpu_data = per_cpu_ptr(&teo_cpus, dev->cpu);
cpu               235 drivers/cpuidle/governors/teo.c 	int latency_req = cpuidle_governor_latency_req(dev->cpu);
cpu               440 drivers/cpuidle/governors/teo.c 	struct teo_cpu *cpu_data = per_cpu_ptr(&teo_cpus, dev->cpu);
cpu               465 drivers/cpuidle/governors/teo.c 	struct teo_cpu *cpu_data = per_cpu_ptr(&teo_cpus, dev->cpu);
cpu               682 drivers/cpuidle/sysfs.c 	struct device *cpu_dev = get_cpu_device((unsigned long)dev->cpu);
cpu               848 drivers/crypto/caam/caamalg_qi.c 			int cpu;
cpu               855 drivers/crypto/caam/caamalg_qi.c 			cpu = smp_processor_id();
cpu               856 drivers/crypto/caam/caamalg_qi.c 			drv_ctx = caam_drv_ctx_init(ctx->qidev, &cpu, desc);
cpu              4639 drivers/crypto/caam/caamalg_qi2.c 	int err, i = 0, cpu;
cpu              4641 drivers/crypto/caam/caamalg_qi2.c 	for_each_online_cpu(cpu) {
cpu              4642 drivers/crypto/caam/caamalg_qi2.c 		ppriv = per_cpu_ptr(priv->ppriv, cpu);
cpu              4647 drivers/crypto/caam/caamalg_qi2.c 		nctx->desired_cpu = cpu;
cpu              4651 drivers/crypto/caam/caamalg_qi2.c 		ppriv->dpio = dpaa2_io_service_select(cpu);
cpu              4654 drivers/crypto/caam/caamalg_qi2.c 			dev_dbg(dev, "No affine DPIO for cpu %d\n", cpu);
cpu              4681 drivers/crypto/caam/caamalg_qi2.c 	for_each_online_cpu(cpu) {
cpu              4682 drivers/crypto/caam/caamalg_qi2.c 		ppriv = per_cpu_ptr(priv->ppriv, cpu);
cpu              4688 drivers/crypto/caam/caamalg_qi2.c 	for_each_online_cpu(cpu) {
cpu              4689 drivers/crypto/caam/caamalg_qi2.c 		ppriv = per_cpu_ptr(priv->ppriv, cpu);
cpu              4701 drivers/crypto/caam/caamalg_qi2.c 	int i = 0, cpu;
cpu              4703 drivers/crypto/caam/caamalg_qi2.c 	for_each_online_cpu(cpu) {
cpu              4704 drivers/crypto/caam/caamalg_qi2.c 		ppriv = per_cpu_ptr(priv->ppriv, cpu);
cpu              4720 drivers/crypto/caam/caamalg_qi2.c 	int err = 0, i = 0, cpu;
cpu              4723 drivers/crypto/caam/caamalg_qi2.c 	for_each_online_cpu(cpu) {
cpu              4724 drivers/crypto/caam/caamalg_qi2.c 		ppriv = per_cpu_ptr(priv->ppriv, cpu);
cpu              4941 drivers/crypto/caam/caamalg_qi2.c 	int err, cpu;
cpu              5012 drivers/crypto/caam/caamalg_qi2.c 	for_each_online_cpu(cpu) {
cpu              5017 drivers/crypto/caam/caamalg_qi2.c 		ppriv = per_cpu_ptr(priv->ppriv, cpu);
cpu               392 drivers/crypto/caam/qi.c 				       int *cpu,
cpu               431 drivers/crypto/caam/qi.c 	if (!cpumask_test_cpu(*cpu, cpus)) {
cpu               437 drivers/crypto/caam/qi.c 		*cpu = *pcpu;
cpu               441 drivers/crypto/caam/qi.c 	drv_ctx->cpu = *cpu;
cpu               444 drivers/crypto/caam/qi.c 	drv_ctx->rsp_fq = per_cpu(pcpu_qipriv.rsp_fq, drv_ctx->cpu);
cpu               604 drivers/crypto/caam/qi.c static int alloc_rsp_fq_cpu(struct device *qidev, unsigned int cpu)
cpu               630 drivers/crypto/caam/qi.c 	qm_fqd_set_destwq(&opts.fqd, qman_affine_channel(cpu), 3);
cpu               643 drivers/crypto/caam/qi.c 	per_cpu(pcpu_qipriv.rsp_fq, cpu) = fq;
cpu               645 drivers/crypto/caam/qi.c 	dev_dbg(qidev, "Allocated response FQ %u for CPU %u", fq->fqid, cpu);
cpu                65 drivers/crypto/caam/qi.h 	int cpu;
cpu               107 drivers/crypto/caam/qi.h struct caam_drv_ctx *caam_drv_ctx_init(struct device *qidev, int *cpu,
cpu               617 drivers/crypto/cavium/cpt/cptvf_main.c 	int cpu;
cpu               626 drivers/crypto/cavium/cpt/cptvf_main.c 	cpu = cptvf->vfid % num_online_cpus();
cpu               627 drivers/crypto/cavium/cpt/cptvf_main.c 	cpumask_set_cpu(cpumask_local_spread(cpu, cptvf->node),
cpu               293 drivers/crypto/cavium/nitrox/nitrox_isr.c 	int nr_vecs, vec, cpu;
cpu               342 drivers/crypto/cavium/nitrox/nitrox_isr.c 		cpu = qvec->ring % num_online_cpus();
cpu               343 drivers/crypto/cavium/nitrox/nitrox_isr.c 		irq_set_affinity_hint(vec, get_cpu_mask(cpu));
cpu               363 drivers/crypto/cavium/nitrox/nitrox_isr.c 	cpu = num_online_cpus();
cpu               364 drivers/crypto/cavium/nitrox/nitrox_isr.c 	irq_set_affinity_hint(vec, get_cpu_mask(cpu));
cpu               407 drivers/crypto/cavium/nitrox/nitrox_isr.c 	int vec, cpu;
cpu               441 drivers/crypto/cavium/nitrox/nitrox_isr.c 	cpu = num_online_cpus();
cpu               442 drivers/crypto/cavium/nitrox/nitrox_isr.c 	irq_set_affinity_hint(vec, get_cpu_mask(cpu));
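Note: the cavium/nitrox entries above (and the qat entries below) show the usual way of spreading interrupt vectors over the online CPUs: pick a CPU round-robin and record it as an affinity hint. A generic sketch, with made-up demo_* names, under the same simplifying assumption as those drivers that online CPU ids are dense:

    #include <linux/interrupt.h>
    #include <linux/cpumask.h>

    /* Pin each of the @nvec Linux IRQ numbers in @irqs to one online CPU,
     * round-robin, via an affinity hint the IRQ core may honour. */
    static void demo_spread_irqs(const int *irqs, int nvec)
    {
    	int i, cpu;

    	for (i = 0; i < nvec; i++) {
    		cpu = i % num_online_cpus();
    		irq_set_affinity_hint(irqs[i], get_cpu_mask(cpu));
    	}
    }

    /* On teardown the hint must be cleared again. */
    static void demo_unspread_irqs(const int *irqs, int nvec)
    {
    	int i;

    	for (i = 0; i < nvec; i++)
    		irq_set_affinity_hint(irqs[i], NULL);
    }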
cpu               531 drivers/crypto/n2_core.c 	int nbytes, cpu;
cpu               552 drivers/crypto/n2_core.c 	cpu = get_cpu();
cpu               553 drivers/crypto/n2_core.c 	qp = cpu_to_cwq[cpu];
cpu              1662 drivers/crypto/n2_core.c 	int cpu = cpumask_any_and(&p->sharing, cpu_online_mask);
cpu              1665 drivers/crypto/n2_core.c 	return work_on_cpu_safe(cpu, spu_queue_register_workfn, &qr);
cpu               112 drivers/crypto/padlock-aes.c 	int cpu;
cpu               157 drivers/crypto/padlock-aes.c 	for_each_online_cpu(cpu)
cpu               158 drivers/crypto/padlock-aes.c 		if (&ctx->cword.encrypt == per_cpu(paes_last_cword, cpu) ||
cpu               159 drivers/crypto/padlock-aes.c 		    &ctx->cword.decrypt == per_cpu(paes_last_cword, cpu))
cpu               160 drivers/crypto/padlock-aes.c 			per_cpu(paes_last_cword, cpu) = NULL;
cpu               170 drivers/crypto/padlock-aes.c 	int cpu = raw_smp_processor_id();
cpu               172 drivers/crypto/padlock-aes.c 	if (cword != per_cpu(paes_last_cword, cpu))
cpu               176 drivers/crypto/qat/qat_common/adf_isr.c 			unsigned int cpu, cpus = num_online_cpus();
cpu               190 drivers/crypto/qat/qat_common/adf_isr.c 			cpu = ((accel_dev->accel_id * hw_data->num_banks) +
cpu               193 drivers/crypto/qat/qat_common/adf_isr.c 					      get_cpu_mask(cpu));
cpu               239 drivers/crypto/qat/qat_common/adf_vf_isr.c 	unsigned int cpu;
cpu               252 drivers/crypto/qat/qat_common/adf_vf_isr.c 	cpu = accel_dev->accel_id % num_online_cpus();
cpu               253 drivers/crypto/qat/qat_common/adf_vf_isr.c 	irq_set_affinity_hint(pdev->irq, get_cpu_mask(cpu));
cpu               123 drivers/crypto/virtio/virtio_crypto_common.h 	int cpu, node;
cpu               125 drivers/crypto/virtio/virtio_crypto_common.h 	cpu = get_cpu();
cpu               126 drivers/crypto/virtio/virtio_crypto_common.h 	node = topology_physical_package_id(cpu);
cpu               146 drivers/crypto/virtio/virtio_crypto_core.c 	int cpu;
cpu               163 drivers/crypto/virtio/virtio_crypto_core.c 	for_each_online_cpu(cpu) {
cpu               164 drivers/crypto/virtio/virtio_crypto_core.c 		virtqueue_set_affinity(vcrypto->data_vq[i].vq, cpumask_of(cpu));
cpu               257 drivers/dca/dca-core.c static u8 dca_common_get_tag(struct device *dev, int cpu)
cpu               270 drivers/dca/dca-core.c 	tag = dca->ops->get_tag(dca, dev, cpu);
cpu               282 drivers/dca/dca-core.c u8 dca3_get_tag(struct device *dev, int cpu)
cpu               287 drivers/dca/dca-core.c 	return dca_common_get_tag(dev, cpu);
cpu               295 drivers/dca/dca-core.c u8 dca_get_tag(int cpu)
cpu               299 drivers/dca/dca-core.c 	return dca_common_get_tag(dev, cpu);
cpu                62 drivers/devfreq/event/exynos-ppmu.c 	PPMU_EVENT(cpu),
cpu                90 drivers/devfreq/event/exynos-ppmu.c 	PPMU_EVENT(d0-cpu),
cpu                93 drivers/devfreq/event/exynos-ppmu.c 	PPMU_EVENT(d1-cpu),
cpu               375 drivers/dma/dmaengine.c static bool dma_chan_is_local(struct dma_chan *chan, int cpu)
cpu               379 drivers/dma/dmaengine.c 		cpumask_test_cpu(cpu, cpumask_of_node(node));
cpu               392 drivers/dma/dmaengine.c static struct dma_chan *min_chan(enum dma_transaction_type cap, int cpu)
cpu               409 drivers/dma/dmaengine.c 			if (dma_chan_is_local(chan, cpu))
cpu               436 drivers/dma/dmaengine.c 	int cpu;
cpu               441 drivers/dma/dmaengine.c 		for_each_possible_cpu(cpu)
cpu               442 drivers/dma/dmaengine.c 			per_cpu_ptr(channel_table[cap], cpu)->chan = NULL;
cpu               457 drivers/dma/dmaengine.c 		for_each_online_cpu(cpu) {
cpu               458 drivers/dma/dmaengine.c 			chan = min_chan(cap, cpu);
cpu               459 drivers/dma/dmaengine.c 			per_cpu_ptr(channel_table[cap], cpu)->chan = chan;
cpu               755 drivers/dma/fsl-qdma.c 	int cpu;
cpu               792 drivers/dma/fsl-qdma.c 		cpu = i % num_online_cpus();
cpu               794 drivers/dma/fsl-qdma.c 					    get_cpu_mask(cpu));
cpu               798 drivers/dma/fsl-qdma.c 				cpu,
cpu                17 drivers/dma/ioat/dca.c #define cpu_physical_id(cpu) (cpuid_ebx(1) >> 24)
cpu               186 drivers/dma/ioat/dca.c 			    int cpu)
cpu               195 drivers/dma/ioat/dca.c 	apic_id = cpu_physical_id(cpu);
cpu              3026 drivers/edac/amd64_edac.c 	int cpu;
cpu              3028 drivers/edac/amd64_edac.c 	for_each_online_cpu(cpu)
cpu              3029 drivers/edac/amd64_edac.c 		if (amd_get_nb_id(cpu) == nid)
cpu              3030 drivers/edac/amd64_edac.c 			cpumask_set_cpu(cpu, mask);
cpu              3037 drivers/edac/amd64_edac.c 	int cpu, nbe;
cpu              3049 drivers/edac/amd64_edac.c 	for_each_cpu(cpu, mask) {
cpu              3050 drivers/edac/amd64_edac.c 		struct msr *reg = per_cpu_ptr(msrs, cpu);
cpu              3054 drivers/edac/amd64_edac.c 			 cpu, reg->q,
cpu              3070 drivers/edac/amd64_edac.c 	int cpu;
cpu              3081 drivers/edac/amd64_edac.c 	for_each_cpu(cpu, cmask) {
cpu              3083 drivers/edac/amd64_edac.c 		struct msr *reg = per_cpu_ptr(msrs, cpu);
cpu                45 drivers/edac/octeon_edac-pc.c 	unsigned int cpu = smp_processor_id();
cpu                59 drivers/edac/octeon_edac-pc.c 				   (unsigned long long)icache_err, core, cpu,
cpu                62 drivers/edac/octeon_edac-pc.c 		edac_device_handle_ce(p->ed, cpu, 1, "icache");
cpu                67 drivers/edac/octeon_edac-pc.c 				   (unsigned long long)dcache_err, core, cpu,
cpu                70 drivers/edac/octeon_edac-pc.c 			edac_device_handle_ue(p->ed, cpu, 0, "dcache");
cpu                72 drivers/edac/octeon_edac-pc.c 			edac_device_handle_ce(p->ed, cpu, 0, "dcache");
cpu               753 drivers/edac/xgene_edac.c 				      int cpu)
cpu               756 drivers/edac/xgene_edac.c 	void __iomem *pg_f = ctx->pmd_csr + cpu * CPU_CSR_STRIDE +
cpu               249 drivers/firmware/arm_sdei.c 		int cpu;
cpu               258 drivers/firmware/arm_sdei.c 		for_each_possible_cpu(cpu) {
cpu               259 drivers/firmware/arm_sdei.c 			reg = per_cpu_ptr(regs, cpu);
cpu               695 drivers/firmware/arm_sdei.c static int sdei_cpuhp_down(unsigned int cpu)
cpu               718 drivers/firmware/arm_sdei.c static int sdei_cpuhp_up(unsigned int cpu)
cpu                50 drivers/firmware/psci/psci.c bool psci_tos_resident_on(int cpu)
cpu                52 drivers/firmware/psci/psci.c 	return cpu == resident_cpu;
cpu               354 drivers/firmware/psci/psci.c 	int type, cpu = -1;
cpu               381 drivers/firmware/psci/psci.c 	cpu = get_logical_index(cpuid);
cpu               382 drivers/firmware/psci/psci.c 	resident_cpu = cpu >= 0 ? cpu : -1;
cpu                48 drivers/firmware/psci/psci_checker.c 	int cpu;
cpu                61 drivers/firmware/psci/psci_checker.c 		for_each_online_cpu(cpu)
cpu                62 drivers/firmware/psci/psci_checker.c 			if (psci_tos_resident_on(cpu)) {
cpu                63 drivers/firmware/psci/psci_checker.c 				tos_resident_cpu = cpu;
cpu                80 drivers/firmware/psci/psci_checker.c 	int cpu;
cpu                86 drivers/firmware/psci/psci_checker.c 	for_each_cpu(cpu, cpus) {
cpu                87 drivers/firmware/psci/psci_checker.c 		int ret = cpu_down(cpu);
cpu                97 drivers/firmware/psci/psci_checker.c 				       ret, cpu);
cpu               100 drivers/firmware/psci/psci_checker.c 		} else if (cpu == tos_resident_cpu) {
cpu               104 drivers/firmware/psci/psci_checker.c 				       ret, cpu);
cpu               109 drivers/firmware/psci/psci_checker.c 			       "to power down CPU %d\n", ret, cpu);
cpu               114 drivers/firmware/psci/psci_checker.c 			cpumask_set_cpu(cpu, offlined_cpus);
cpu               118 drivers/firmware/psci/psci_checker.c 	for_each_cpu(cpu, offlined_cpus) {
cpu               119 drivers/firmware/psci/psci_checker.c 		int ret = cpu_up(cpu);
cpu               123 drivers/firmware/psci/psci_checker.c 			       "to power up CPU %d\n", ret, cpu);
cpu               126 drivers/firmware/psci/psci_checker.c 			cpumask_clear_cpu(cpu, offlined_cpus);
cpu               273 drivers/firmware/psci/psci_checker.c 	int cpu = (long)arg;
cpu               287 drivers/firmware/psci/psci_checker.c 			cpu);
cpu               293 drivers/firmware/psci/psci_checker.c 		cpu, drv->state_count - 1);
cpu               336 drivers/firmware/psci/psci_checker.c 				       cpu, ret, index, i);
cpu               356 drivers/firmware/psci/psci_checker.c 			cpu);
cpu               366 drivers/firmware/psci/psci_checker.c 		cpu, nb_suspend, nb_shallow_sleep, nb_err);
cpu               375 drivers/firmware/psci/psci_checker.c 	int i, cpu, err = 0;
cpu               393 drivers/firmware/psci/psci_checker.c 	for_each_online_cpu(cpu) {
cpu               396 drivers/firmware/psci/psci_checker.c 		struct cpuidle_device *dev = per_cpu(cpuidle_devices, cpu);
cpu               401 drivers/firmware/psci/psci_checker.c 				cpu);
cpu               406 drivers/firmware/psci/psci_checker.c 					       (void *)(long)cpu, cpu,
cpu               409 drivers/firmware/psci/psci_checker.c 			pr_err("Failed to create kthread on CPU %d\n", cpu);
cpu               339 drivers/firmware/qcom_scm-32.c 	int cpu;
cpu               350 drivers/firmware/qcom_scm-32.c 	for_each_cpu(cpu, cpus) {
cpu               351 drivers/firmware/qcom_scm-32.c 		if (cpu < ARRAY_SIZE(scm_cb_flags))
cpu               352 drivers/firmware/qcom_scm-32.c 			flags |= scm_cb_flags[cpu];
cpu               354 drivers/firmware/qcom_scm-32.c 			set_cpu_present(cpu, false);
cpu               374 drivers/firmware/qcom_scm-32.c 	int cpu;
cpu               384 drivers/firmware/qcom_scm-32.c 	for_each_cpu(cpu, cpus) {
cpu               385 drivers/firmware/qcom_scm-32.c 		if (entry == qcom_scm_wb[cpu].entry)
cpu               387 drivers/firmware/qcom_scm-32.c 		flags |= qcom_scm_wb[cpu].flag;
cpu               399 drivers/firmware/qcom_scm-32.c 		for_each_cpu(cpu, cpus)
cpu               400 drivers/firmware/qcom_scm-32.c 			qcom_scm_wb[cpu].entry = entry;
cpu               556 drivers/firmware/stratix10-svc.c 	unsigned int cpu = 0;
cpu               563 drivers/firmware/stratix10-svc.c 						cpu_to_node(cpu),
cpu               803 drivers/firmware/stratix10-svc.c 	unsigned int cpu = 0;
cpu               814 drivers/firmware/stratix10-svc.c 					      cpu_to_node(cpu),
cpu               822 drivers/firmware/stratix10-svc.c 		kthread_bind(chan->ctrl->task, cpu);
cpu                55 drivers/firmware/trusted_foundations.c static int tf_set_cpu_boot_addr(int cpu, unsigned long boot_addr)
cpu                77 drivers/gpio/gpio-mvebu.c #define GPIO_EDGE_MASK_MV78200_OFF(cpu)	  ((cpu) ? 0x30 : 0x18)
cpu                78 drivers/gpio/gpio-mvebu.c #define GPIO_LEVEL_MASK_MV78200_OFF(cpu)  ((cpu) ? 0x34 : 0x1C)
cpu                85 drivers/gpio/gpio-mvebu.c #define GPIO_EDGE_CAUSE_ARMADAXP_OFF(cpu) ((cpu) * 0x4)
cpu                86 drivers/gpio/gpio-mvebu.c #define GPIO_EDGE_MASK_ARMADAXP_OFF(cpu)  (0x10 + (cpu) * 0x4)
cpu                87 drivers/gpio/gpio-mvebu.c #define GPIO_LEVEL_MASK_ARMADAXP_OFF(cpu) (0x20 + (cpu) * 0x4)
cpu               140 drivers/gpio/gpio-mvebu.c 	int cpu;
cpu               150 drivers/gpio/gpio-mvebu.c 		cpu = smp_processor_id();
cpu               152 drivers/gpio/gpio-mvebu.c 		*offset = GPIO_EDGE_CAUSE_ARMADAXP_OFF(cpu);
cpu               186 drivers/gpio/gpio-mvebu.c 	int cpu;
cpu               195 drivers/gpio/gpio-mvebu.c 		cpu = smp_processor_id();
cpu               197 drivers/gpio/gpio-mvebu.c 		*offset = GPIO_EDGE_MASK_MV78200_OFF(cpu);
cpu               200 drivers/gpio/gpio-mvebu.c 		cpu = smp_processor_id();
cpu               202 drivers/gpio/gpio-mvebu.c 		*offset = GPIO_EDGE_MASK_ARMADAXP_OFF(cpu);
cpu               236 drivers/gpio/gpio-mvebu.c 	int cpu;
cpu               245 drivers/gpio/gpio-mvebu.c 		cpu = smp_processor_id();
cpu               247 drivers/gpio/gpio-mvebu.c 		*offset = GPIO_LEVEL_MASK_MV78200_OFF(cpu);
cpu               250 drivers/gpio/gpio-mvebu.c 		cpu = smp_processor_id();
cpu               252 drivers/gpio/gpio-mvebu.c 		*offset = GPIO_LEVEL_MASK_ARMADAXP_OFF(cpu);
cpu              1097 drivers/gpio/gpio-mvebu.c 	int i, cpu, id;
cpu              1173 drivers/gpio/gpio-mvebu.c 		for (cpu = 0; cpu < 2; cpu++) {
cpu              1175 drivers/gpio/gpio-mvebu.c 				     GPIO_EDGE_MASK_MV78200_OFF(cpu), 0);
cpu              1177 drivers/gpio/gpio-mvebu.c 				     GPIO_LEVEL_MASK_MV78200_OFF(cpu), 0);
cpu              1184 drivers/gpio/gpio-mvebu.c 		for (cpu = 0; cpu < 4; cpu++) {
cpu              1186 drivers/gpio/gpio-mvebu.c 				     GPIO_EDGE_CAUSE_ARMADAXP_OFF(cpu), 0);
cpu              1188 drivers/gpio/gpio-mvebu.c 				     GPIO_EDGE_MASK_ARMADAXP_OFF(cpu), 0);
cpu              1190 drivers/gpio/gpio-mvebu.c 				     GPIO_LEVEL_MASK_ARMADAXP_OFF(cpu), 0);
cpu                21 drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c 	u32 *cpu;
cpu                30 drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c 	cpu = map + offset_in_page(offset);
cpu                33 drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c 		drm_clflush_virt_range(cpu, sizeof(*cpu));
cpu                35 drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c 	*cpu = v;
cpu                38 drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c 		drm_clflush_virt_range(cpu, sizeof(*cpu));
cpu                53 drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c 	u32 *cpu;
cpu                62 drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c 	cpu = map + offset_in_page(offset);
cpu                65 drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c 		drm_clflush_virt_range(cpu, sizeof(*cpu));
cpu                67 drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c 	*v = *cpu;
cpu               117 drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c 		u32 *cpu;
cpu               150 drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c 		cpu = kmap(p) + offset_in_page(offset);
cpu               151 drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c 		drm_clflush_virt_range(cpu, sizeof(*cpu));
cpu               152 drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c 		if (*cpu != (u32)page) {
cpu               163 drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c 			       (u32)page, *cpu);
cpu               166 drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c 		*cpu = 0;
cpu               167 drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c 		drm_clflush_virt_range(cpu, sizeof(*cpu));
cpu               288 drivers/gpu/drm/i915/i915_pmu.c 	int cpu;
cpu               293 drivers/gpu/drm/i915/i915_pmu.c 	for_each_possible_cpu(cpu)
cpu               294 drivers/gpu/drm/i915/i915_pmu.c 		sum += *per_cpu_ptr(desc->kstat_irqs, cpu);
cpu               409 drivers/gpu/drm/i915/i915_pmu.c 	if (event->cpu < 0)
cpu               413 drivers/gpu/drm/i915/i915_pmu.c 	if (!cpumask_test_cpu(event->cpu, &i915_pmu_cpumask))
cpu               986 drivers/gpu/drm/i915/i915_pmu.c static int i915_pmu_cpu_online(unsigned int cpu, struct hlist_node *node)
cpu               994 drivers/gpu/drm/i915/i915_pmu.c 		cpumask_set_cpu(cpu, &i915_pmu_cpumask);
cpu               999 drivers/gpu/drm/i915/i915_pmu.c static int i915_pmu_cpu_offline(unsigned int cpu, struct hlist_node *node)
cpu              1006 drivers/gpu/drm/i915/i915_pmu.c 	if (cpumask_test_and_clear_cpu(cpu, &i915_pmu_cpumask)) {
cpu              1007 drivers/gpu/drm/i915/i915_pmu.c 		target = cpumask_any_but(topology_sibling_cpumask(cpu), cpu);
cpu              1011 drivers/gpu/drm/i915/i915_pmu.c 			perf_pmu_migrate_context(&pmu->base, cpu, target);
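Note: i915_pmu.c above uses the common uncore-PMU trick for CPU hotplug: event context lives on one designated CPU, and when that CPU goes offline the context is migrated to a surviving sibling. Roughly, and with hypothetical demo_* names:

    #include <linux/perf_event.h>
    #include <linux/cpumask.h>
    #include <linux/topology.h>

    static struct pmu demo_pmu;
    static cpumask_t demo_pmu_cpumask;	/* the single CPU that owns the events */

    static int demo_pmu_cpu_offline(unsigned int cpu)
    {
    	unsigned int target;

    	if (cpumask_test_and_clear_cpu(cpu, &demo_pmu_cpumask)) {
    		/* Prefer a sibling of the departing CPU as the new owner. */
    		target = cpumask_any_but(topology_sibling_cpumask(cpu), cpu);
    		if (target < nr_cpu_ids) {
    			cpumask_set_cpu(target, &demo_pmu_cpumask);
    			perf_pmu_migrate_context(&demo_pmu, cpu, target);
    		}
    	}
    	return 0;
    }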
cpu              1302 drivers/gpu/drm/i915/i915_request.c static unsigned long local_clock_us(unsigned int *cpu)
cpu              1318 drivers/gpu/drm/i915/i915_request.c 	*cpu = get_cpu();
cpu              1325 drivers/gpu/drm/i915/i915_request.c static bool busywait_stop(unsigned long timeout, unsigned int cpu)
cpu              1332 drivers/gpu/drm/i915/i915_request.c 	return this_cpu != cpu;
cpu              1338 drivers/gpu/drm/i915/i915_request.c 	unsigned int cpu;
cpu              1365 drivers/gpu/drm/i915/i915_request.c 	timeout_us += local_clock_us(&cpu);
cpu              1373 drivers/gpu/drm/i915/i915_request.c 		if (busywait_stop(timeout_us, cpu))
cpu               341 drivers/gpu/drm/i915/i915_utils.h 	int cpu, ret, timeout = (US) * 1000; \
cpu               346 drivers/gpu/drm/i915/i915_utils.h 		cpu = smp_processor_id(); \
cpu               366 drivers/gpu/drm/i915/i915_utils.h 			if (unlikely(cpu != smp_processor_id())) { \
cpu               368 drivers/gpu/drm/i915/i915_utils.h 				cpu = smp_processor_id(); \
cpu                43 drivers/gpu/drm/lima/lima_vm.c 		vm->bts[pbe].cpu[bte] = 0;
cpu                57 drivers/gpu/drm/lima/lima_vm.c 		if (!vm->bts[pbe].cpu) {
cpu                62 drivers/gpu/drm/lima/lima_vm.c 			vm->bts[pbe].cpu = dma_alloc_wc(
cpu                65 drivers/gpu/drm/lima/lima_vm.c 			if (!vm->bts[pbe].cpu) {
cpu                72 drivers/gpu/drm/lima/lima_vm.c 			pd = vm->pd.cpu + (pbe << LIMA_VM_NUM_PT_PER_BT_SHIFT);
cpu                79 drivers/gpu/drm/lima/lima_vm.c 		vm->bts[pbe].cpu[bte] = dma[i++] | LIMA_VM_FLAGS_CACHE;
cpu               212 drivers/gpu/drm/lima/lima_vm.c 	vm->pd.cpu = dma_alloc_wc(dev->dev, LIMA_PAGE_SIZE, &vm->pd.dma,
cpu               214 drivers/gpu/drm/lima/lima_vm.c 	if (!vm->pd.cpu)
cpu               230 drivers/gpu/drm/lima/lima_vm.c 	dma_free_wc(dev->dev, LIMA_PAGE_SIZE, vm->pd.cpu, vm->pd.dma);
cpu               244 drivers/gpu/drm/lima/lima_vm.c 		if (vm->bts[i].cpu)
cpu               246 drivers/gpu/drm/lima/lima_vm.c 				    vm->bts[i].cpu, vm->bts[i].dma);
cpu               249 drivers/gpu/drm/lima/lima_vm.c 	if (vm->pd.cpu)
cpu               250 drivers/gpu/drm/lima/lima_vm.c 		dma_free_wc(vm->dev->dev, LIMA_PAGE_SIZE, vm->pd.cpu, vm->pd.dma);
cpu               260 drivers/gpu/drm/lima/lima_vm.c 	if (!vm->pd.cpu)
cpu               263 drivers/gpu/drm/lima/lima_vm.c 	pd = vm->pd.cpu;
cpu               265 drivers/gpu/drm/lima/lima_vm.c 		if (!vm->bts[i].cpu)
cpu               268 drivers/gpu/drm/lima/lima_vm.c 		pt = vm->bts[i].cpu;
cpu                25 drivers/gpu/drm/lima/lima_vm.h 	u32 *cpu;
cpu                61 drivers/gpu/drm/vc4/vc4_hdmi.c 	struct snd_soc_dai_link_component cpu;
cpu              1109 drivers/gpu/drm/vc4/vc4_hdmi.c 	dai_link->cpus		= &hdmi->audio.cpu;
cpu               768 drivers/hv/channel_mgmt.c 	int cpu;
cpu               787 drivers/hv/channel_mgmt.c 		for_each_online_cpu(cpu) {
cpu               789 drivers/hv/channel_mgmt.c 				= per_cpu_ptr(hv_context.cpu_context, cpu);
cpu               816 drivers/hv/channel_mgmt.c 	for_each_online_cpu(cpu) {
cpu               818 drivers/hv/channel_mgmt.c 			= per_cpu_ptr(hv_context.cpu_context, cpu);
cpu                76 drivers/hv/hv.c 	int cpu;
cpu                84 drivers/hv/hv.c 	for_each_present_cpu(cpu) {
cpu                85 drivers/hv/hv.c 		hv_cpu = per_cpu_ptr(hv_context.cpu_context, cpu);
cpu                96 drivers/hv/hv.c 	for_each_present_cpu(cpu) {
cpu                97 drivers/hv/hv.c 		hv_cpu = per_cpu_ptr(hv_context.cpu_context, cpu);
cpu               136 drivers/hv/hv.c 	int cpu;
cpu               138 drivers/hv/hv.c 	for_each_present_cpu(cpu) {
cpu               140 drivers/hv/hv.c 			= per_cpu_ptr(hv_context.cpu_context, cpu);
cpu               157 drivers/hv/hv.c void hv_synic_enable_regs(unsigned int cpu)
cpu               160 drivers/hv/hv.c 		= per_cpu_ptr(hv_context.cpu_context, cpu);
cpu               201 drivers/hv/hv.c int hv_synic_init(unsigned int cpu)
cpu               203 drivers/hv/hv.c 	hv_synic_enable_regs(cpu);
cpu               205 drivers/hv/hv.c 	hv_stimer_init(cpu);
cpu               213 drivers/hv/hv.c void hv_synic_disable_regs(unsigned int cpu)
cpu               246 drivers/hv/hv.c int hv_synic_cleanup(unsigned int cpu)
cpu               260 drivers/hv/hv.c 		if (channel->target_cpu == cpu) {
cpu               266 drivers/hv/hv.c 			if (sc->target_cpu == cpu) {
cpu               280 drivers/hv/hv.c 	hv_stimer_cleanup(cpu);
cpu               282 drivers/hv/hv.c 	hv_synic_disable_regs(cpu);
cpu               172 drivers/hv/hyperv_vmbus.h extern void hv_synic_enable_regs(unsigned int cpu);
cpu               173 drivers/hv/hyperv_vmbus.h extern int hv_synic_init(unsigned int cpu);
cpu               175 drivers/hv/hyperv_vmbus.h extern void hv_synic_disable_regs(unsigned int cpu);
cpu               176 drivers/hv/hyperv_vmbus.h extern int hv_synic_cleanup(unsigned int cpu);
cpu              1631 drivers/hv/vmbus_drv.c static VMBUS_CHAN_ATTR(cpu, S_IRUGO, show_target_cpu, NULL);
cpu              2352 drivers/hv/vmbus_drv.c 	int cpu;
cpu              2360 drivers/hv/vmbus_drv.c 	cpu = smp_processor_id();
cpu              2361 drivers/hv/vmbus_drv.c 	hv_stimer_cleanup(cpu);
cpu              2362 drivers/hv/vmbus_drv.c 	hv_synic_disable_regs(cpu);
cpu              2449 drivers/hv/vmbus_drv.c 	int cpu;
cpu              2459 drivers/hv/vmbus_drv.c 	for_each_online_cpu(cpu) {
cpu              2461 drivers/hv/vmbus_drv.c 			= per_cpu_ptr(hv_context.cpu_context, cpu);
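Note: the hv entries above lean on a dynamically allocated per-CPU context (hv_context.cpu_context) addressed through per_cpu_ptr() under for_each_present_cpu()/for_each_online_cpu(). The bare pattern, with an invented demo_cpu_ctx structure, is something like:

    #include <linux/percpu.h>
    #include <linux/cpumask.h>
    #include <linux/gfp.h>

    struct demo_cpu_ctx {
    	void *msg_page;		/* one page of per-CPU scratch space */
    };

    static struct demo_cpu_ctx __percpu *demo_ctx;

    static int demo_alloc_contexts(void)
    {
    	int cpu;

    	demo_ctx = alloc_percpu(struct demo_cpu_ctx);
    	if (!demo_ctx)
    		return -ENOMEM;

    	for_each_present_cpu(cpu) {
    		struct demo_cpu_ctx *ctx = per_cpu_ptr(demo_ctx, cpu);

    		ctx->msg_page = (void *)get_zeroed_page(GFP_KERNEL);
    		if (!ctx->msg_page)
    			return -ENOMEM;	/* caller frees what was allocated */
    	}
    	return 0;
    }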
cpu                49 drivers/hwmon/coretemp.c #define TO_CORE_ID(cpu)		(cpu_data(cpu).cpu_core_id)
cpu                50 drivers/hwmon/coretemp.c #define TO_ATTR_NO(cpu)		(TO_CORE_ID(cpu) + BASE_SYSFS_ATTR_NO)
cpu                53 drivers/hwmon/coretemp.c #define for_each_sibling(i, cpu) \
cpu                54 drivers/hwmon/coretemp.c 	for_each_cpu(i, topology_sibling_cpumask(cpu))
cpu                56 drivers/hwmon/coretemp.c #define for_each_sibling(i, cpu)	for (i = 0; false; )
cpu                77 drivers/hwmon/coretemp.c 	unsigned int cpu;
cpu               126 drivers/hwmon/coretemp.c 	rdmsr_on_cpu(tdata->cpu, tdata->status_reg, &eax, &edx);
cpu               162 drivers/hwmon/coretemp.c 		rdmsr_on_cpu(tdata->cpu, tdata->status_reg, &eax, &edx);
cpu               407 drivers/hwmon/coretemp.c static int chk_ucode_version(unsigned int cpu)
cpu               409 drivers/hwmon/coretemp.c 	struct cpuinfo_x86 *c = &cpu_data(cpu);
cpu               423 drivers/hwmon/coretemp.c static struct platform_device *coretemp_get_pdev(unsigned int cpu)
cpu               425 drivers/hwmon/coretemp.c 	int id = topology_logical_die_id(cpu);
cpu               432 drivers/hwmon/coretemp.c static struct temp_data *init_temp_data(unsigned int cpu, int pkg_flag)
cpu               443 drivers/hwmon/coretemp.c 	tdata->cpu = cpu;
cpu               444 drivers/hwmon/coretemp.c 	tdata->cpu_core_id = TO_CORE_ID(cpu);
cpu               450 drivers/hwmon/coretemp.c static int create_core_data(struct platform_device *pdev, unsigned int cpu,
cpu               455 drivers/hwmon/coretemp.c 	struct cpuinfo_x86 *c = &cpu_data(cpu);
cpu               465 drivers/hwmon/coretemp.c 	attr_no = pkg_flag ? PKG_SYSFS_ATTR_NO : TO_ATTR_NO(cpu);
cpu               470 drivers/hwmon/coretemp.c 	tdata = init_temp_data(cpu, pkg_flag);
cpu               475 drivers/hwmon/coretemp.c 	err = rdmsr_safe_on_cpu(cpu, tdata->status_reg, &eax, &edx);
cpu               480 drivers/hwmon/coretemp.c 	tdata->tjmax = get_tjmax(c, cpu, &pdev->dev);
cpu               488 drivers/hwmon/coretemp.c 		err = rdmsr_safe_on_cpu(cpu, MSR_IA32_TEMPERATURE_TARGET,
cpu               512 drivers/hwmon/coretemp.c coretemp_add_core(struct platform_device *pdev, unsigned int cpu, int pkg_flag)
cpu               514 drivers/hwmon/coretemp.c 	if (create_core_data(pdev, cpu, pkg_flag))
cpu               515 drivers/hwmon/coretemp.c 		dev_err(&pdev->dev, "Adding Core %u failed\n", cpu);
cpu               567 drivers/hwmon/coretemp.c static struct platform_device *coretemp_device_add(unsigned int cpu)
cpu               569 drivers/hwmon/coretemp.c 	int err, zoneid = topology_logical_die_id(cpu);
cpu               589 drivers/hwmon/coretemp.c static int coretemp_cpu_online(unsigned int cpu)
cpu               591 drivers/hwmon/coretemp.c 	struct platform_device *pdev = coretemp_get_pdev(cpu);
cpu               592 drivers/hwmon/coretemp.c 	struct cpuinfo_x86 *c = &cpu_data(cpu);
cpu               612 drivers/hwmon/coretemp.c 		if (chk_ucode_version(cpu))
cpu               621 drivers/hwmon/coretemp.c 		pdev = coretemp_device_add(cpu);
cpu               630 drivers/hwmon/coretemp.c 			coretemp_add_core(pdev, cpu, 1);
cpu               638 drivers/hwmon/coretemp.c 	if (!cpumask_intersects(&pdata->cpumask, topology_sibling_cpumask(cpu)))
cpu               639 drivers/hwmon/coretemp.c 		coretemp_add_core(pdev, cpu, 0);
cpu               641 drivers/hwmon/coretemp.c 	cpumask_set_cpu(cpu, &pdata->cpumask);
cpu               645 drivers/hwmon/coretemp.c static int coretemp_cpu_offline(unsigned int cpu)
cpu               647 drivers/hwmon/coretemp.c 	struct platform_device *pdev = coretemp_get_pdev(cpu);
cpu               664 drivers/hwmon/coretemp.c 	indx = TO_ATTR_NO(cpu);
cpu               671 drivers/hwmon/coretemp.c 	cpumask_clear_cpu(cpu, &pd->cpumask);
cpu               678 drivers/hwmon/coretemp.c 	target = cpumask_any_and(&pd->cpumask, topology_sibling_cpumask(cpu));
cpu               681 drivers/hwmon/coretemp.c 	} else if (tdata && tdata->cpu == cpu) {
cpu               683 drivers/hwmon/coretemp.c 		tdata->cpu = target;
cpu               693 drivers/hwmon/coretemp.c 		zone_devices[topology_logical_die_id(cpu)] = NULL;
cpu               703 drivers/hwmon/coretemp.c 	if (tdata && tdata->cpu == cpu) {
cpu               706 drivers/hwmon/coretemp.c 		tdata->cpu = target;
cpu               141 drivers/hwmon/fam15h_power.c 	int cpu, cu;
cpu               143 drivers/hwmon/fam15h_power.c 	cpu = smp_processor_id();
cpu               149 drivers/hwmon/fam15h_power.c 	cu = cpu_data(cpu).cpu_core_id;
cpu               165 drivers/hwmon/fam15h_power.c 	int ret, cpu;
cpu               183 drivers/hwmon/fam15h_power.c 	for_each_online_cpu(cpu) {
cpu               184 drivers/hwmon/fam15h_power.c 		this_core = topology_core_id(cpu);
cpu               192 drivers/hwmon/fam15h_power.c 		cpumask_set_cpu(cpumask_any(topology_sibling_cpumask(cpu)), mask);
cpu               174 drivers/hwmon/ibmpowernv.c 	int cpu;
cpu               176 drivers/hwmon/ibmpowernv.c 	for_each_possible_cpu(cpu)
cpu               177 drivers/hwmon/ibmpowernv.c 		if (get_hard_smp_processor_id(cpu) == hwcpu)
cpu               178 drivers/hwmon/ibmpowernv.c 			return cpu;
cpu               207 drivers/hwmon/via-cputemp.c 	unsigned int cpu;
cpu               213 drivers/hwmon/via-cputemp.c static int via_cputemp_online(unsigned int cpu)
cpu               219 drivers/hwmon/via-cputemp.c 	pdev = platform_device_alloc(DRVNAME, cpu);
cpu               239 drivers/hwmon/via-cputemp.c 	pdev_entry->cpu = cpu;
cpu               254 drivers/hwmon/via-cputemp.c static int via_cputemp_down_prep(unsigned int cpu)
cpu               260 drivers/hwmon/via-cputemp.c 		if (p->cpu == cpu) {
cpu                88 drivers/hwtracing/coresight/coresight-cpu-debug.c 	int		cpu;
cpu               171 drivers/hwtracing/coresight/coresight-cpu-debug.c 			__func__, drvdata->cpu);
cpu               379 drivers/hwtracing/coresight/coresight-cpu-debug.c 	int cpu;
cpu               390 drivers/hwtracing/coresight/coresight-cpu-debug.c 	for_each_possible_cpu(cpu) {
cpu               391 drivers/hwtracing/coresight/coresight-cpu-debug.c 		drvdata = per_cpu(debug_drvdata, cpu);
cpu               395 drivers/hwtracing/coresight/coresight-cpu-debug.c 		dev_emerg(drvdata->dev, "CPU[%d]:\n", drvdata->cpu);
cpu               413 drivers/hwtracing/coresight/coresight-cpu-debug.c 	int cpu, ret = 0;
cpu               422 drivers/hwtracing/coresight/coresight-cpu-debug.c 	for_each_possible_cpu(cpu) {
cpu               423 drivers/hwtracing/coresight/coresight-cpu-debug.c 		drvdata = per_cpu(debug_drvdata, cpu);
cpu               431 drivers/hwtracing/coresight/coresight-cpu-debug.c 			cpumask_set_cpu(cpu, &mask);
cpu               441 drivers/hwtracing/coresight/coresight-cpu-debug.c 	for_each_cpu(cpu, &mask) {
cpu               442 drivers/hwtracing/coresight/coresight-cpu-debug.c 		drvdata = per_cpu(debug_drvdata, cpu);
cpu               452 drivers/hwtracing/coresight/coresight-cpu-debug.c 	int cpu, ret, err = 0;
cpu               459 drivers/hwtracing/coresight/coresight-cpu-debug.c 	for_each_possible_cpu(cpu) {
cpu               460 drivers/hwtracing/coresight/coresight-cpu-debug.c 		drvdata = per_cpu(debug_drvdata, cpu);
cpu               570 drivers/hwtracing/coresight/coresight-cpu-debug.c 	drvdata->cpu = coresight_get_cpu(dev);
cpu               571 drivers/hwtracing/coresight/coresight-cpu-debug.c 	if (drvdata->cpu < 0)
cpu               572 drivers/hwtracing/coresight/coresight-cpu-debug.c 		return drvdata->cpu;
cpu               574 drivers/hwtracing/coresight/coresight-cpu-debug.c 	if (per_cpu(debug_drvdata, drvdata->cpu)) {
cpu               576 drivers/hwtracing/coresight/coresight-cpu-debug.c 			drvdata->cpu);
cpu               591 drivers/hwtracing/coresight/coresight-cpu-debug.c 	per_cpu(debug_drvdata, drvdata->cpu) = drvdata;
cpu               592 drivers/hwtracing/coresight/coresight-cpu-debug.c 	ret = smp_call_function_single(drvdata->cpu, debug_init_arch_data,
cpu               597 drivers/hwtracing/coresight/coresight-cpu-debug.c 		dev_err(dev, "CPU%d debug arch init failed\n", drvdata->cpu);
cpu               603 drivers/hwtracing/coresight/coresight-cpu-debug.c 			drvdata->cpu);
cpu               620 drivers/hwtracing/coresight/coresight-cpu-debug.c 	dev_info(dev, "Coresight debug-CPU%d initialized\n", drvdata->cpu);
cpu               626 drivers/hwtracing/coresight/coresight-cpu-debug.c 	per_cpu(debug_drvdata, drvdata->cpu) = NULL;
cpu               635 drivers/hwtracing/coresight/coresight-cpu-debug.c 	per_cpu(debug_drvdata, drvdata->cpu) = NULL;
cpu               380 drivers/hwtracing/coresight/coresight-etb10.c 	node = (event->cpu == -1) ? NUMA_NO_NODE : cpu_to_node(event->cpu);
cpu                68 drivers/hwtracing/coresight/coresight-etm-perf.c etm_event_cpu_path_ptr(struct etm_event_data *data, int cpu)
cpu                70 drivers/hwtracing/coresight/coresight-etm-perf.c 	return per_cpu_ptr(data->path, cpu);
cpu                74 drivers/hwtracing/coresight/coresight-etm-perf.c etm_event_cpu_path(struct etm_event_data *data, int cpu)
cpu                76 drivers/hwtracing/coresight/coresight-etm-perf.c 	return *etm_event_cpu_path_ptr(data, cpu);
cpu                84 drivers/hwtracing/coresight/coresight-etm-perf.c 	int node = event->cpu == -1 ? -1 : cpu_to_node(event->cpu);
cpu               125 drivers/hwtracing/coresight/coresight-etm-perf.c 	int cpu;
cpu               135 drivers/hwtracing/coresight/coresight-etm-perf.c 	cpu = cpumask_first(mask);
cpu               136 drivers/hwtracing/coresight/coresight-etm-perf.c 	sink = coresight_get_sink(etm_event_cpu_path(event_data, cpu));
cpu               142 drivers/hwtracing/coresight/coresight-etm-perf.c 	int cpu;
cpu               152 drivers/hwtracing/coresight/coresight-etm-perf.c 	for_each_cpu(cpu, mask) {
cpu               155 drivers/hwtracing/coresight/coresight-etm-perf.c 		ppath = etm_event_cpu_path_ptr(event_data, cpu);
cpu               165 drivers/hwtracing/coresight/coresight-etm-perf.c static void *alloc_event_data(int cpu)
cpu               177 drivers/hwtracing/coresight/coresight-etm-perf.c 	if (cpu != -1)
cpu               178 drivers/hwtracing/coresight/coresight-etm-perf.c 		cpumask_set_cpu(cpu, mask);
cpu               211 drivers/hwtracing/coresight/coresight-etm-perf.c 	int cpu = event->cpu;
cpu               216 drivers/hwtracing/coresight/coresight-etm-perf.c 	event_data = alloc_event_data(cpu);
cpu               241 drivers/hwtracing/coresight/coresight-etm-perf.c 	for_each_cpu(cpu, mask) {
cpu               245 drivers/hwtracing/coresight/coresight-etm-perf.c 		csdev = per_cpu(csdev_src, cpu);
cpu               252 drivers/hwtracing/coresight/coresight-etm-perf.c 			cpumask_clear_cpu(cpu, mask);
cpu               263 drivers/hwtracing/coresight/coresight-etm-perf.c 			cpumask_clear_cpu(cpu, mask);
cpu               267 drivers/hwtracing/coresight/coresight-etm-perf.c 		*etm_event_cpu_path_ptr(event_data, cpu) = path;
cpu               271 drivers/hwtracing/coresight/coresight-etm-perf.c 	cpu = cpumask_first(mask);
cpu               272 drivers/hwtracing/coresight/coresight-etm-perf.c 	if (cpu >= nr_cpu_ids)
cpu               296 drivers/hwtracing/coresight/coresight-etm-perf.c 	int cpu = smp_processor_id();
cpu               299 drivers/hwtracing/coresight/coresight-etm-perf.c 	struct coresight_device *sink, *csdev = per_cpu(csdev_src, cpu);
cpu               313 drivers/hwtracing/coresight/coresight-etm-perf.c 	path = etm_event_cpu_path(event_data, cpu);
cpu               345 drivers/hwtracing/coresight/coresight-etm-perf.c 	int cpu = smp_processor_id();
cpu               347 drivers/hwtracing/coresight/coresight-etm-perf.c 	struct coresight_device *sink, *csdev = per_cpu(csdev_src, cpu);
cpu               358 drivers/hwtracing/coresight/coresight-etm-perf.c 	path = etm_event_cpu_path(event_data, cpu);
cpu               488 drivers/hwtracing/coresight/coresight-etm-perf.c 	int ret = 0, cpu = source_ops(csdev)->cpu_id(csdev);
cpu               492 drivers/hwtracing/coresight/coresight-etm-perf.c 	sprintf(entry, "cpu%d", cpu);
cpu               501 drivers/hwtracing/coresight/coresight-etm-perf.c 		per_cpu(csdev_src, cpu) = csdev;
cpu               504 drivers/hwtracing/coresight/coresight-etm-perf.c 		per_cpu(csdev_src, cpu) = NULL;
cpu               237 drivers/hwtracing/coresight/coresight-etm.h 	int				cpu;
cpu              1183 drivers/hwtracing/coresight/coresight-etm3x-sysfs.c 	val = drvdata->cpu;
cpu              1187 drivers/hwtracing/coresight/coresight-etm3x-sysfs.c static DEVICE_ATTR_RO(cpu);
cpu               429 drivers/hwtracing/coresight/coresight-etm3x.c 		drvdata->cpu, rc);
cpu               451 drivers/hwtracing/coresight/coresight-etm3x.c 	return drvdata->cpu;
cpu               495 drivers/hwtracing/coresight/coresight-etm3x.c 	if (WARN_ON_ONCE(drvdata->cpu != smp_processor_id()))
cpu               516 drivers/hwtracing/coresight/coresight-etm3x.c 	if (cpu_online(drvdata->cpu)) {
cpu               518 drivers/hwtracing/coresight/coresight-etm3x.c 		ret = smp_call_function_single(drvdata->cpu,
cpu               587 drivers/hwtracing/coresight/coresight-etm3x.c 		"cpu: %d disable smp call done\n", drvdata->cpu);
cpu               594 drivers/hwtracing/coresight/coresight-etm3x.c 	if (WARN_ON_ONCE(drvdata->cpu != smp_processor_id()))
cpu               629 drivers/hwtracing/coresight/coresight-etm3x.c 	smp_call_function_single(drvdata->cpu, etm_disable_hw, drvdata, 1);
cpu               679 drivers/hwtracing/coresight/coresight-etm3x.c static int etm_online_cpu(unsigned int cpu)
cpu               681 drivers/hwtracing/coresight/coresight-etm3x.c 	if (!etmdrvdata[cpu])
cpu               684 drivers/hwtracing/coresight/coresight-etm3x.c 	if (etmdrvdata[cpu]->boot_enable && !etmdrvdata[cpu]->sticky_enable)
cpu               685 drivers/hwtracing/coresight/coresight-etm3x.c 		coresight_enable(etmdrvdata[cpu]->csdev);
cpu               689 drivers/hwtracing/coresight/coresight-etm3x.c static int etm_starting_cpu(unsigned int cpu)
cpu               691 drivers/hwtracing/coresight/coresight-etm3x.c 	if (!etmdrvdata[cpu])
cpu               694 drivers/hwtracing/coresight/coresight-etm3x.c 	spin_lock(&etmdrvdata[cpu]->spinlock);
cpu               695 drivers/hwtracing/coresight/coresight-etm3x.c 	if (!etmdrvdata[cpu]->os_unlock) {
cpu               696 drivers/hwtracing/coresight/coresight-etm3x.c 		etm_os_unlock(etmdrvdata[cpu]);
cpu               697 drivers/hwtracing/coresight/coresight-etm3x.c 		etmdrvdata[cpu]->os_unlock = true;
cpu               700 drivers/hwtracing/coresight/coresight-etm3x.c 	if (local_read(&etmdrvdata[cpu]->mode))
cpu               701 drivers/hwtracing/coresight/coresight-etm3x.c 		etm_enable_hw(etmdrvdata[cpu]);
cpu               702 drivers/hwtracing/coresight/coresight-etm3x.c 	spin_unlock(&etmdrvdata[cpu]->spinlock);
cpu               706 drivers/hwtracing/coresight/coresight-etm3x.c static int etm_dying_cpu(unsigned int cpu)
cpu               708 drivers/hwtracing/coresight/coresight-etm3x.c 	if (!etmdrvdata[cpu])
cpu               711 drivers/hwtracing/coresight/coresight-etm3x.c 	spin_lock(&etmdrvdata[cpu]->spinlock);
cpu               712 drivers/hwtracing/coresight/coresight-etm3x.c 	if (local_read(&etmdrvdata[cpu]->mode))
cpu               713 drivers/hwtracing/coresight/coresight-etm3x.c 		etm_disable_hw(etmdrvdata[cpu]);
cpu               714 drivers/hwtracing/coresight/coresight-etm3x.c 	spin_unlock(&etmdrvdata[cpu]->spinlock);
cpu               782 drivers/hwtracing/coresight/coresight-etm3x.c 	drvdata->traceid = coresight_get_trace_id(drvdata->cpu);
cpu               818 drivers/hwtracing/coresight/coresight-etm3x.c 	drvdata->cpu = coresight_get_cpu(dev);
cpu               819 drivers/hwtracing/coresight/coresight-etm3x.c 	if (drvdata->cpu < 0)
cpu               820 drivers/hwtracing/coresight/coresight-etm3x.c 		return drvdata->cpu;
cpu               822 drivers/hwtracing/coresight/coresight-etm3x.c 	desc.name  = devm_kasprintf(dev, GFP_KERNEL, "etm%d", drvdata->cpu);
cpu               827 drivers/hwtracing/coresight/coresight-etm3x.c 	etmdrvdata[drvdata->cpu] = drvdata;
cpu               829 drivers/hwtracing/coresight/coresight-etm3x.c 	if (smp_call_function_single(drvdata->cpu,
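Note: both ETM drivers above configure their trace unit strictly from the CPU it belongs to, either directly when already running there or via smp_call_function_single() otherwise. Stripped to its skeleton (demo_* names are illustrative):

    #include <linux/smp.h>

    /* Runs on the target CPU, in IPI context with interrupts disabled. */
    static void demo_configure_on_cpu(void *info)
    {
    	/* Touch the per-CPU hardware registers here. */
    }

    static int demo_configure(int cpu)
    {
    	/* wait=1: do not return until demo_configure_on_cpu() has finished
    	 * on @cpu; fails with -ENXIO if the CPU is offline. */
    	return smp_call_function_single(cpu, demo_configure_on_cpu, NULL, 1);
    }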
cpu               266 drivers/hwtracing/coresight/coresight-etm4x-sysfs.c 	drvdata->trcid = drvdata->cpu + 1;
cpu              2005 drivers/hwtracing/coresight/coresight-etm4x-sysfs.c 	val = drvdata->cpu;
cpu              2009 drivers/hwtracing/coresight/coresight-etm4x-sysfs.c static DEVICE_ATTR_RO(cpu);
cpu              2083 drivers/hwtracing/coresight/coresight-etm4x-sysfs.c 	smp_call_function_single(drvdata->cpu, do_smp_cross_read, &reg, 1);
cpu                73 drivers/hwtracing/coresight/coresight-etm4x.c 	return drvdata->cpu;
cpu               203 drivers/hwtracing/coresight/coresight-etm4x.c 		drvdata->cpu, rc);
cpu               374 drivers/hwtracing/coresight/coresight-etm4x.c 	if (WARN_ON_ONCE(drvdata->cpu != smp_processor_id())) {
cpu               403 drivers/hwtracing/coresight/coresight-etm4x.c 	ret = smp_call_function_single(drvdata->cpu,
cpu               478 drivers/hwtracing/coresight/coresight-etm4x.c 		"cpu: %d disable smp call done\n", drvdata->cpu);
cpu               488 drivers/hwtracing/coresight/coresight-etm4x.c 	if (WARN_ON_ONCE(drvdata->cpu != smp_processor_id()))
cpu               523 drivers/hwtracing/coresight/coresight-etm4x.c 	smp_call_function_single(drvdata->cpu, etm4_disable_hw, drvdata, 1);
cpu              1046 drivers/hwtracing/coresight/coresight-etm4x.c static int etm4_online_cpu(unsigned int cpu)
cpu              1048 drivers/hwtracing/coresight/coresight-etm4x.c 	if (!etmdrvdata[cpu])
cpu              1051 drivers/hwtracing/coresight/coresight-etm4x.c 	if (etmdrvdata[cpu]->boot_enable && !etmdrvdata[cpu]->sticky_enable)
cpu              1052 drivers/hwtracing/coresight/coresight-etm4x.c 		coresight_enable(etmdrvdata[cpu]->csdev);
cpu              1056 drivers/hwtracing/coresight/coresight-etm4x.c static int etm4_starting_cpu(unsigned int cpu)
cpu              1058 drivers/hwtracing/coresight/coresight-etm4x.c 	if (!etmdrvdata[cpu])
cpu              1061 drivers/hwtracing/coresight/coresight-etm4x.c 	spin_lock(&etmdrvdata[cpu]->spinlock);
cpu              1062 drivers/hwtracing/coresight/coresight-etm4x.c 	if (!etmdrvdata[cpu]->os_unlock)
cpu              1063 drivers/hwtracing/coresight/coresight-etm4x.c 		etm4_os_unlock(etmdrvdata[cpu]);
cpu              1065 drivers/hwtracing/coresight/coresight-etm4x.c 	if (local_read(&etmdrvdata[cpu]->mode))
cpu              1066 drivers/hwtracing/coresight/coresight-etm4x.c 		etm4_enable_hw(etmdrvdata[cpu]);
cpu              1067 drivers/hwtracing/coresight/coresight-etm4x.c 	spin_unlock(&etmdrvdata[cpu]->spinlock);
cpu              1071 drivers/hwtracing/coresight/coresight-etm4x.c static int etm4_dying_cpu(unsigned int cpu)
cpu              1073 drivers/hwtracing/coresight/coresight-etm4x.c 	if (!etmdrvdata[cpu])
cpu              1076 drivers/hwtracing/coresight/coresight-etm4x.c 	spin_lock(&etmdrvdata[cpu]->spinlock);
cpu              1077 drivers/hwtracing/coresight/coresight-etm4x.c 	if (local_read(&etmdrvdata[cpu]->mode))
cpu              1078 drivers/hwtracing/coresight/coresight-etm4x.c 		etm4_disable_hw(etmdrvdata[cpu]);
cpu              1079 drivers/hwtracing/coresight/coresight-etm4x.c 	spin_unlock(&etmdrvdata[cpu]->spinlock);
cpu              1085 drivers/hwtracing/coresight/coresight-etm4x.c 	drvdata->trcid = coresight_get_trace_id(drvdata->cpu);
cpu              1113 drivers/hwtracing/coresight/coresight-etm4x.c 	drvdata->cpu = coresight_get_cpu(dev);
cpu              1114 drivers/hwtracing/coresight/coresight-etm4x.c 	if (drvdata->cpu < 0)
cpu              1115 drivers/hwtracing/coresight/coresight-etm4x.c 		return drvdata->cpu;
cpu              1117 drivers/hwtracing/coresight/coresight-etm4x.c 	desc.name = devm_kasprintf(dev, GFP_KERNEL, "etm%d", drvdata->cpu);
cpu              1122 drivers/hwtracing/coresight/coresight-etm4x.c 	etmdrvdata[drvdata->cpu] = drvdata;
cpu              1124 drivers/hwtracing/coresight/coresight-etm4x.c 	if (smp_call_function_single(drvdata->cpu,
cpu              1177 drivers/hwtracing/coresight/coresight-etm4x.c 		 drvdata->cpu, drvdata->arch >> 4, drvdata->arch & 0xf);
cpu               345 drivers/hwtracing/coresight/coresight-etm4x.h 	int				cpu;
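
Several etm4x entries above (enable, disable, probe) funnel hardware access through smp_call_function_single() so the trace unit is only touched from its own CPU. A hedged sketch of that cross-call pattern follows; the struct and function names are invented for illustration.

#include <linux/bug.h>
#include <linux/smp.h>

struct demo_etm {
	int	cpu;		/* CPU this trace unit belongs to */
	bool	enabled;
};

static void demo_enable_hw(void *info)
{
	struct demo_etm *etm = info;

	/* Runs on etm->cpu in IPI context, so per-CPU registers are safe. */
	if (WARN_ON_ONCE(etm->cpu != smp_processor_id()))
		return;
	etm->enabled = true;
}

static int demo_enable(struct demo_etm *etm)
{
	/* wait=1: block until the target CPU has run demo_enable_hw() */
	return smp_call_function_single(etm->cpu, demo_enable_hw, etm, 1);
}
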
cpu               151 drivers/hwtracing/coresight/coresight-platform.c 	int cpu;
cpu               161 drivers/hwtracing/coresight/coresight-platform.c 	cpu = of_cpu_node_to_id(dn);
cpu               164 drivers/hwtracing/coresight/coresight-platform.c 	return cpu;
cpu               724 drivers/hwtracing/coresight/coresight-platform.c 	int cpu;
cpu               735 drivers/hwtracing/coresight/coresight-platform.c 	cpu = acpi_handle_to_logical_cpuid(cpu_handle);
cpu               736 drivers/hwtracing/coresight/coresight-platform.c 	if (cpu >= nr_cpu_ids)
cpu               738 drivers/hwtracing/coresight/coresight-platform.c 	return cpu;
cpu               395 drivers/hwtracing/coresight/coresight-tmc-etf.c 	node = (event->cpu == -1) ? NUMA_NO_NODE : cpu_to_node(event->cpu);
cpu              1210 drivers/hwtracing/coresight/coresight-tmc-etr.c 	node = (event->cpu == -1) ? NUMA_NO_NODE : cpu_to_node(event->cpu);
cpu              1320 drivers/hwtracing/coresight/coresight-tmc-etr.c 	if (event->cpu == -1)
cpu              1336 drivers/hwtracing/coresight/coresight-tmc-etr.c 	node = (event->cpu == -1) ? NUMA_NO_NODE : cpu_to_node(event->cpu);
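
The TMC ETF/ETR entries above map a perf event's CPU to a NUMA node before allocating trace buffers, treating event->cpu == -1 (a per-thread event) as "no node preference". A small sketch of that mapping, with a hypothetical allocation helper:

#include <linux/numa.h>
#include <linux/perf_event.h>
#include <linux/slab.h>
#include <linux/topology.h>

static void *demo_alloc_trace_buf(struct perf_event *event, size_t size)
{
	int node = (event->cpu == -1) ? NUMA_NO_NODE : cpu_to_node(event->cpu);

	/* Allocate close to the CPU that will fill the buffer, if known. */
	return kzalloc_node(size, GFP_KERNEL, node);
}
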
cpu               743 drivers/hwtracing/coresight/coresight.c 	int cpu, ret = 0;
cpu               801 drivers/hwtracing/coresight/coresight.c 		cpu = source_ops(csdev)->cpu_id(csdev);
cpu               802 drivers/hwtracing/coresight/coresight.c 		per_cpu(tracer_path, cpu) = path;
cpu               827 drivers/hwtracing/coresight/coresight.c 	int cpu, ret;
cpu               841 drivers/hwtracing/coresight/coresight.c 		cpu = source_ops(csdev)->cpu_id(csdev);
cpu               842 drivers/hwtracing/coresight/coresight.c 		path = per_cpu(tracer_path, cpu);
cpu               843 drivers/hwtracing/coresight/coresight.c 		per_cpu(tracer_path, cpu) = NULL;
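
The coresight.c entries above store the trace path built for a CPU-bound source in a per-CPU pointer, keyed by the CPU reported by the source's cpu_id() op, so the disable path can retrieve and clear it later. A minimal sketch of that bookkeeping; names are illustrative.

#include <linux/percpu.h>

struct demo_path;			/* opaque here */

static DEFINE_PER_CPU(struct demo_path *, demo_tracer_path);

static void demo_store_path(int cpu, struct demo_path *path)
{
	per_cpu(demo_tracer_path, cpu) = path;
}

static struct demo_path *demo_steal_path(int cpu)
{
	struct demo_path *path = per_cpu(demo_tracer_path, cpu);

	per_cpu(demo_tracer_path, cpu) = NULL;
	return path;
}
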
cpu               905 drivers/idle/intel_idle.c 	int cpu = smp_processor_id();
cpu               912 drivers/idle/intel_idle.c 		leave_mm(cpu);
cpu              1170 drivers/idle/intel_idle.c 	int cpu, package_num, num_sockets = 1;
cpu              1172 drivers/idle/intel_idle.c 	for_each_online_cpu(cpu) {
cpu              1173 drivers/idle/intel_idle.c 		package_num = topology_physical_package_id(cpu);
cpu              1388 drivers/idle/intel_idle.c static int intel_idle_cpu_init(unsigned int cpu)
cpu              1392 drivers/idle/intel_idle.c 	dev = per_cpu_ptr(intel_idle_cpuidle_devices, cpu);
cpu              1393 drivers/idle/intel_idle.c 	dev->cpu = cpu;
cpu              1396 drivers/idle/intel_idle.c 		pr_debug("cpuidle_register_device %d failed!\n", cpu);
cpu              1409 drivers/idle/intel_idle.c static int intel_idle_cpu_online(unsigned int cpu)
cpu              1421 drivers/idle/intel_idle.c 	dev = per_cpu_ptr(intel_idle_cpuidle_devices, cpu);
cpu              1423 drivers/idle/intel_idle.c 		return intel_idle_cpu_init(cpu);
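
The intel_idle entries above register one cpuidle device per CPU from a hotplug "online" callback, filling in dev->cpu before registration. A minimal sketch under the assumption of a statically defined per-CPU device (the real driver allocates its per-CPU array dynamically):

#include <linux/cpuidle.h>
#include <linux/percpu.h>

static DEFINE_PER_CPU(struct cpuidle_device, demo_idle_dev);

static int demo_idle_cpu_online(unsigned int cpu)
{
	struct cpuidle_device *dev = per_cpu_ptr(&demo_idle_dev, cpu);

	if (dev->registered)
		return 0;

	dev->cpu = cpu;
	return cpuidle_register_device(dev);
}
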
cpu                30 drivers/infiniband/hw/efa/efa.h 	int cpu;
cpu               108 drivers/infiniband/hw/efa/efa_main.c 	u32 cpu;
cpu               116 drivers/infiniband/hw/efa/efa_main.c 	cpu = cpumask_first(cpu_online_mask);
cpu               117 drivers/infiniband/hw/efa/efa_main.c 	dev->admin_irq.cpu = cpu;
cpu               118 drivers/infiniband/hw/efa/efa_main.c 	cpumask_set_cpu(cpu,
cpu               105 drivers/infiniband/hw/hfi1/affinity.c 	int cpu;
cpu               115 drivers/infiniband/hw/hfi1/affinity.c 	cpu = cpumask_first(diff);
cpu               116 drivers/infiniband/hw/hfi1/affinity.c 	if (cpu >= nr_cpu_ids) /* empty */
cpu               117 drivers/infiniband/hw/hfi1/affinity.c 		cpu = -EINVAL;
cpu               119 drivers/infiniband/hw/hfi1/affinity.c 		cpumask_set_cpu(cpu, &set->used);
cpu               121 drivers/infiniband/hw/hfi1/affinity.c 	return cpu;
cpu               124 drivers/infiniband/hw/hfi1/affinity.c static void cpu_mask_set_put(struct cpu_mask_set *set, int cpu)
cpu               129 drivers/infiniband/hw/hfi1/affinity.c 	cpumask_clear_cpu(cpu, &set->used);
cpu               367 drivers/infiniband/hw/hfi1/affinity.c 	int cpu;
cpu               372 drivers/infiniband/hw/hfi1/affinity.c 		cpu = -1;
cpu               377 drivers/infiniband/hw/hfi1/affinity.c 		cpu = -1;
cpu               391 drivers/infiniband/hw/hfi1/affinity.c 		cpu = cpumask_first(non_intr_cpus);
cpu               393 drivers/infiniband/hw/hfi1/affinity.c 		cpu = cpumask_first(available_cpus);
cpu               395 drivers/infiniband/hw/hfi1/affinity.c 	if (cpu >= nr_cpu_ids) { /* empty */
cpu               396 drivers/infiniband/hw/hfi1/affinity.c 		cpu = -1;
cpu               399 drivers/infiniband/hw/hfi1/affinity.c 	cpumask_set_cpu(cpu, &set->used);
cpu               402 drivers/infiniband/hw/hfi1/affinity.c 	return cpu;
cpu               405 drivers/infiniband/hw/hfi1/affinity.c static void _dev_comp_vect_cpu_put(struct hfi1_devdata *dd, int cpu)
cpu               409 drivers/infiniband/hw/hfi1/affinity.c 	if (cpu < 0)
cpu               412 drivers/infiniband/hw/hfi1/affinity.c 	cpu_mask_set_put(set, cpu);
cpu               418 drivers/infiniband/hw/hfi1/affinity.c 	int i, cpu;
cpu               424 drivers/infiniband/hw/hfi1/affinity.c 		cpu = dd->comp_vect_mappings[i];
cpu               425 drivers/infiniband/hw/hfi1/affinity.c 		_dev_comp_vect_cpu_put(dd, cpu);
cpu               429 drivers/infiniband/hw/hfi1/affinity.c 			  rvt_get_ibdev_name(&(dd)->verbs_dev.rdi), cpu, i);
cpu               444 drivers/infiniband/hw/hfi1/affinity.c 	int i, cpu, ret;
cpu               469 drivers/infiniband/hw/hfi1/affinity.c 		cpu = _dev_comp_vect_cpu_get(dd, entry, non_intr_cpus,
cpu               471 drivers/infiniband/hw/hfi1/affinity.c 		if (cpu < 0) {
cpu               476 drivers/infiniband/hw/hfi1/affinity.c 		dd->comp_vect_mappings[i] = cpu;
cpu               479 drivers/infiniband/hw/hfi1/affinity.c 			  rvt_get_ibdev_name(&(dd)->verbs_dev.rdi), i, cpu);
cpu               604 drivers/infiniband/hw/hfi1/affinity.c 	int i, cpu;
cpu               611 drivers/infiniband/hw/hfi1/affinity.c 		cpu = per_cpu_affinity_put_max(&dd->comp_vect->mask,
cpu               614 drivers/infiniband/hw/hfi1/affinity.c 		if (cpu >= 0)
cpu               615 drivers/infiniband/hw/hfi1/affinity.c 			cpumask_clear_cpu(cpu, &dd->comp_vect->mask);
cpu               791 drivers/infiniband/hw/hfi1/affinity.c static void hfi1_update_sdma_affinity(struct hfi1_msix_entry *msix, int cpu)
cpu               799 drivers/infiniband/hw/hfi1/affinity.c 	if (cpu > num_online_cpus() || cpu == sde->cpu)
cpu               807 drivers/infiniband/hw/hfi1/affinity.c 	old_cpu = sde->cpu;
cpu               808 drivers/infiniband/hw/hfi1/affinity.c 	sde->cpu = cpu;
cpu               810 drivers/infiniband/hw/hfi1/affinity.c 	cpumask_set_cpu(cpu, &msix->mask);
cpu               813 drivers/infiniband/hw/hfi1/affinity.c 		   sde->this_idx, cpu);
cpu               821 drivers/infiniband/hw/hfi1/affinity.c 	cpumask_set_cpu(cpu, &set->mask);
cpu               822 drivers/infiniband/hw/hfi1/affinity.c 	cpumask_set_cpu(cpu, &set->used);
cpu               842 drivers/infiniband/hw/hfi1/affinity.c 	int cpu = cpumask_first(mask);
cpu               848 drivers/infiniband/hw/hfi1/affinity.c 	hfi1_update_sdma_affinity(msix, cpu);
cpu               894 drivers/infiniband/hw/hfi1/affinity.c 	int cpu = -1;
cpu               908 drivers/infiniband/hw/hfi1/affinity.c 		cpu = cpumask_first(&entry->general_intr_mask);
cpu               913 drivers/infiniband/hw/hfi1/affinity.c 			cpu = cpumask_first(&entry->general_intr_mask);
cpu               928 drivers/infiniband/hw/hfi1/affinity.c 	if (cpu == -1 && set) {
cpu               932 drivers/infiniband/hw/hfi1/affinity.c 		cpu = cpu_mask_set_get_first(set, diff);
cpu               933 drivers/infiniband/hw/hfi1/affinity.c 		if (cpu < 0) {
cpu               936 drivers/infiniband/hw/hfi1/affinity.c 			return cpu;
cpu               942 drivers/infiniband/hw/hfi1/affinity.c 	cpumask_set_cpu(cpu, &msix->mask);
cpu               945 drivers/infiniband/hw/hfi1/affinity.c 		    extra, cpu);
cpu               949 drivers/infiniband/hw/hfi1/affinity.c 		sde->cpu = cpu;
cpu              1039 drivers/infiniband/hw/hfi1/affinity.c 	int cpu = -1, ret, i;
cpu              1059 drivers/infiniband/hw/hfi1/affinity.c 		cpu = cpumask_first(proc_mask);
cpu              1060 drivers/infiniband/hw/hfi1/affinity.c 		cpumask_set_cpu(cpu, &set->used);
cpu              1203 drivers/infiniband/hw/hfi1/affinity.c 	cpu = cpumask_first(available_mask);
cpu              1204 drivers/infiniband/hw/hfi1/affinity.c 	if (cpu >= nr_cpu_ids) /* empty */
cpu              1205 drivers/infiniband/hw/hfi1/affinity.c 		cpu = -1;
cpu              1207 drivers/infiniband/hw/hfi1/affinity.c 		cpumask_set_cpu(cpu, &set->used);
cpu              1210 drivers/infiniband/hw/hfi1/affinity.c 	hfi1_cdbg(PROC, "Process assigned to CPU %d", cpu);
cpu              1220 drivers/infiniband/hw/hfi1/affinity.c 	return cpu;
cpu              1223 drivers/infiniband/hw/hfi1/affinity.c void hfi1_put_proc_affinity(int cpu)
cpu              1228 drivers/infiniband/hw/hfi1/affinity.c 	if (cpu < 0)
cpu              1232 drivers/infiniband/hw/hfi1/affinity.c 	cpu_mask_set_put(set, cpu);
cpu              1233 drivers/infiniband/hw/hfi1/affinity.c 	hfi1_cdbg(PROC, "Returning CPU %d for future process assignment", cpu);
cpu                97 drivers/infiniband/hw/hfi1/affinity.h void hfi1_put_proc_affinity(int cpu);
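
The hfi1 affinity entries above implement a small CPU allocator: the get side picks the first CPU that is in the allowed mask but not yet in the "used" mask and records it, and the put side releases it. An illustrative standalone version; the structure and function names are made up.

#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/gfp.h>

struct demo_cpu_set {
	struct cpumask mask;	/* CPUs we are allowed to hand out */
	struct cpumask used;	/* CPUs currently handed out */
};

static int demo_cpu_get(struct demo_cpu_set *set)
{
	cpumask_var_t diff;
	int cpu;

	if (!zalloc_cpumask_var(&diff, GFP_KERNEL))
		return -ENOMEM;

	cpumask_andnot(diff, &set->mask, &set->used);
	cpu = cpumask_first(diff);
	if (cpu >= nr_cpu_ids)
		cpu = -EINVAL;			/* nothing left */
	else
		cpumask_set_cpu(cpu, &set->used);

	free_cpumask_var(diff);
	return cpu;
}

static void demo_cpu_put(struct demo_cpu_set *set, int cpu)
{
	cpumask_clear_cpu(cpu, &set->used);
}
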
cpu              1621 drivers/infiniband/hw/hfi1/chip.c 	int cpu;
cpu              1624 drivers/infiniband/hw/hfi1/chip.c 	for_each_possible_cpu(cpu)
cpu              1625 drivers/infiniband/hw/hfi1/chip.c 		counter += *per_cpu_ptr(cntr, cpu);
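
The chip.c entry above (and the pio.c, qib_init.c and qib_sysfs.c entries later in this listing) share the same lock-free counter idiom: each CPU bumps its own per-CPU slot, and readers sum over all possible CPUs. A hedged sketch, assuming the counter was allocated with alloc_percpu():

#include <linux/cpumask.h>
#include <linux/percpu.h>
#include <linux/types.h>

static u64 __percpu *demo_cntr;		/* demo_cntr = alloc_percpu(u64); */

static void demo_cntr_inc(void)
{
	this_cpu_inc(*demo_cntr);	/* no lock: touches only this CPU's slot */
}

static u64 demo_cntr_read(void)
{
	u64 sum = 0;
	int cpu;

	for_each_possible_cpu(cpu)
		sum += *per_cpu_ptr(demo_cntr, cpu);

	return sum;			/* approximate while writers keep running */
}
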
cpu               190 drivers/infiniband/hw/hfi1/iowait.h 				   struct workqueue_struct *wq, int cpu)
cpu               192 drivers/infiniband/hw/hfi1/iowait.h 	return !!queue_work_on(cpu, wq, &wait->wait[IOWAIT_IB_SE].iowork);
cpu               202 drivers/infiniband/hw/hfi1/iowait.h 				       struct workqueue_struct *wq, int cpu)
cpu               204 drivers/infiniband/hw/hfi1/iowait.h 	return !!queue_work_on(cpu, wq, &wait->wait[IOWAIT_TID_SE].iowork);
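
The iowait.h entries above queue SDMA work on a specific CPU with queue_work_on() and use the !! conversion to report whether the work was newly queued. A minimal wrapper showing that call; the names are illustrative.

#include <linux/workqueue.h>

static bool demo_schedule_on(struct work_struct *work,
			     struct workqueue_struct *wq, int cpu)
{
	/* returns false if the work item was already pending somewhere */
	return !!queue_work_on(cpu, wq, work);
}
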
cpu               676 drivers/infiniband/hw/hfi1/pio.c 	int cpu;
cpu               679 drivers/infiniband/hw/hfi1/pio.c 	for_each_possible_cpu(cpu)
cpu               680 drivers/infiniband/hw/hfi1/pio.c 		ret += *per_cpu_ptr(sc->buffers_allocated, cpu);
cpu               686 drivers/infiniband/hw/hfi1/pio.c 	int cpu;
cpu               688 drivers/infiniband/hw/hfi1/pio.c 	for_each_possible_cpu(cpu)
cpu               689 drivers/infiniband/hw/hfi1/pio.c 		(*per_cpu_ptr(sc->buffers_allocated, cpu)) = 0;
cpu               388 drivers/infiniband/hw/hfi1/qp.c 			       priv->s_sde->cpu :
cpu               477 drivers/infiniband/hw/hfi1/ruc.c 		    workqueue_congested(ps->cpu, ps->ppd->hfi1_wq)) {
cpu               588 drivers/infiniband/hw/hfi1/ruc.c 	ps.cpu = priv->s_sde ? priv->s_sde->cpu :
cpu               938 drivers/infiniband/hw/hfi1/sdma.c 	unsigned long cpu;
cpu               970 drivers/infiniband/hw/hfi1/sdma.c 	for_each_cpu(cpu, mask) {
cpu               972 drivers/infiniband/hw/hfi1/sdma.c 		if (cpumask_test_cpu(cpu, &sde->cpu_mask)) {
cpu               973 drivers/infiniband/hw/hfi1/sdma.c 			cpumask_set_cpu(cpu, new_mask);
cpu               977 drivers/infiniband/hw/hfi1/sdma.c 		rht_node = rhashtable_lookup_fast(dd->sdma_rht, &cpu,
cpu               992 drivers/infiniband/hw/hfi1/sdma.c 			rht_node->cpu_id = cpu;
cpu              1004 drivers/infiniband/hw/hfi1/sdma.c 					   cpu);
cpu              1029 drivers/infiniband/hw/hfi1/sdma.c 		cpumask_set_cpu(cpu, new_mask);
cpu              1033 drivers/infiniband/hw/hfi1/sdma.c 	for_each_cpu(cpu, cpu_online_mask) {
cpu              1037 drivers/infiniband/hw/hfi1/sdma.c 		if (cpumask_test_cpu(cpu, mask))
cpu              1040 drivers/infiniband/hw/hfi1/sdma.c 		rht_node = rhashtable_lookup_fast(dd->sdma_rht, &cpu,
cpu              2223 drivers/infiniband/hw/hfi1/sdma.c 		   sde->cpu,
cpu              2433 drivers/infiniband/hw/hfi1/sdma.c 	queue_work_on(sde->cpu, system_highpri_wq, &sde->flush_worker);
cpu              2531 drivers/infiniband/hw/hfi1/sdma.c 	queue_work_on(sde->cpu, system_highpri_wq, &sde->flush_worker);
cpu               337 drivers/infiniband/hw/hfi1/sdma.h 	int cpu;
cpu               925 drivers/infiniband/hw/hfi1/sdma.h 	iowait_schedule(wait, ppd->hfi1_wq, sde->cpu);
cpu               623 drivers/infiniband/hw/hfi1/tid_rdma.c 			     priv->s_sde->cpu :
cpu              5362 drivers/infiniband/hw/hfi1/tid_rdma.c 	ps.cpu = priv->s_sde ? priv->s_sde->cpu :
cpu              5413 drivers/infiniband/hw/hfi1/tid_rdma.c 				   priv->s_sde->cpu :
cpu               241 drivers/infiniband/hw/hfi1/verbs.h 	int cpu;
cpu              1171 drivers/infiniband/hw/qib/qib_file_ops.c 		int cpu;
cpu              1173 drivers/infiniband/hw/qib/qib_file_ops.c 		cpu = find_first_zero_bit(qib_cpulist,
cpu              1175 drivers/infiniband/hw/qib/qib_file_ops.c 		if (cpu == qib_cpulist_count)
cpu              1180 drivers/infiniband/hw/qib/qib_file_ops.c 			__set_bit(cpu, qib_cpulist);
cpu              1181 drivers/infiniband/hw/qib/qib_file_ops.c 			fd->rec_cpu_num = cpu;
cpu              1528 drivers/infiniband/hw/qib/qib_file_ops.c static int find_hca(unsigned int cpu, int *unit)
cpu              1551 drivers/infiniband/hw/qib/qib_file_ops.c 			if (cpu_to_node(cpu) ==
cpu              1626 drivers/infiniband/hw/qib/qib_file_ops.c 		const unsigned int cpu = cpumask_first(current->cpus_ptr);
cpu              1629 drivers/infiniband/hw/qib/qib_file_ops.c 		if (weight == 1 && !test_bit(cpu, qib_cpulist))
cpu              1630 drivers/infiniband/hw/qib/qib_file_ops.c 			if (!find_hca(cpu, &unit) && unit >= 0)
cpu              2693 drivers/infiniband/hw/qib/qib_iba7322.c static void qib_update_rhdrq_dca(struct qib_ctxtdata *rcd, int cpu)
cpu              2700 drivers/infiniband/hw/qib/qib_iba7322.c 	if (cspec->rhdr_cpu[rcd->ctxt] != cpu) {
cpu              2703 drivers/infiniband/hw/qib/qib_iba7322.c 		cspec->rhdr_cpu[rcd->ctxt] = cpu;
cpu              2707 drivers/infiniband/hw/qib/qib_iba7322.c 			(u64) dca3_get_tag(&dd->pcidev->dev, cpu) << rmp->lsb;
cpu              2709 drivers/infiniband/hw/qib/qib_iba7322.c 			"Ctxt %d cpu %d dca %llx\n", rcd->ctxt, cpu,
cpu              2718 drivers/infiniband/hw/qib/qib_iba7322.c static void qib_update_sdma_dca(struct qib_pportdata *ppd, int cpu)
cpu              2726 drivers/infiniband/hw/qib/qib_iba7322.c 	if (cspec->sdma_cpu[pidx] != cpu) {
cpu              2727 drivers/infiniband/hw/qib/qib_iba7322.c 		cspec->sdma_cpu[pidx] = cpu;
cpu              2732 drivers/infiniband/hw/qib/qib_iba7322.c 			(u64) dca3_get_tag(&dd->pcidev->dev, cpu) <<
cpu              2737 drivers/infiniband/hw/qib/qib_iba7322.c 			"sdma %d cpu %d dca %llx\n", ppd->hw_pidx, cpu,
cpu              2792 drivers/infiniband/hw/qib/qib_iba7322.c 	int cpu = cpumask_first(mask);
cpu              2797 drivers/infiniband/hw/qib/qib_iba7322.c 		qib_update_rhdrq_dca(rcd, cpu);
cpu              2801 drivers/infiniband/hw/qib/qib_iba7322.c 		qib_update_sdma_dca(ppd, cpu);
cpu              1049 drivers/infiniband/hw/qib/qib_init.c 	int cpu;
cpu              1052 drivers/infiniband/hw/qib/qib_init.c 	for_each_possible_cpu(cpu)
cpu              1053 drivers/infiniband/hw/qib/qib_init.c 		int_counter += *per_cpu_ptr(dd->int_counter, cpu);
cpu              1646 drivers/infiniband/hw/qib/qib_mad.c 	int cpu;
cpu              1649 drivers/infiniband/hw/qib/qib_mad.c 	for_each_possible_cpu(cpu) {
cpu              1650 drivers/infiniband/hw/qib/qib_mad.c 		p = per_cpu_ptr(ibp->pmastats, cpu);
cpu               469 drivers/infiniband/hw/qib/qib_sysfs.c 	int cpu;
cpu               472 drivers/infiniband/hw/qib/qib_sysfs.c 	for_each_possible_cpu(cpu)
cpu               473 drivers/infiniband/hw/qib/qib_sysfs.c 		counter += *per_cpu_ptr(cntr, cpu);
cpu               553 drivers/infiniband/sw/siw/siw.h void siw_put_tx_cpu(int cpu);
cpu                91 drivers/infiniband/sw/siw/siw_main.c 	int cpu, assigned = 0;
cpu                93 drivers/infiniband/sw/siw/siw_main.c 	for_each_online_cpu(cpu) {
cpu                95 drivers/infiniband/sw/siw/siw_main.c 		if (cpu % cpumask_weight(topology_sibling_cpumask(cpu)))
cpu                98 drivers/infiniband/sw/siw/siw_main.c 		siw_tx_thread[cpu] =
cpu                99 drivers/infiniband/sw/siw/siw_main.c 			kthread_create(siw_run_sq, (unsigned long *)(long)cpu,
cpu               100 drivers/infiniband/sw/siw/siw_main.c 				       "siw_tx/%d", cpu);
cpu               101 drivers/infiniband/sw/siw/siw_main.c 		if (IS_ERR(siw_tx_thread[cpu])) {
cpu               102 drivers/infiniband/sw/siw/siw_main.c 			siw_tx_thread[cpu] = NULL;
cpu               105 drivers/infiniband/sw/siw/siw_main.c 		kthread_bind(siw_tx_thread[cpu], cpu);
cpu               107 drivers/infiniband/sw/siw/siw_main.c 		wake_up_process(siw_tx_thread[cpu]);
cpu               188 drivers/infiniband/sw/siw/siw_main.c 	int i, num_cpus, cpu, min_use, node = sdev->numa_node, tx_cpu = -1;
cpu               204 drivers/infiniband/sw/siw/siw_main.c 	cpu = cpumask_first(tx_cpumask);
cpu               207 drivers/infiniband/sw/siw/siw_main.c 	     i++, cpu = cpumask_next(cpu, tx_cpumask)) {
cpu               211 drivers/infiniband/sw/siw/siw_main.c 		if (!siw_tx_thread[cpu])
cpu               214 drivers/infiniband/sw/siw/siw_main.c 		usage = atomic_read(&per_cpu(siw_use_cnt, cpu));
cpu               216 drivers/infiniband/sw/siw/siw_main.c 			tx_cpu = cpu;
cpu               232 drivers/infiniband/sw/siw/siw_main.c void siw_put_tx_cpu(int cpu)
cpu               234 drivers/infiniband/sw/siw/siw_main.c 	atomic_dec(&per_cpu(siw_use_cnt, cpu));
cpu               639 drivers/infiniband/sw/siw/siw_main.c 	int cpu;
cpu               641 drivers/infiniband/sw/siw/siw_main.c 	for_each_possible_cpu(cpu) {
cpu               642 drivers/infiniband/sw/siw/siw_main.c 		if (siw_tx_thread[cpu]) {
cpu               643 drivers/infiniband/sw/siw/siw_main.c 			siw_stop_tx_thread(cpu);
cpu               644 drivers/infiniband/sw/siw/siw_main.c 			siw_tx_thread[cpu] = NULL;
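
The siw_main.c entries above create one TX kthread per online CPU, bind it to that CPU before its first wakeup, and later stop it and clear the slot. A hedged sketch of the create/bind/wake part; the thread body and array are placeholders, not siw's real send-queue loop.

#include <linux/cpumask.h>
#include <linux/err.h>
#include <linux/kthread.h>
#include <linux/sched.h>

static struct task_struct *demo_tx_thread[NR_CPUS];

static int demo_tx_fn(void *data)
{
	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);
		schedule();		/* a real thread would drain a send queue */
	}
	__set_current_state(TASK_RUNNING);
	return 0;
}

static int demo_start_tx_threads(void)
{
	int cpu;

	for_each_online_cpu(cpu) {
		struct task_struct *t;

		t = kthread_create(demo_tx_fn, (void *)(unsigned long)cpu,
				   "demo_tx/%d", cpu);
		if (IS_ERR(t))
			return PTR_ERR(t);

		kthread_bind(t, cpu);	/* pin before it ever runs */
		demo_tx_thread[cpu] = t;
		wake_up_process(t);
	}
	return 0;
}
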
cpu              3778 drivers/infiniband/ulp/srp/ib_srp.c 	int ret, node_idx, node, cpu, i;
cpu              3923 drivers/infiniband/ulp/srp/ib_srp.c 		for_each_online_cpu(cpu) {
cpu              3924 drivers/infiniband/ulp/srp/ib_srp.c 			if (cpu_to_node(cpu) != node)
cpu              1220 drivers/infiniband/ulp/srpt/ib_srpt.c 	int tag, cpu;
cpu              1224 drivers/infiniband/ulp/srpt/ib_srpt.c 	tag = sbitmap_queue_get(&ch->sess->sess_tag_pool, &cpu);
cpu              1242 drivers/infiniband/ulp/srpt/ib_srpt.c 	ioctx->cmd.map_cpu = cpu;
cpu              4589 drivers/iommu/amd_iommu.c int amd_iommu_update_ga(int cpu, bool is_run, void *data)
cpu              4614 drivers/iommu/amd_iommu.c 		if (cpu >= 0) {
cpu              4616 drivers/iommu/amd_iommu.c 						APICID_TO_IRTE_DEST_LO(cpu);
cpu              4618 drivers/iommu/amd_iommu.c 						APICID_TO_IRTE_DEST_HI(cpu);
cpu               763 drivers/iommu/fsl_pamu_domain.c 					    stash_attr->cpu);
cpu              4717 drivers/iommu/intel-iommu.c static void free_all_cpu_cached_iovas(unsigned int cpu)
cpu              4734 drivers/iommu/intel-iommu.c 			free_cpu_cached_iovas(cpu, &domain->iovad);
cpu              4739 drivers/iommu/intel-iommu.c static int intel_iommu_cpu_dead(unsigned int cpu)
cpu              4741 drivers/iommu/intel-iommu.c 	free_all_cpu_cached_iovas(cpu);
cpu                83 drivers/iommu/iova.c 	int cpu;
cpu                95 drivers/iommu/iova.c 	for_each_possible_cpu(cpu) {
cpu                98 drivers/iommu/iova.c 		fq = per_cpu_ptr(queue, cpu);
cpu               425 drivers/iommu/iova.c 		unsigned int cpu;
cpu               432 drivers/iommu/iova.c 		for_each_online_cpu(cpu)
cpu               433 drivers/iommu/iova.c 			free_cpu_cached_iovas(cpu, iovad);
cpu               511 drivers/iommu/iova.c 	int cpu;
cpu               521 drivers/iommu/iova.c 	for_each_possible_cpu(cpu) {
cpu               522 drivers/iommu/iova.c 		struct iova_fq *fq = per_cpu_ptr(iovad->fq, cpu);
cpu               533 drivers/iommu/iova.c 	int cpu;
cpu               538 drivers/iommu/iova.c 	for_each_possible_cpu(cpu) {
cpu               542 drivers/iommu/iova.c 		fq = per_cpu_ptr(iovad->fq, cpu);
cpu               864 drivers/iommu/iova.c 	unsigned int cpu;
cpu               874 drivers/iommu/iova.c 		for_each_possible_cpu(cpu) {
cpu               875 drivers/iommu/iova.c 			cpu_rcache = per_cpu_ptr(rcache->cpu_rcaches, cpu);
cpu              1011 drivers/iommu/iova.c 	unsigned int cpu;
cpu              1016 drivers/iommu/iova.c 		for_each_possible_cpu(cpu) {
cpu              1017 drivers/iommu/iova.c 			cpu_rcache = per_cpu_ptr(rcache->cpu_rcaches, cpu);
cpu              1030 drivers/iommu/iova.c void free_cpu_cached_iovas(unsigned int cpu, struct iova_domain *iovad)
cpu              1039 drivers/iommu/iova.c 		cpu_rcache = per_cpu_ptr(rcache->cpu_rcaches, cpu);
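
The intel-iommu and iova entries above hook a CPU hotplug "dead" callback so the per-CPU IOVA caches of a CPU that just went offline are flushed. An illustrative registration of such a teardown-only callback; the flush helper below is a stand-in, not the real free_cpu_cached_iovas().

#include <linux/cpuhotplug.h>
#include <linux/init.h>
#include <linux/kernel.h>

static void demo_flush_cpu_cache(unsigned int cpu)
{
	/* stand-in for walking the domains and freeing cpu's cached IOVAs */
	pr_debug("demo: releasing per-CPU IOVA cache of CPU %u\n", cpu);
}

static int demo_cpu_dead(unsigned int cpu)
{
	demo_flush_cpu_cache(cpu);
	return 0;
}

static int __init demo_iova_hotplug_init(void)
{
	int ret;

	/* startup callback is NULL: only the dead transition matters here */
	ret = cpuhp_setup_state_nocalls(CPUHP_BP_PREPARE_DYN, "demo/iova:dead",
					NULL, demo_cpu_dead);
	return ret < 0 ? ret : 0;
}
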
cpu               134 drivers/irqchip/irq-armada-370-xp.c #define ARMADA_370_XP_INT_CAUSE_PERF(cpu)	(1 << cpu)
cpu               321 drivers/irqchip/irq-armada-370-xp.c 	int cpu;
cpu               324 drivers/irqchip/irq-armada-370-xp.c 	cpu = cpumask_any_and(mask_val, cpu_online_mask);
cpu               325 drivers/irqchip/irq-armada-370-xp.c 	mask = 1UL << cpu_logical_map(cpu);
cpu               333 drivers/irqchip/irq-armada-370-xp.c 	irq_data_update_effective_affinity(d, cpumask_of(cpu));
cpu               410 drivers/irqchip/irq-armada-370-xp.c 	int cpu;
cpu               414 drivers/irqchip/irq-armada-370-xp.c 	for_each_cpu(cpu, mask)
cpu               415 drivers/irqchip/irq-armada-370-xp.c 		map |= 1 << cpu_logical_map(cpu);
cpu               450 drivers/irqchip/irq-armada-370-xp.c static int armada_xp_mpic_starting_cpu(unsigned int cpu)
cpu               458 drivers/irqchip/irq-armada-370-xp.c static int mpic_cascaded_starting_cpu(unsigned int cpu)
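
The armada entries just above, and the gic, hip04, csky, ls-scfg-msi and plic entries further down, all repeat one set_affinity idiom: pick a single online CPU from the requested mask, program the controller's routing for it, and record the effective affinity. A hedged sketch of that shape; the register programming is only a placeholder.

#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/irq.h>

static int demo_set_affinity(struct irq_data *d,
			     const struct cpumask *mask_val, bool force)
{
	unsigned int cpu;

	if (force)
		cpu = cpumask_first(mask_val);
	else
		cpu = cpumask_any_and(mask_val, cpu_online_mask);

	if (cpu >= nr_cpu_ids)
		return -EINVAL;

	/* ... program the controller's per-CPU routing for 'cpu' here ... */

	irq_data_update_effective_affinity(d, cpumask_of(cpu));
	return IRQ_SET_MASK_OK_DONE;
}
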
cpu                26 drivers/irqchip/irq-bcm2836.c 						 int cpu)
cpu                28 drivers/irqchip/irq-bcm2836.c 	void __iomem *reg = intc.base + reg_offset + 4 * cpu;
cpu                35 drivers/irqchip/irq-bcm2836.c 						 int cpu)
cpu                37 drivers/irqchip/irq-bcm2836.c 	void __iomem *reg = intc.base + reg_offset + 4 * cpu;
cpu               126 drivers/irqchip/irq-bcm2836.c 	int cpu = smp_processor_id();
cpu               129 drivers/irqchip/irq-bcm2836.c 	stat = readl_relaxed(intc.base + LOCAL_IRQ_PENDING0 + 4 * cpu);
cpu               133 drivers/irqchip/irq-bcm2836.c 					  LOCAL_MAILBOX0_CLR0 + 16 * cpu);
cpu               151 drivers/irqchip/irq-bcm2836.c 	int cpu;
cpu               160 drivers/irqchip/irq-bcm2836.c 	for_each_cpu(cpu, mask)	{
cpu               161 drivers/irqchip/irq-bcm2836.c 		writel(1 << ipi, mailbox0_base + 16 * cpu);
cpu               165 drivers/irqchip/irq-bcm2836.c static int bcm2836_cpu_starting(unsigned int cpu)
cpu               168 drivers/irqchip/irq-bcm2836.c 					       cpu);
cpu               172 drivers/irqchip/irq-bcm2836.c static int bcm2836_cpu_dying(unsigned int cpu)
cpu               175 drivers/irqchip/irq-bcm2836.c 					     cpu);
cpu               119 drivers/irqchip/irq-bcm6345-l1.c 	struct bcm6345_l1_cpu *cpu;
cpu               124 drivers/irqchip/irq-bcm6345-l1.c 	cpu = intc->cpus[cpu_logical_map(smp_processor_id())];
cpu               126 drivers/irqchip/irq-bcm6345-l1.c 	cpu = intc->cpus[0];
cpu               137 drivers/irqchip/irq-bcm6345-l1.c 		pending = __raw_readl(cpu->map_base + reg_status(intc, idx));
cpu               138 drivers/irqchip/irq-bcm6345-l1.c 		pending &= __raw_readl(cpu->map_base + reg_enable(intc, idx));
cpu               242 drivers/irqchip/irq-bcm6345-l1.c 	struct bcm6345_l1_cpu *cpu;
cpu               255 drivers/irqchip/irq-bcm6345-l1.c 	cpu = intc->cpus[idx] = kzalloc(sizeof(*cpu) + n_words * sizeof(u32),
cpu               257 drivers/irqchip/irq-bcm6345-l1.c 	if (!cpu)
cpu               260 drivers/irqchip/irq-bcm6345-l1.c 	cpu->map_base = ioremap(res.start, sz);
cpu               261 drivers/irqchip/irq-bcm6345-l1.c 	if (!cpu->map_base)
cpu               265 drivers/irqchip/irq-bcm6345-l1.c 		cpu->enable_cache[i] = 0;
cpu               266 drivers/irqchip/irq-bcm6345-l1.c 		__raw_writel(0, cpu->map_base + reg_enable(intc, i));
cpu               269 drivers/irqchip/irq-bcm6345-l1.c 	cpu->parent_irq = irq_of_parse_and_map(dn, idx);
cpu               270 drivers/irqchip/irq-bcm6345-l1.c 	if (!cpu->parent_irq) {
cpu               271 drivers/irqchip/irq-bcm6345-l1.c 		pr_err("failed to map parent interrupt %d\n", cpu->parent_irq);
cpu               274 drivers/irqchip/irq-bcm6345-l1.c 	irq_set_chained_handler_and_data(cpu->parent_irq,
cpu               340 drivers/irqchip/irq-bcm6345-l1.c 		struct bcm6345_l1_cpu *cpu = intc->cpus[idx];
cpu               343 drivers/irqchip/irq-bcm6345-l1.c 				cpu->map_base, cpu->parent_irq);
cpu               350 drivers/irqchip/irq-bcm6345-l1.c 		struct bcm6345_l1_cpu *cpu = intc->cpus[idx];
cpu               352 drivers/irqchip/irq-bcm6345-l1.c 		if (cpu) {
cpu               353 drivers/irqchip/irq-bcm6345-l1.c 			if (cpu->map_base)
cpu               354 drivers/irqchip/irq-bcm6345-l1.c 				iounmap(cpu->map_base);
cpu               355 drivers/irqchip/irq-bcm6345-l1.c 			kfree(cpu);
cpu               117 drivers/irqchip/irq-bcm7038-l1.c 	struct bcm7038_l1_cpu *cpu;
cpu               122 drivers/irqchip/irq-bcm7038-l1.c 	cpu = intc->cpus[cpu_logical_map(smp_processor_id())];
cpu               124 drivers/irqchip/irq-bcm7038-l1.c 	cpu = intc->cpus[0];
cpu               135 drivers/irqchip/irq-bcm7038-l1.c 		pending = l1_readl(cpu->map_base + reg_status(intc, idx)) &
cpu               136 drivers/irqchip/irq-bcm7038-l1.c 			  ~cpu->mask_cache[idx];
cpu               221 drivers/irqchip/irq-bcm7038-l1.c 	int cpu = smp_processor_id();
cpu               225 drivers/irqchip/irq-bcm7038-l1.c 	if (!cpumask_test_cpu(cpu, mask))
cpu               234 drivers/irqchip/irq-bcm7038-l1.c 		cpumask_clear_cpu(cpu, &new_affinity);
cpu               250 drivers/irqchip/irq-bcm7038-l1.c 	struct bcm7038_l1_cpu *cpu;
cpu               265 drivers/irqchip/irq-bcm7038-l1.c 	cpu = intc->cpus[idx] = kzalloc(sizeof(*cpu) + n_words * sizeof(u32),
cpu               267 drivers/irqchip/irq-bcm7038-l1.c 	if (!cpu)
cpu               270 drivers/irqchip/irq-bcm7038-l1.c 	cpu->map_base = ioremap(res.start, sz);
cpu               271 drivers/irqchip/irq-bcm7038-l1.c 	if (!cpu->map_base)
cpu               275 drivers/irqchip/irq-bcm7038-l1.c 		l1_writel(0xffffffff, cpu->map_base + reg_mask_set(intc, i));
cpu               276 drivers/irqchip/irq-bcm7038-l1.c 		cpu->mask_cache[i] = 0xffffffff;
cpu               354 drivers/irqchip/irq-bcm7038-l1.c 		struct bcm7038_l1_cpu *cpu = intc->cpus[idx];
cpu               356 drivers/irqchip/irq-bcm7038-l1.c 		if (cpu) {
cpu               357 drivers/irqchip/irq-bcm7038-l1.c 			if (cpu->map_base)
cpu               358 drivers/irqchip/irq-bcm7038-l1.c 				iounmap(cpu->map_base);
cpu               359 drivers/irqchip/irq-bcm7038-l1.c 			kfree(cpu);
cpu               131 drivers/irqchip/irq-csky-mpintc.c 	unsigned int cpu;
cpu               135 drivers/irqchip/irq-csky-mpintc.c 		cpu = cpumask_any_and(mask_val, cpu_online_mask);
cpu               137 drivers/irqchip/irq-csky-mpintc.c 		cpu = cpumask_first(mask_val);
cpu               139 drivers/irqchip/irq-csky-mpintc.c 	if (cpu >= nr_cpu_ids)
cpu               152 drivers/irqchip/irq-csky-mpintc.c 		cpu = 0;
cpu               154 drivers/irqchip/irq-csky-mpintc.c 		cpu |= BIT(31);
cpu               156 drivers/irqchip/irq-csky-mpintc.c 	writel_relaxed(cpu, INTCG_base + INTCG_CIDSTR + offset);
cpu               158 drivers/irqchip/irq-csky-mpintc.c 	irq_data_update_effective_affinity(d, cpumask_of(cpu));
cpu               231 drivers/irqchip/irq-csky-mpintc.c 	unsigned int cpu, nr_irq;
cpu               264 drivers/irqchip/irq-csky-mpintc.c 	for_each_present_cpu(cpu) {
cpu               265 drivers/irqchip/irq-csky-mpintc.c 		per_cpu(intcl_reg, cpu) = INTCL_base + (INTCL_SIZE * cpu);
cpu               266 drivers/irqchip/irq-csky-mpintc.c 		writel_relaxed(BIT(0), per_cpu(intcl_reg, cpu) + INTCL_PICTLR);
cpu               174 drivers/irqchip/irq-gic-v3-its.c #define gic_data_rdist_cpu(cpu)		(per_cpu_ptr(gic_rdists->rdist, cpu))
cpu              1143 drivers/irqchip/irq-gic-v3-its.c 	unsigned int cpu;
cpu              1162 drivers/irqchip/irq-gic-v3-its.c 	cpu = cpumask_any_and(mask_val, cpu_mask);
cpu              1164 drivers/irqchip/irq-gic-v3-its.c 	if (cpu >= nr_cpu_ids)
cpu              1168 drivers/irqchip/irq-gic-v3-its.c 	if (cpu != its_dev->event_map.col_map[id]) {
cpu              1169 drivers/irqchip/irq-gic-v3-its.c 		target_col = &its_dev->its->collections[cpu];
cpu              1171 drivers/irqchip/irq-gic-v3-its.c 		its_dev->event_map.col_map[id] = cpu;
cpu              1172 drivers/irqchip/irq-gic-v3-its.c 		irq_data_update_effective_affinity(d, cpumask_of(cpu));
cpu              2052 drivers/irqchip/irq-gic-v3-its.c 	int err, cpu;
cpu              2074 drivers/irqchip/irq-gic-v3-its.c 	for_each_possible_cpu(cpu) {
cpu              2079 drivers/irqchip/irq-gic-v3-its.c 			pr_err("Failed to allocate PENDBASE for CPU%d\n", cpu);
cpu              2083 drivers/irqchip/irq-gic-v3-its.c 		gic_data_rdist_cpu(cpu)->pend_page = pend_page;
cpu              2233 drivers/irqchip/irq-gic-v3-its.c 	int cpu = smp_processor_id();
cpu              2240 drivers/irqchip/irq-gic-v3-its.c 		cpu_node = of_get_cpu_node(cpu, NULL);
cpu              2263 drivers/irqchip/irq-gic-v3-its.c 	its->collections[cpu].target_address = target;
cpu              2264 drivers/irqchip/irq-gic-v3-its.c 	its->collections[cpu].col_id = cpu;
cpu              2266 drivers/irqchip/irq-gic-v3-its.c 	its_send_mapc(its, &its->collections[cpu], 1);
cpu              2267 drivers/irqchip/irq-gic-v3-its.c 	its_send_invall(its, &its->collections[cpu]);
cpu              2618 drivers/irqchip/irq-gic-v3-its.c 	int cpu;
cpu              2625 drivers/irqchip/irq-gic-v3-its.c 	cpu = cpumask_first_and(cpu_mask, cpu_online_mask);
cpu              2626 drivers/irqchip/irq-gic-v3-its.c 	if (cpu >= nr_cpu_ids) {
cpu              2630 drivers/irqchip/irq-gic-v3-its.c 		cpu = cpumask_first(cpu_online_mask);
cpu              2633 drivers/irqchip/irq-gic-v3-its.c 	its_dev->event_map.col_map[event] = cpu;
cpu              2634 drivers/irqchip/irq-gic-v3-its.c 	irq_data_update_effective_affinity(d, cpumask_of(cpu));
cpu              2797 drivers/irqchip/irq-gic-v3-its.c 	int cpu = cpumask_first(mask_val);
cpu              2805 drivers/irqchip/irq-gic-v3-its.c 	if (vpe->col_idx != cpu) {
cpu              2808 drivers/irqchip/irq-gic-v3-its.c 		vpe->col_idx = cpu;
cpu              2810 drivers/irqchip/irq-gic-v3-its.c 		its_vpe_db_proxy_move(vpe, from, cpu);
cpu              2813 drivers/irqchip/irq-gic-v3-its.c 	irq_data_update_effective_affinity(d, cpumask_of(cpu));
cpu               879 drivers/irqchip/irq-gic-v3.c 	int i, cpu = smp_processor_id();
cpu               880 drivers/irqchip/irq-gic-v3.c 	u64 mpidr = cpu_logical_map(cpu);
cpu               967 drivers/irqchip/irq-gic-v3.c 	per_cpu(has_rss, cpu) = !!(gic_read_ctlr() & ICC_CTLR_EL1_RSS);
cpu               971 drivers/irqchip/irq-gic-v3.c 		bool have_rss = per_cpu(has_rss, i) && per_cpu(has_rss, cpu);
cpu               976 drivers/irqchip/irq-gic-v3.c 				cpu, (unsigned long)mpidr,
cpu              1039 drivers/irqchip/irq-gic-v3.c static int gic_starting_cpu(unsigned int cpu)
cpu              1052 drivers/irqchip/irq-gic-v3.c 	int next_cpu, cpu = *base_cpu;
cpu              1053 drivers/irqchip/irq-gic-v3.c 	unsigned long mpidr = cpu_logical_map(cpu);
cpu              1056 drivers/irqchip/irq-gic-v3.c 	while (cpu < nr_cpu_ids) {
cpu              1059 drivers/irqchip/irq-gic-v3.c 		next_cpu = cpumask_next(cpu, mask);
cpu              1062 drivers/irqchip/irq-gic-v3.c 		cpu = next_cpu;
cpu              1064 drivers/irqchip/irq-gic-v3.c 		mpidr = cpu_logical_map(cpu);
cpu              1067 drivers/irqchip/irq-gic-v3.c 			cpu--;
cpu              1072 drivers/irqchip/irq-gic-v3.c 	*base_cpu = cpu;
cpu              1097 drivers/irqchip/irq-gic-v3.c 	int cpu;
cpu              1108 drivers/irqchip/irq-gic-v3.c 	for_each_cpu(cpu, mask) {
cpu              1109 drivers/irqchip/irq-gic-v3.c 		u64 cluster_id = MPIDR_TO_SGI_CLUSTER_ID(cpu_logical_map(cpu));
cpu              1112 drivers/irqchip/irq-gic-v3.c 		tlist = gic_compute_target_list(&cpu, mask, cluster_id);
cpu              1131 drivers/irqchip/irq-gic-v3.c 	unsigned int cpu;
cpu              1138 drivers/irqchip/irq-gic-v3.c 		cpu = cpumask_first(mask_val);
cpu              1140 drivers/irqchip/irq-gic-v3.c 		cpu = cpumask_any_and(mask_val, cpu_online_mask);
cpu              1142 drivers/irqchip/irq-gic-v3.c 	if (cpu >= nr_cpu_ids)
cpu              1155 drivers/irqchip/irq-gic-v3.c 	val = gic_mpidr_to_affinity(cpu_logical_map(cpu));
cpu              1168 drivers/irqchip/irq-gic-v3.c 	irq_data_update_effective_affinity(d, cpumask_of(cpu));
cpu              1646 drivers/irqchip/irq-gic-v3.c 			int err, cpu;
cpu              1659 drivers/irqchip/irq-gic-v3.c 			cpu = of_cpu_node_to_id(cpu_node);
cpu              1660 drivers/irqchip/irq-gic-v3.c 			if (WARN_ON(cpu < 0))
cpu              1663 drivers/irqchip/irq-gic-v3.c 			pr_cont("%pOF[%d] ", cpu_node, cpu);
cpu              1665 drivers/irqchip/irq-gic-v3.c 			cpumask_set_cpu(cpu, &part->mask);
cpu               333 drivers/irqchip/irq-gic.c 	unsigned int cpu, shift = (gic_irq(d) % 4) * 8;
cpu               338 drivers/irqchip/irq-gic.c 		cpu = cpumask_any_and(mask_val, cpu_online_mask);
cpu               340 drivers/irqchip/irq-gic.c 		cpu = cpumask_first(mask_val);
cpu               342 drivers/irqchip/irq-gic.c 	if (cpu >= NR_GIC_CPU_IF || cpu >= nr_cpu_ids)
cpu               347 drivers/irqchip/irq-gic.c 	bit = gic_cpu_map[cpu] << shift;
cpu               352 drivers/irqchip/irq-gic.c 	irq_data_update_effective_affinity(d, cpumask_of(cpu));
cpu               518 drivers/irqchip/irq-gic.c 	unsigned int cpu_mask, cpu = smp_processor_id();
cpu               530 drivers/irqchip/irq-gic.c 		if (WARN_ON(cpu >= NR_GIC_CPU_IF))
cpu               535 drivers/irqchip/irq-gic.c 		gic_cpu_map[cpu] = cpu_mask;
cpu               542 drivers/irqchip/irq-gic.c 			if (i != cpu)
cpu               808 drivers/irqchip/irq-gic.c 	int cpu;
cpu               821 drivers/irqchip/irq-gic.c 	for_each_cpu(cpu, mask)
cpu               822 drivers/irqchip/irq-gic.c 		map |= gic_cpu_map[cpu];
cpu               861 drivers/irqchip/irq-gic.c int gic_get_cpu_id(unsigned int cpu)
cpu               865 drivers/irqchip/irq-gic.c 	if (cpu >= NR_GIC_CPU_IF)
cpu               867 drivers/irqchip/irq-gic.c 	cpu_bit = gic_cpu_map[cpu];
cpu               887 drivers/irqchip/irq-gic.c 	int i, ror_val, cpu = smp_processor_id();
cpu               897 drivers/irqchip/irq-gic.c 	cur_cpu_id = __ffs(gic_cpu_map[cpu]);
cpu               904 drivers/irqchip/irq-gic.c 	gic_cpu_map[cpu] = 1 << new_cpu_id;
cpu              1039 drivers/irqchip/irq-gic.c static int gic_starting_cpu(unsigned int cpu)
cpu              1104 drivers/irqchip/irq-gic.c 		unsigned int cpu;
cpu              1114 drivers/irqchip/irq-gic.c 		for_each_possible_cpu(cpu) {
cpu              1115 drivers/irqchip/irq-gic.c 			u32 mpidr = cpu_logical_map(cpu);
cpu              1118 drivers/irqchip/irq-gic.c 			*per_cpu_ptr(gic->dist_base.percpu_base, cpu) =
cpu              1120 drivers/irqchip/irq-gic.c 			*per_cpu_ptr(gic->cpu_base.percpu_base, cpu) =
cpu               151 drivers/irqchip/irq-hip04.c 	unsigned int cpu, shift = (hip04_irq(d) % 2) * 16;
cpu               155 drivers/irqchip/irq-hip04.c 		cpu = cpumask_any_and(mask_val, cpu_online_mask);
cpu               157 drivers/irqchip/irq-hip04.c 		cpu = cpumask_first(mask_val);
cpu               159 drivers/irqchip/irq-hip04.c 	if (cpu >= NR_HIP04_CPU_IF || cpu >= nr_cpu_ids)
cpu               165 drivers/irqchip/irq-hip04.c 	bit = hip04_cpu_map[cpu] << shift;
cpu               170 drivers/irqchip/irq-hip04.c 	irq_data_update_effective_affinity(d, cpumask_of(cpu));
cpu               258 drivers/irqchip/irq-hip04.c 	unsigned int cpu_mask, cpu = smp_processor_id();
cpu               264 drivers/irqchip/irq-hip04.c 	BUG_ON(cpu >= NR_HIP04_CPU_IF);
cpu               266 drivers/irqchip/irq-hip04.c 	hip04_cpu_map[cpu] = cpu_mask;
cpu               273 drivers/irqchip/irq-hip04.c 		if (i != cpu)
cpu               285 drivers/irqchip/irq-hip04.c 	int cpu;
cpu               291 drivers/irqchip/irq-hip04.c 	for_each_cpu(cpu, mask)
cpu               292 drivers/irqchip/irq-hip04.c 		map |= hip04_cpu_map[cpu];
cpu               350 drivers/irqchip/irq-hip04.c static int hip04_irq_starting_cpu(unsigned int cpu)
cpu                76 drivers/irqchip/irq-jcore-aic.c 		unsigned cpu;
cpu                78 drivers/irqchip/irq-jcore-aic.c 		for_each_present_cpu(cpu) {
cpu                79 drivers/irqchip/irq-jcore-aic.c 			void __iomem *base = of_iomap(node, cpu);
cpu                82 drivers/irqchip/irq-jcore-aic.c 				pr_err("Unable to map AIC for cpu %u\n", cpu);
cpu               107 drivers/irqchip/irq-ls-scfg-msi.c 	u32 cpu;
cpu               113 drivers/irqchip/irq-ls-scfg-msi.c 		cpu = cpumask_any_and(mask, cpu_online_mask);
cpu               115 drivers/irqchip/irq-ls-scfg-msi.c 		cpu = cpumask_first(mask);
cpu               117 drivers/irqchip/irq-ls-scfg-msi.c 	if (cpu >= msi_data->msir_num)
cpu               120 drivers/irqchip/irq-ls-scfg-msi.c 	if (msi_data->msir[cpu].gic_irq <= 0) {
cpu               121 drivers/irqchip/irq-ls-scfg-msi.c 		pr_warn("cannot bind the irq to cpu%d\n", cpu);
cpu               125 drivers/irqchip/irq-ls-scfg-msi.c 	irq_data_update_effective_affinity(irq_data, cpumask_of(cpu));
cpu                91 drivers/irqchip/irq-mips-cpu.c static void mips_mt_send_ipi(struct irq_data *d, unsigned int cpu)
cpu               100 drivers/irqchip/irq-mips-cpu.c 	WARN_ON(!cpus_are_siblings(smp_processor_id(), cpu));
cpu               103 drivers/irqchip/irq-mips-cpu.c 	settc(cpu_vpe_id(&cpu_data[cpu]));
cpu               108 drivers/irqchip/irq-mips-gic.c static void gic_send_ipi(struct irq_data *d, unsigned int cpu)
cpu               187 drivers/irqchip/irq-mips-gic.c 	unsigned int cpu;
cpu               192 drivers/irqchip/irq-mips-gic.c 	cpu = cpumask_first(irq_data_get_effective_affinity_mask(d));
cpu               193 drivers/irqchip/irq-mips-gic.c 	set_bit(intr, per_cpu_ptr(pcpu_masks, cpu));
cpu               261 drivers/irqchip/irq-mips-gic.c 	unsigned int cpu;
cpu               263 drivers/irqchip/irq-mips-gic.c 	cpu = cpumask_first_and(cpumask, cpu_online_mask);
cpu               264 drivers/irqchip/irq-mips-gic.c 	if (cpu >= NR_CPUS)
cpu               271 drivers/irqchip/irq-mips-gic.c 	write_gic_map_vp(irq, BIT(mips_cm_vp_id(cpu)));
cpu               276 drivers/irqchip/irq-mips-gic.c 		set_bit(irq, per_cpu_ptr(pcpu_masks, cpu));
cpu               278 drivers/irqchip/irq-mips-gic.c 	irq_data_update_effective_affinity(d, cpumask_of(cpu));
cpu               351 drivers/irqchip/irq-mips-gic.c 	int intr, cpu;
cpu               358 drivers/irqchip/irq-mips-gic.c 	for_each_online_cpu(cpu) {
cpu               359 drivers/irqchip/irq-mips-gic.c 		write_gic_vl_other(mips_cm_vp_id(cpu));
cpu               369 drivers/irqchip/irq-mips-gic.c 	int intr, cpu;
cpu               376 drivers/irqchip/irq-mips-gic.c 	for_each_online_cpu(cpu) {
cpu               377 drivers/irqchip/irq-mips-gic.c 		write_gic_vl_other(mips_cm_vp_id(cpu));
cpu               416 drivers/irqchip/irq-mips-gic.c 				     irq_hw_number_t hw, unsigned int cpu)
cpu               426 drivers/irqchip/irq-mips-gic.c 	write_gic_map_vp(intr, BIT(mips_cm_vp_id(cpu)));
cpu               427 drivers/irqchip/irq-mips-gic.c 	irq_data_update_effective_affinity(data, cpumask_of(cpu));
cpu               458 drivers/irqchip/irq-mips-gic.c 	int err, cpu;
cpu               518 drivers/irqchip/irq-mips-gic.c 	for_each_online_cpu(cpu) {
cpu               519 drivers/irqchip/irq-mips-gic.c 		write_gic_vl_other(mips_cm_vp_id(cpu));
cpu               573 drivers/irqchip/irq-mips-gic.c 	int cpu, ret, i;
cpu               588 drivers/irqchip/irq-mips-gic.c 	for_each_cpu(cpu, ipimask) {
cpu               607 drivers/irqchip/irq-mips-gic.c 		ret = gic_shared_irq_domain_map(d, virq + i, hwirq, cpu);
cpu               656 drivers/irqchip/irq-mips-gic.c static int gic_cpu_startup(unsigned int cpu)
cpu                72 drivers/irqchip/irq-ompic.c #define OMPIC_CTRL(cpu)		(0x0 + (cpu * OMPIC_CPUBYTES))
cpu                73 drivers/irqchip/irq-ompic.c #define OMPIC_STAT(cpu)		(0x4 + (cpu * OMPIC_CPUBYTES))
cpu                77 drivers/irqchip/irq-ompic.c #define OMPIC_CTRL_DST(cpu)	(((cpu) & 0x3fff) << 16)
cpu               121 drivers/irqchip/irq-ompic.c 	unsigned int cpu = smp_processor_id();
cpu               122 drivers/irqchip/irq-ompic.c 	unsigned long *pending_ops = &per_cpu(ops, cpu);
cpu               125 drivers/irqchip/irq-ompic.c 	ompic_writereg(ompic_base, OMPIC_CTRL(cpu), OMPIC_CTRL_IRQ_ACK);
cpu                26 drivers/irqchip/irq-partition-percpu.c 				unsigned int cpu, unsigned int hwirq)
cpu                28 drivers/irqchip/irq-partition-percpu.c 	return cpumask_test_cpu(cpu, &part->parts[hwirq].mask);
cpu               117 drivers/irqchip/irq-partition-percpu.c 	int cpu = smp_processor_id();
cpu               123 drivers/irqchip/irq-partition-percpu.c 		if (partition_check_cpu(part, cpu, hwirq))
cpu                89 drivers/irqchip/irq-sifive-plic.c 	int cpu;
cpu                92 drivers/irqchip/irq-sifive-plic.c 	for_each_cpu(cpu, mask) {
cpu                93 drivers/irqchip/irq-sifive-plic.c 		struct plic_handler *handler = per_cpu_ptr(&plic_handlers, cpu);
cpu               102 drivers/irqchip/irq-sifive-plic.c 	unsigned int cpu = cpumask_any_and(irq_data_get_affinity_mask(d),
cpu               104 drivers/irqchip/irq-sifive-plic.c 	if (WARN_ON_ONCE(cpu >= nr_cpu_ids))
cpu               106 drivers/irqchip/irq-sifive-plic.c 	plic_irq_toggle(cpumask_of(cpu), d->hwirq, 1);
cpu               118 drivers/irqchip/irq-sifive-plic.c 	unsigned int cpu;
cpu               121 drivers/irqchip/irq-sifive-plic.c 		cpu = cpumask_first(mask_val);
cpu               123 drivers/irqchip/irq-sifive-plic.c 		cpu = cpumask_any_and(mask_val, cpu_online_mask);
cpu               125 drivers/irqchip/irq-sifive-plic.c 	if (cpu >= nr_cpu_ids)
cpu               129 drivers/irqchip/irq-sifive-plic.c 	plic_irq_toggle(cpumask_of(cpu), d->hwirq, 1);
cpu               131 drivers/irqchip/irq-sifive-plic.c 	irq_data_update_effective_affinity(d, cpumask_of(cpu));
cpu               247 drivers/irqchip/irq-sifive-plic.c 		int cpu, hartid;
cpu               265 drivers/irqchip/irq-sifive-plic.c 		cpu = riscv_hartid_to_cpuid(hartid);
cpu               266 drivers/irqchip/irq-sifive-plic.c 		if (cpu < 0) {
cpu               276 drivers/irqchip/irq-sifive-plic.c 		handler = per_cpu_ptr(&plic_handlers, cpu);
cpu               133 drivers/irqchip/irq-xtensa-mx.c 	int cpu = cpumask_any_and(dest, cpu_online_mask);
cpu               134 drivers/irqchip/irq-xtensa-mx.c 	unsigned mask = 1u << cpu;
cpu               137 drivers/irqchip/irq-xtensa-mx.c 	irq_data_update_effective_affinity(d, cpumask_of(cpu));
cpu               115 drivers/leds/trigger/ledtrig-cpu.c static int ledtrig_online_cpu(unsigned int cpu)
cpu               121 drivers/leds/trigger/ledtrig-cpu.c static int ledtrig_prepare_down_cpu(unsigned int cpu)
cpu               129 drivers/leds/trigger/ledtrig-cpu.c 	int cpu;
cpu               145 drivers/leds/trigger/ledtrig-cpu.c 	for_each_possible_cpu(cpu) {
cpu               146 drivers/leds/trigger/ledtrig-cpu.c 		struct led_trigger_cpu *trig = &per_cpu(cpu_trig, cpu);
cpu               148 drivers/leds/trigger/ledtrig-cpu.c 		snprintf(trig->name, MAX_NAME_LEN, "cpu%d", cpu);
cpu                68 drivers/macintosh/rack-meter.c 	struct rackmeter_cpu		cpu[2];
cpu                82 drivers/macintosh/rack-meter.c static inline u64 get_cpu_idle_time(unsigned int cpu)
cpu                86 drivers/macintosh/rack-meter.c 	retval = kcpustat_cpu(cpu).cpustat[CPUTIME_IDLE] +
cpu                87 drivers/macintosh/rack-meter.c 		 kcpustat_cpu(cpu).cpustat[CPUTIME_IOWAIT];
cpu                90 drivers/macintosh/rack-meter.c 		retval += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE];
cpu               217 drivers/macintosh/rack-meter.c 	unsigned int cpu = smp_processor_id();
cpu               226 drivers/macintosh/rack-meter.c 	total_idle_nsecs = get_cpu_idle_time(cpu);
cpu               236 drivers/macintosh/rack-meter.c 	offset = cpu << 3;
cpu               246 drivers/macintosh/rack-meter.c 	pause = (rm->cpu[0].zero && rm->cpu[1].zero);
cpu               249 drivers/macintosh/rack-meter.c 		pause = (rm->cpu[0].zero && rm->cpu[1].zero);
cpu               253 drivers/macintosh/rack-meter.c 	schedule_delayed_work_on(cpu, &rcpu->sniffer,
cpu               259 drivers/macintosh/rack-meter.c 	unsigned int cpu;
cpu               267 drivers/macintosh/rack-meter.c 	rm->cpu[0].rm = rm;
cpu               268 drivers/macintosh/rack-meter.c 	INIT_DELAYED_WORK(&rm->cpu[0].sniffer, rackmeter_do_timer);
cpu               269 drivers/macintosh/rack-meter.c 	rm->cpu[1].rm = rm;
cpu               270 drivers/macintosh/rack-meter.c 	INIT_DELAYED_WORK(&rm->cpu[1].sniffer, rackmeter_do_timer);
cpu               272 drivers/macintosh/rack-meter.c 	for_each_online_cpu(cpu) {
cpu               275 drivers/macintosh/rack-meter.c 		if (cpu > 1)
cpu               277 drivers/macintosh/rack-meter.c 		rcpu = &rm->cpu[cpu];
cpu               278 drivers/macintosh/rack-meter.c 		rcpu->prev_idle = get_cpu_idle_time(cpu);
cpu               280 drivers/macintosh/rack-meter.c 		schedule_delayed_work_on(cpu, &rm->cpu[cpu].sniffer,
cpu               287 drivers/macintosh/rack-meter.c 	cancel_delayed_work_sync(&rm->cpu[0].sniffer);
cpu               288 drivers/macintosh/rack-meter.c 	cancel_delayed_work_sync(&rm->cpu[1].sniffer);
cpu                30 drivers/macintosh/windfarm_ad7417_sensor.c 	u8			cpu;
cpu               183 drivers/macintosh/windfarm_ad7417_sensor.c 	pv->sensors[index].name = kasprintf(GFP_KERNEL, "%s-%d", name, pv->cpu);
cpu               271 drivers/macintosh/windfarm_ad7417_sensor.c 	pv->cpu = cpu_nr;
cpu                74 drivers/macintosh/windfarm_mpu.h static inline const struct mpu_data *wf_get_mpu(int cpu)
cpu                86 drivers/macintosh/windfarm_mpu.h 	sprintf(nodename, "/u3@0,f8000000/i2c@f8001000/cpuid@a%d", cpu ? 2 : 0);
cpu               124 drivers/macintosh/windfarm_pm112.c static int create_cpu_loop(int cpu)
cpu               126 drivers/macintosh/windfarm_pm112.c 	int chip = cpu / 2;
cpu               127 drivers/macintosh/windfarm_pm112.c 	int core = cpu & 1;
cpu               176 drivers/macintosh/windfarm_pm112.c 	wf_cpu_pid_init(&cpu_pid[cpu], &pid);
cpu               263 drivers/macintosh/windfarm_pm112.c 	int err, cpu;
cpu               272 drivers/macintosh/windfarm_pm112.c 	for (cpu = 0; cpu < nr_cores; ++cpu) {
cpu               274 drivers/macintosh/windfarm_pm112.c 		sr = sens_cpu_temp[cpu];
cpu               279 drivers/macintosh/windfarm_pm112.c 			       "sensor error %d\n", cpu, err);
cpu               289 drivers/macintosh/windfarm_pm112.c 		sr = sens_cpu_power[cpu];
cpu               294 drivers/macintosh/windfarm_pm112.c 			       "sensor error %d\n", cpu, err);
cpu               301 drivers/macintosh/windfarm_pm112.c 		sp = &cpu_pid[cpu];
cpu               304 drivers/macintosh/windfarm_pm112.c 		if (cpu == 0 || sp->last_delta > greatest_delta) {
cpu               309 drivers/macintosh/windfarm_pm112.c 		    cpu, FIX32TOPRINT(power), FIX32TOPRINT(temp));
cpu               317 drivers/macintosh/windfarm_pm112.c 	for (cpu = 0; cpu < nr_cores; ++cpu)
cpu               318 drivers/macintosh/windfarm_pm112.c 		cpu_pid[cpu].target = target;
cpu               675 drivers/macintosh/windfarm_pm112.c 	struct device_node *cpu;
cpu               682 drivers/macintosh/windfarm_pm112.c 	for_each_node_by_type(cpu, "cpu")
cpu               208 drivers/macintosh/windfarm_pm72.c static int read_one_cpu_vals(int cpu, s32 *temp, s32 *power)
cpu               214 drivers/macintosh/windfarm_pm72.c 	rc = wf_sensor_get(sens_cpu_temp[cpu], &dtemp);
cpu               216 drivers/macintosh/windfarm_pm72.c 		DBG("  CPU%d: temp reading error !\n", cpu);
cpu               219 drivers/macintosh/windfarm_pm72.c 	DBG_LOTS("  CPU%d: temp   = %d.%03d\n", cpu, FIX32TOPRINT((dtemp)));
cpu               223 drivers/macintosh/windfarm_pm72.c 	rc = wf_sensor_get(sens_cpu_volts[cpu], &volts);
cpu               225 drivers/macintosh/windfarm_pm72.c 		DBG("  CPU%d, volts reading error !\n", cpu);
cpu               228 drivers/macintosh/windfarm_pm72.c 	DBG_LOTS("  CPU%d: volts  = %d.%03d\n", cpu, FIX32TOPRINT((volts)));
cpu               231 drivers/macintosh/windfarm_pm72.c 	rc = wf_sensor_get(sens_cpu_amps[cpu], &amps);
cpu               233 drivers/macintosh/windfarm_pm72.c 		DBG("  CPU%d, current reading error !\n", cpu);
cpu               236 drivers/macintosh/windfarm_pm72.c 	DBG_LOTS("  CPU%d: amps   = %d.%03d\n", cpu, FIX32TOPRINT((amps)));
cpu               245 drivers/macintosh/windfarm_pm72.c 	DBG_LOTS("  CPU%d: power  = %d.%03d\n", cpu, FIX32TOPRINT((*power)));
cpu               253 drivers/macintosh/windfarm_pm72.c 	int err, cpu;
cpu               258 drivers/macintosh/windfarm_pm72.c 	for (cpu = 0; cpu < nr_chips; ++cpu) {
cpu               259 drivers/macintosh/windfarm_pm72.c 		struct wf_cpu_pid_state *sp = &cpu_pid[cpu];
cpu               262 drivers/macintosh/windfarm_pm72.c 		wf_control_get(cpu_rear_fans[cpu], &sp->target);
cpu               264 drivers/macintosh/windfarm_pm72.c 		DBG_LOTS("  CPU%d: cur_target = %d RPM\n", cpu, sp->target);
cpu               266 drivers/macintosh/windfarm_pm72.c 		err = read_one_cpu_vals(cpu, &temp, &power);
cpu               283 drivers/macintosh/windfarm_pm72.c 		DBG_LOTS("  CPU%d: target = %d RPM\n", cpu, sp->target);
cpu               286 drivers/macintosh/windfarm_pm72.c 		err = wf_control_set(cpu_rear_fans[cpu], sp->target);
cpu               289 drivers/macintosh/windfarm_pm72.c 			       cpu_rear_fans[cpu]->name, err);
cpu               296 drivers/macintosh/windfarm_pm72.c 		DBG_LOTS("  CPU%d: intake = %d RPM\n", cpu, intake);
cpu               297 drivers/macintosh/windfarm_pm72.c 		err = wf_control_set(cpu_front_fans[cpu], intake);
cpu               300 drivers/macintosh/windfarm_pm72.c 			       cpu_front_fans[cpu]->name, err);
cpu               313 drivers/macintosh/windfarm_pm72.c 	int err, cpu;
cpu               367 drivers/macintosh/windfarm_pm72.c 	for (cpu = 0; cpu < nr_chips; cpu++) {
cpu               368 drivers/macintosh/windfarm_pm72.c 		err = wf_control_set(cpu_rear_fans[cpu], sp->target);
cpu               371 drivers/macintosh/windfarm_pm72.c 				   cpu_rear_fans[cpu]->name, err);
cpu               374 drivers/macintosh/windfarm_pm72.c 		err = wf_control_set(cpu_front_fans[cpu], intake);
cpu               377 drivers/macintosh/windfarm_pm72.c 				   cpu_front_fans[cpu]->name, err);
cpu               381 drivers/macintosh/windfarm_pm72.c 		if (cpu_pumps[cpu])
cpu               382 drivers/macintosh/windfarm_pm72.c 			err = wf_control_set(cpu_pumps[cpu], pump);
cpu               385 drivers/macintosh/windfarm_pm72.c 				   cpu_pumps[cpu]->name, err);
cpu               392 drivers/macintosh/windfarm_pm72.c static int cpu_setup_pid(int cpu)
cpu               395 drivers/macintosh/windfarm_pm72.c 	const struct mpu_data *mpu = cpu_mpu_data[cpu];
cpu               405 drivers/macintosh/windfarm_pm72.c 	    cpu, FIX32TOPRINT(ttarget), FIX32TOPRINT(tmax));
cpu               412 drivers/macintosh/windfarm_pm72.c 	fmin = wf_control_get_min(cpu_rear_fans[cpu]);
cpu               413 drivers/macintosh/windfarm_pm72.c 	fmax = wf_control_get_max(cpu_rear_fans[cpu]);
cpu               414 drivers/macintosh/windfarm_pm72.c 	DBG("wf_72: CPU%d max RPM range = [%d..%d]\n", cpu, fmin, fmax);
cpu               418 drivers/macintosh/windfarm_pm72.c 	DBG("wf_72: CPU%d history size = %d\n", cpu, hsize);
cpu               432 drivers/macintosh/windfarm_pm72.c 	wf_cpu_pid_init(&cpu_pid[cpu], &pid);
cpu               433 drivers/macintosh/windfarm_pm72.c 	cpu_pid[cpu].target = 1000;
cpu               796 drivers/macintosh/windfarm_pm72.c 	struct device_node *cpu;
cpu               805 drivers/macintosh/windfarm_pm72.c 	for_each_node_by_type(cpu, "cpu")
cpu               202 drivers/macintosh/windfarm_rm31.c static int read_one_cpu_vals(int cpu, s32 *temp, s32 *power)
cpu               208 drivers/macintosh/windfarm_rm31.c 	rc = wf_sensor_get(sens_cpu_temp[cpu], &dtemp);
cpu               210 drivers/macintosh/windfarm_rm31.c 		DBG("  CPU%d: temp reading error !\n", cpu);
cpu               213 drivers/macintosh/windfarm_rm31.c 	DBG_LOTS("  CPU%d: temp   = %d.%03d\n", cpu, FIX32TOPRINT((dtemp)));
cpu               217 drivers/macintosh/windfarm_rm31.c 	rc = wf_sensor_get(sens_cpu_volts[cpu], &volts);
cpu               219 drivers/macintosh/windfarm_rm31.c 		DBG("  CPU%d, volts reading error !\n", cpu);
cpu               222 drivers/macintosh/windfarm_rm31.c 	DBG_LOTS("  CPU%d: volts  = %d.%03d\n", cpu, FIX32TOPRINT((volts)));
cpu               225 drivers/macintosh/windfarm_rm31.c 	rc = wf_sensor_get(sens_cpu_amps[cpu], &amps);
cpu               227 drivers/macintosh/windfarm_rm31.c 		DBG("  CPU%d, current reading error !\n", cpu);
cpu               230 drivers/macintosh/windfarm_rm31.c 	DBG_LOTS("  CPU%d: amps   = %d.%03d\n", cpu, FIX32TOPRINT((amps)));
cpu               239 drivers/macintosh/windfarm_rm31.c 	DBG_LOTS("  CPU%d: power  = %d.%03d\n", cpu, FIX32TOPRINT((*power)));
cpu               247 drivers/macintosh/windfarm_rm31.c 	int err, cpu, i;
cpu               252 drivers/macintosh/windfarm_rm31.c 	for (cpu = 0; cpu < nr_chips; ++cpu) {
cpu               253 drivers/macintosh/windfarm_rm31.c 		struct wf_cpu_pid_state *sp = &cpu_pid[cpu];
cpu               256 drivers/macintosh/windfarm_rm31.c 		wf_control_get(cpu_fans[cpu][0], &sp->target);
cpu               258 drivers/macintosh/windfarm_rm31.c 		err = read_one_cpu_vals(cpu, &temp, &power);
cpu               275 drivers/macintosh/windfarm_rm31.c 		DBG_LOTS("  CPU%d: target = %d RPM\n", cpu, sp->target);
cpu               282 drivers/macintosh/windfarm_rm31.c 			err = wf_control_set(cpu_fans[cpu][i], speed);
cpu               285 drivers/macintosh/windfarm_rm31.c 					   cpu_fans[cpu][i]->name, err);
cpu               293 drivers/macintosh/windfarm_rm31.c static int cpu_setup_pid(int cpu)
cpu               296 drivers/macintosh/windfarm_rm31.c 	const struct mpu_data *mpu = cpu_mpu_data[cpu];
cpu               306 drivers/macintosh/windfarm_rm31.c 	    cpu, FIX32TOPRINT(ttarget), FIX32TOPRINT(tmax));
cpu               313 drivers/macintosh/windfarm_rm31.c 	fmin = wf_control_get_min(cpu_fans[cpu][0]);
cpu               314 drivers/macintosh/windfarm_rm31.c 	fmax = wf_control_get_max(cpu_fans[cpu][0]);
cpu               315 drivers/macintosh/windfarm_rm31.c 	DBG("wf_72: CPU%d max RPM range = [%d..%d]\n", cpu, fmin, fmax);
cpu               319 drivers/macintosh/windfarm_rm31.c 	DBG("wf_72: CPU%d history size = %d\n", cpu, hsize);
cpu               333 drivers/macintosh/windfarm_rm31.c 	wf_cpu_pid_init(&cpu_pid[cpu], &pid);
cpu               334 drivers/macintosh/windfarm_rm31.c 	cpu_pid[cpu].target = 4000;
cpu               689 drivers/macintosh/windfarm_rm31.c 	struct device_node *cpu;
cpu               697 drivers/macintosh/windfarm_rm31.c 	for_each_node_by_type(cpu, "cpu")
cpu               202 drivers/macintosh/windfarm_smu_sat.c 	int shift, cpu, index;
cpu               241 drivers/macintosh/windfarm_smu_sat.c 		cpu = 2 * chip + core;
cpu               268 drivers/macintosh/windfarm_smu_sat.c 			       "%s sensor %d (no memory)\n", name, cpu);
cpu               277 drivers/macintosh/windfarm_smu_sat.c 		snprintf((char *)sens->sens.name, 16, "%s-%d", name, cpu);
cpu               291 drivers/macintosh/windfarm_smu_sat.c 		cpu = 2 * sat->nr + core;
cpu               295 drivers/macintosh/windfarm_smu_sat.c 			       "sensor %d (no memory)\n", cpu);
cpu               304 drivers/macintosh/windfarm_smu_sat.c 		snprintf((char *)sens->sens.name, 16, "cpu-power-%d", cpu);
cpu               171 drivers/md/dm-stats.c 	int cpu;
cpu               177 drivers/md/dm-stats.c 	for_each_possible_cpu(cpu) {
cpu               178 drivers/md/dm-stats.c 		dm_kvfree(s->stat_percpu[cpu][0].histogram, s->histogram_alloc_size);
cpu               179 drivers/md/dm-stats.c 		dm_kvfree(s->stat_percpu[cpu], s->percpu_alloc_size);
cpu               193 drivers/md/dm-stats.c 	int cpu;
cpu               199 drivers/md/dm-stats.c 	for_each_possible_cpu(cpu) {
cpu               200 drivers/md/dm-stats.c 		last = per_cpu_ptr(stats->last, cpu);
cpu               251 drivers/md/dm-stats.c 	int cpu;
cpu               331 drivers/md/dm-stats.c 	for_each_possible_cpu(cpu) {
cpu               332 drivers/md/dm-stats.c 		p = dm_kvzalloc(percpu_alloc_size, cpu_to_node(cpu));
cpu               337 drivers/md/dm-stats.c 		s->stat_percpu[cpu] = p;
cpu               340 drivers/md/dm-stats.c 			hi = dm_kvzalloc(s->histogram_alloc_size, cpu_to_node(cpu));
cpu               410 drivers/md/dm-stats.c 	int cpu;
cpu               426 drivers/md/dm-stats.c 	for_each_possible_cpu(cpu)
cpu               428 drivers/md/dm-stats.c 		    is_vmalloc_addr(s->stat_percpu[cpu][0].histogram))
cpu               672 drivers/md/dm-stats.c 	int cpu;
cpu               696 drivers/md/dm-stats.c 	for_each_possible_cpu(cpu) {
cpu               697 drivers/md/dm-stats.c 		p = &s->stat_percpu[cpu][x];
cpu               631 drivers/md/dm.c 	int cpu;
cpu               635 drivers/md/dm.c 	for_each_possible_cpu(cpu) {
cpu               636 drivers/md/dm.c 		sum += part_stat_local_read_cpu(part, in_flight[0], cpu);
cpu               637 drivers/md/dm.c 		sum += part_stat_local_read_cpu(part, in_flight[1], cpu);
cpu                61 drivers/md/raid5.c #define cpu_to_group(cpu) cpu_to_node(cpu)
cpu               171 drivers/md/raid5.c 	int i, cpu = sh->cpu;
cpu               173 drivers/md/raid5.c 	if (!cpu_online(cpu)) {
cpu               174 drivers/md/raid5.c 		cpu = cpumask_any(cpu_online_mask);
cpu               175 drivers/md/raid5.c 		sh->cpu = cpu;
cpu               180 drivers/md/raid5.c 		group = conf->worker_groups + cpu_to_group(cpu);
cpu               194 drivers/md/raid5.c 	group = conf->worker_groups + cpu_to_group(sh->cpu);
cpu               198 drivers/md/raid5.c 	queue_work_on(sh->cpu, raid5_wq, &group->workers[0].work);
cpu               205 drivers/md/raid5.c 			queue_work_on(sh->cpu, raid5_wq,
cpu               526 drivers/md/raid5.c 	sh->cpu = smp_processor_id();
cpu              2059 drivers/md/raid5.c 	unsigned long cpu;
cpu              2061 drivers/md/raid5.c 	cpu = get_cpu();
cpu              2062 drivers/md/raid5.c 	percpu = per_cpu_ptr(conf->percpu, cpu);
cpu              2251 drivers/md/raid5.c 	unsigned long cpu;
cpu              2265 drivers/md/raid5.c 	for_each_present_cpu(cpu) {
cpu              2268 drivers/md/raid5.c 		percpu = per_cpu_ptr(conf->percpu, cpu);
cpu              5385 drivers/md/raid5.c 			    !cpu_online(tmp->cpu) ||
cpu              5386 drivers/md/raid5.c 			    cpu_to_group(tmp->cpu) == group) {
cpu              6777 drivers/md/raid5.c static int raid456_cpu_dead(unsigned int cpu, struct hlist_node *node)
cpu              6781 drivers/md/raid5.c 	free_scratch_buffer(conf, per_cpu_ptr(conf->percpu, cpu));
cpu              6814 drivers/md/raid5.c static int raid456_cpu_up_prepare(unsigned int cpu, struct hlist_node *node)
cpu              6817 drivers/md/raid5.c 	struct raid5_percpu *percpu = per_cpu_ptr(conf->percpu, cpu);
cpu              6821 drivers/md/raid5.c 			__func__, cpu);
cpu               221 drivers/md/raid5.h 	int			cpu;
cpu                 6 drivers/media/common/btcx-risc.h 	__le32         *cpu;
cpu               213 drivers/media/common/saa7146/saa7146_core.c 	if (NULL == pt->cpu)
cpu               215 drivers/media/common/saa7146/saa7146_core.c 	pci_free_consistent(pci, pt->size, pt->cpu, pt->dma);
cpu               216 drivers/media/common/saa7146/saa7146_core.c 	pt->cpu = NULL;
cpu               221 drivers/media/common/saa7146/saa7146_core.c 	__le32       *cpu;
cpu               224 drivers/media/common/saa7146/saa7146_core.c 	cpu = pci_alloc_consistent(pci, PAGE_SIZE, &dma_addr);
cpu               225 drivers/media/common/saa7146/saa7146_core.c 	if (NULL == cpu) {
cpu               229 drivers/media/common/saa7146/saa7146_core.c 	pt->cpu  = cpu;
cpu               249 drivers/media/common/saa7146/saa7146_core.c 	ptr = pt->cpu;
cpu                10 drivers/media/common/saa7146/saa7146_vbi.c 	u32          *cpu;
cpu                25 drivers/media/common/saa7146/saa7146_vbi.c 	cpu = pci_alloc_consistent(dev->pci, 4096, &dma_addr);
cpu                26 drivers/media/common/saa7146/saa7146_vbi.c 	if (NULL == cpu)
cpu               126 drivers/media/common/saa7146/saa7146_vbi.c 			pci_free_consistent(dev->pci, 4096, cpu, dma_addr);
cpu               131 drivers/media/common/saa7146/saa7146_vbi.c 	pci_free_consistent(dev->pci, 4096, cpu, dma_addr);
cpu               244 drivers/media/common/saa7146/saa7146_video.c 		ptr1 = pt1->cpu;
cpu               245 drivers/media/common/saa7146/saa7146_video.c 		ptr2 = pt2->cpu;
cpu               246 drivers/media/common/saa7146/saa7146_video.c 		ptr3 = pt3->cpu;
cpu               268 drivers/media/common/saa7146/saa7146_video.c 		ptr1 = pt1->cpu;
cpu               277 drivers/media/common/saa7146/saa7146_video.c 		ptr1 = pt1->cpu;
cpu               286 drivers/media/common/saa7146/saa7146_video.c 		ptr1 = pt1->cpu+m1;
cpu               287 drivers/media/common/saa7146/saa7146_video.c 		fill = pt1->cpu[m1];
cpu                44 drivers/media/pci/bt8xx/btcx-risc.c 	if (NULL == risc->cpu)
cpu                51 drivers/media/pci/bt8xx/btcx-risc.c 	pci_free_consistent(pci, risc->size, risc->cpu, risc->dma);
cpu                59 drivers/media/pci/bt8xx/btcx-risc.c 	__le32 *cpu;
cpu                62 drivers/media/pci/bt8xx/btcx-risc.c 	if (NULL != risc->cpu && risc->size < size)
cpu                64 drivers/media/pci/bt8xx/btcx-risc.c 	if (NULL == risc->cpu) {
cpu                65 drivers/media/pci/bt8xx/btcx-risc.c 		cpu = pci_alloc_consistent(pci, size, &dma);
cpu                66 drivers/media/pci/bt8xx/btcx-risc.c 		if (NULL == cpu)
cpu                68 drivers/media/pci/bt8xx/btcx-risc.c 		risc->cpu  = cpu;
cpu                74 drivers/media/pci/bt8xx/btcx-risc.c 			memcnt, (unsigned long)dma, cpu, size);
cpu                76 drivers/media/pci/bt8xx/btcx-risc.c 	memset(risc->cpu,0,risc->size);
cpu                 4 drivers/media/pci/bt8xx/btcx-risc.h 	__le32         *cpu;
cpu              3371 drivers/media/pci/bt8xx/bttv-driver.c 		btv->c.v4l2_dev.name, risc->cpu, (unsigned long)risc->dma);
cpu              3376 drivers/media/pci/bt8xx/bttv-driver.c 		n = bttv_risc_decode(le32_to_cpu(risc->cpu[i]));
cpu              3381 drivers/media/pci/bt8xx/bttv-driver.c 				risc->cpu[i+j], j);
cpu              3382 drivers/media/pci/bt8xx/bttv-driver.c 		if (0 == risc->cpu[i])
cpu              3447 drivers/media/pci/bt8xx/bttv-driver.c 		(unsigned long)le32_to_cpu(btv->main.cpu[RISC_SLOT_O_VBI+1]),
cpu              3448 drivers/media/pci/bt8xx/bttv-driver.c 		(unsigned long)le32_to_cpu(btv->main.cpu[RISC_SLOT_O_FIELD+1]),
cpu                58 drivers/media/pci/bt8xx/bttv-risc.c 	rp = risc->cpu;
cpu               109 drivers/media/pci/bt8xx/bttv-risc.c 	BUG_ON((risc->jmp - risc->cpu + 2) * sizeof(*risc->cpu) > risc->size);
cpu               141 drivers/media/pci/bt8xx/bttv-risc.c 	rp = risc->cpu;
cpu               230 drivers/media/pci/bt8xx/bttv-risc.c 	BUG_ON((risc->jmp - risc->cpu + 2) * sizeof(*risc->cpu) > risc->size);
cpu               262 drivers/media/pci/bt8xx/bttv-risc.c 	rp = risc->cpu;
cpu               318 drivers/media/pci/bt8xx/bttv-risc.c 	BUG_ON((risc->jmp - risc->cpu + 2) * sizeof(*risc->cpu) > risc->size);
cpu               486 drivers/media/pci/bt8xx/bttv-risc.c 	btv->main.cpu[RISC_SLOT_LOOP] = cpu_to_le32(cmd);
cpu               514 drivers/media/pci/bt8xx/bttv-risc.c 	btv->main.cpu[0] = cpu_to_le32(BT848_RISC_SYNC | BT848_RISC_RESYNC |
cpu               516 drivers/media/pci/bt8xx/bttv-risc.c 	btv->main.cpu[1] = cpu_to_le32(0);
cpu               517 drivers/media/pci/bt8xx/bttv-risc.c 	btv->main.cpu[2] = cpu_to_le32(BT848_RISC_JUMP);
cpu               518 drivers/media/pci/bt8xx/bttv-risc.c 	btv->main.cpu[3] = cpu_to_le32(btv->main.dma + (4<<2));
cpu               521 drivers/media/pci/bt8xx/bttv-risc.c 	btv->main.cpu[4] = cpu_to_le32(BT848_RISC_JUMP);
cpu               522 drivers/media/pci/bt8xx/bttv-risc.c 	btv->main.cpu[5] = cpu_to_le32(btv->main.dma + (6<<2));
cpu               523 drivers/media/pci/bt8xx/bttv-risc.c 	btv->main.cpu[6] = cpu_to_le32(BT848_RISC_JUMP);
cpu               524 drivers/media/pci/bt8xx/bttv-risc.c 	btv->main.cpu[7] = cpu_to_le32(btv->main.dma + (8<<2));
cpu               526 drivers/media/pci/bt8xx/bttv-risc.c 	btv->main.cpu[8] = cpu_to_le32(BT848_RISC_SYNC | BT848_RISC_RESYNC |
cpu               528 drivers/media/pci/bt8xx/bttv-risc.c 	btv->main.cpu[9] = cpu_to_le32(0);
cpu               531 drivers/media/pci/bt8xx/bttv-risc.c 	btv->main.cpu[10] = cpu_to_le32(BT848_RISC_JUMP);
cpu               532 drivers/media/pci/bt8xx/bttv-risc.c 	btv->main.cpu[11] = cpu_to_le32(btv->main.dma + (12<<2));
cpu               533 drivers/media/pci/bt8xx/bttv-risc.c 	btv->main.cpu[12] = cpu_to_le32(BT848_RISC_JUMP);
cpu               534 drivers/media/pci/bt8xx/bttv-risc.c 	btv->main.cpu[13] = cpu_to_le32(btv->main.dma + (14<<2));
cpu               537 drivers/media/pci/bt8xx/bttv-risc.c 	btv->main.cpu[14] = cpu_to_le32(BT848_RISC_JUMP);
cpu               538 drivers/media/pci/bt8xx/bttv-risc.c 	btv->main.cpu[15] = cpu_to_le32(btv->main.dma + (0<<2));
cpu               552 drivers/media/pci/bt8xx/bttv-risc.c 		btv->main.cpu[slot+1] = cpu_to_le32(next);
cpu               565 drivers/media/pci/bt8xx/bttv-risc.c 		btv->main.cpu[slot+1] = cpu_to_le32(risc->dma);
cpu               268 drivers/media/pci/cx23885/cx23885-alsa.c 	pci_free_consistent(chip->pci, risc->size, risc->cpu, risc->dma);
cpu               595 drivers/media/pci/cx23885/cx23885-core.c 	       dev->name, risc->cpu, (unsigned long)risc->dma);
cpu               598 drivers/media/pci/cx23885/cx23885-core.c 		n = cx23885_risc_decode(le32_to_cpu(risc->cpu[i]));
cpu               601 drivers/media/pci/cx23885/cx23885-core.c 				dev->name, i + j, risc->cpu[i + j], j);
cpu               602 drivers/media/pci/cx23885/cx23885-core.c 		if (risc->cpu[i] == cpu_to_le32(RISC_JUMP))
cpu              1221 drivers/media/pci/cx23885/cx23885-core.c 	risc->cpu = pci_alloc_consistent(pci, risc->size, &risc->dma);
cpu              1222 drivers/media/pci/cx23885/cx23885-core.c 	if (risc->cpu == NULL)
cpu              1226 drivers/media/pci/cx23885/cx23885-core.c 	rp = risc->cpu;
cpu              1236 drivers/media/pci/cx23885/cx23885-core.c 	BUG_ON((risc->jmp - risc->cpu + 2) * sizeof(*risc->cpu) > risc->size);
cpu              1258 drivers/media/pci/cx23885/cx23885-core.c 	risc->cpu = pci_alloc_consistent(pci, risc->size, &risc->dma);
cpu              1259 drivers/media/pci/cx23885/cx23885-core.c 	if (risc->cpu == NULL)
cpu              1263 drivers/media/pci/cx23885/cx23885-core.c 	rp = risc->cpu;
cpu              1269 drivers/media/pci/cx23885/cx23885-core.c 	BUG_ON((risc->jmp - risc->cpu + 2) * sizeof(*risc->cpu) > risc->size);
cpu              1296 drivers/media/pci/cx23885/cx23885-core.c 	risc->cpu = pci_alloc_consistent(pci, risc->size, &risc->dma);
cpu              1297 drivers/media/pci/cx23885/cx23885-core.c 	if (risc->cpu == NULL)
cpu              1300 drivers/media/pci/cx23885/cx23885-core.c 	rp = risc->cpu;
cpu              1316 drivers/media/pci/cx23885/cx23885-core.c 	BUG_ON((risc->jmp - risc->cpu + 2) * sizeof(*risc->cpu) > risc->size);
cpu              1326 drivers/media/pci/cx23885/cx23885-core.c 	pci_free_consistent(dev->pci, risc->size, risc->cpu, risc->dma);
cpu              1619 drivers/media/pci/cx23885/cx23885-core.c 	buf->risc.cpu[1] = cpu_to_le32(buf->risc.dma + 12);
cpu              1630 drivers/media/pci/cx23885/cx23885-core.c 		buf->risc.cpu[0] |= cpu_to_le32(RISC_IRQ1);
cpu               192 drivers/media/pci/cx23885/cx23885-vbi.c 	buf->risc.cpu[1] = cpu_to_le32(buf->risc.dma + 12);
cpu               205 drivers/media/pci/cx23885/cx23885-vbi.c 		buf->risc.cpu[0] |= cpu_to_le32(RISC_IRQ1);
cpu               462 drivers/media/pci/cx23885/cx23885-video.c 	buf->risc.cpu[1] = cpu_to_le32(buf->risc.dma + 12);
cpu               473 drivers/media/pci/cx23885/cx23885-video.c 		buf->risc.cpu[0] |= cpu_to_le32(RISC_IRQ1);
cpu               165 drivers/media/pci/cx23885/cx23885.h 	__le32         *cpu;
cpu               405 drivers/media/pci/cx25821/cx25821-alsa.c 	pci_free_consistent(chip->pci, risc->size, risc->cpu, risc->dma);
cpu               976 drivers/media/pci/cx25821/cx25821-core.c 	__le32 *cpu;
cpu               979 drivers/media/pci/cx25821/cx25821-core.c 	if (NULL != risc->cpu && risc->size < size)
cpu               980 drivers/media/pci/cx25821/cx25821-core.c 		pci_free_consistent(pci, risc->size, risc->cpu, risc->dma);
cpu               981 drivers/media/pci/cx25821/cx25821-core.c 	if (NULL == risc->cpu) {
cpu               982 drivers/media/pci/cx25821/cx25821-core.c 		cpu = pci_zalloc_consistent(pci, size, &dma);
cpu               983 drivers/media/pci/cx25821/cx25821-core.c 		if (NULL == cpu)
cpu               985 drivers/media/pci/cx25821/cx25821-core.c 		risc->cpu  = cpu;
cpu              1085 drivers/media/pci/cx25821/cx25821-core.c 	rp = risc->cpu;
cpu              1099 drivers/media/pci/cx25821/cx25821-core.c 	BUG_ON((risc->jmp - risc->cpu + 3) * sizeof(*risc->cpu) > risc->size);
cpu              1188 drivers/media/pci/cx25821/cx25821-core.c 	rp = risc->cpu;
cpu              1194 drivers/media/pci/cx25821/cx25821-core.c 	BUG_ON((risc->jmp - risc->cpu + 2) * sizeof(*risc->cpu) > risc->size);
cpu              1205 drivers/media/pci/cx25821/cx25821-core.c 			buf->risc.size, buf->risc.cpu, buf->risc.dma);
cpu               245 drivers/media/pci/cx25821/cx25821-video.c 	buf->risc.cpu[1] = cpu_to_le32(buf->risc.dma + 12);
cpu               253 drivers/media/pci/cx25821/cx25821-video.c 		buf->risc.cpu[0] |= cpu_to_le32(RISC_IRQ1);
cpu               106 drivers/media/pci/cx25821/cx25821.h 	__le32         *cpu;
cpu               359 drivers/media/pci/cx88/cx88-alsa.c 	if (risc->cpu)
cpu               361 drivers/media/pci/cx88/cx88-alsa.c 				    risc->cpu, risc->dma);
cpu               687 drivers/media/pci/cx88/cx88-blackbird.c 	if (risc->cpu)
cpu               688 drivers/media/pci/cx88/cx88-blackbird.c 		pci_free_consistent(dev->pci, risc->size, risc->cpu, risc->dma);
cpu               155 drivers/media/pci/cx88/cx88-core.c 	risc->cpu = pci_zalloc_consistent(pci, risc->size, &risc->dma);
cpu               156 drivers/media/pci/cx88/cx88-core.c 	if (!risc->cpu)
cpu               160 drivers/media/pci/cx88/cx88-core.c 	rp = risc->cpu;
cpu               171 drivers/media/pci/cx88/cx88-core.c 	WARN_ON((risc->jmp - risc->cpu + 2) * sizeof(*risc->cpu) > risc->size);
cpu               193 drivers/media/pci/cx88/cx88-core.c 	risc->cpu = pci_zalloc_consistent(pci, risc->size, &risc->dma);
cpu               194 drivers/media/pci/cx88/cx88-core.c 	if (!risc->cpu)
cpu               198 drivers/media/pci/cx88/cx88-core.c 	rp = risc->cpu;
cpu               204 drivers/media/pci/cx88/cx88-core.c 	WARN_ON((risc->jmp - risc->cpu + 2) * sizeof(*risc->cpu) > risc->size);
cpu               105 drivers/media/pci/cx88/cx88-dvb.c 	if (risc->cpu)
cpu               106 drivers/media/pci/cx88/cx88-dvb.c 		pci_free_consistent(dev->pci, risc->size, risc->cpu, risc->dma);
cpu               228 drivers/media/pci/cx88/cx88-mpeg.c 		if (risc->cpu)
cpu               230 drivers/media/pci/cx88/cx88-mpeg.c 					    risc->cpu, risc->dma);
cpu               245 drivers/media/pci/cx88/cx88-mpeg.c 	buf->risc.cpu[1] = cpu_to_le32(buf->risc.dma + 8);
cpu               256 drivers/media/pci/cx88/cx88-mpeg.c 		buf->risc.cpu[0] |= cpu_to_le32(RISC_IRQ1);
cpu               161 drivers/media/pci/cx88/cx88-vbi.c 	if (risc->cpu)
cpu               162 drivers/media/pci/cx88/cx88-vbi.c 		pci_free_consistent(dev->pci, risc->size, risc->cpu, risc->dma);
cpu               175 drivers/media/pci/cx88/cx88-vbi.c 	buf->risc.cpu[1] = cpu_to_le32(buf->risc.dma + 8);
cpu               185 drivers/media/pci/cx88/cx88-vbi.c 		buf->risc.cpu[0] |= cpu_to_le32(RISC_IRQ1);
cpu               496 drivers/media/pci/cx88/cx88-video.c 	if (risc->cpu)
cpu               497 drivers/media/pci/cx88/cx88-video.c 		pci_free_consistent(dev->pci, risc->size, risc->cpu, risc->dma);
cpu               510 drivers/media/pci/cx88/cx88-video.c 	buf->risc.cpu[1] = cpu_to_le32(buf->risc.dma + 8);
cpu               520 drivers/media/pci/cx88/cx88-video.c 		buf->risc.cpu[0] |= cpu_to_le32(RISC_IRQ1);
cpu               307 drivers/media/pci/cx88/cx88.h 	__le32         *cpu;
cpu               223 drivers/media/pci/saa7134/saa7134-core.c 	__le32       *cpu;
cpu               226 drivers/media/pci/saa7134/saa7134-core.c 	cpu = pci_alloc_consistent(pci, SAA7134_PGTABLE_SIZE, &dma_addr);
cpu               227 drivers/media/pci/saa7134/saa7134-core.c 	if (NULL == cpu)
cpu               230 drivers/media/pci/saa7134/saa7134-core.c 	pt->cpu  = cpu;
cpu               242 drivers/media/pci/saa7134/saa7134-core.c 	BUG_ON(NULL == pt || NULL == pt->cpu);
cpu               244 drivers/media/pci/saa7134/saa7134-core.c 	ptr = pt->cpu + startpage;
cpu               255 drivers/media/pci/saa7134/saa7134-core.c 	if (NULL == pt->cpu)
cpu               257 drivers/media/pci/saa7134/saa7134-core.c 	pci_free_consistent(pci, pt->size, pt->cpu, pt->dma);
cpu               258 drivers/media/pci/saa7134/saa7134-core.c 	pt->cpu = NULL;
cpu               449 drivers/media/pci/saa7134/saa7134.h 	__le32                     *cpu;
cpu                63 drivers/media/pci/saa7164/saa7164-buffer.c 		buf->cpu, (long long)buf->dma, buf->pci_size);
cpu               106 drivers/media/pci/saa7164/saa7164-buffer.c 	buf->cpu = pci_alloc_consistent(port->dev->pci, buf->pci_size,
cpu               108 drivers/media/pci/saa7164/saa7164-buffer.c 	if (!buf->cpu)
cpu               117 drivers/media/pci/saa7164/saa7164-buffer.c 	memset(buf->cpu, 0xff, buf->pci_size);
cpu               118 drivers/media/pci/saa7164/saa7164-buffer.c 	buf->crc = crc32(0, buf->cpu, buf->actual_size);
cpu               124 drivers/media/pci/saa7164/saa7164-buffer.c 		buf->cpu, (long)buf->dma, buf->pci_size);
cpu               140 drivers/media/pci/saa7164/saa7164-buffer.c 	pci_free_consistent(port->dev->pci, buf->pci_size, buf->cpu, buf->dma);
cpu               163 drivers/media/pci/saa7164/saa7164-buffer.c 	pci_free_consistent(dev->pci, buf->pci_size, buf->cpu, buf->dma);
cpu                88 drivers/media/pci/saa7164/saa7164-core.c 	u8 *p = (u8 *)buf->cpu;
cpu               113 drivers/media/pci/saa7164/saa7164-core.c 	u8 *bufcpu = (u8 *)buf->cpu;
cpu               273 drivers/media/pci/saa7164/saa7164-core.c 				buf->crc = crc32(0, buf->cpu, buf->actual_size);
cpu               277 drivers/media/pci/saa7164/saa7164-core.c 				p = (u8 *)buf->cpu;
cpu               312 drivers/media/pci/saa7164/saa7164-core.c 					memcpy(ubuf->data, buf->cpu, ubuf->actual_size);
cpu               339 drivers/media/pci/saa7164/saa7164-core.c 			memset(buf->cpu, 0xff, buf->pci_size);
cpu               342 drivers/media/pci/saa7164/saa7164-core.c 				buf->crc = crc32(0, buf->cpu, buf->actual_size);
cpu               517 drivers/media/pci/saa7164/saa7164-core.c 	dvb_dmx_swfilter_packets(&port->dvb.demux, (u8 *)buf->cpu,
cpu               307 drivers/media/pci/saa7164/saa7164.h 	u64 *cpu;	/* Virtual address */
cpu               154 drivers/media/pci/tw68/tw68-risc.c 	buf->cpu = pci_alloc_consistent(pci, buf->size, &buf->dma);
cpu               155 drivers/media/pci/tw68/tw68-risc.c 	if (buf->cpu == NULL)
cpu               159 drivers/media/pci/tw68/tw68-risc.c 	rp = buf->cpu;
cpu               169 drivers/media/pci/tw68/tw68-risc.c 	buf->cpu[1] = cpu_to_le32(buf->dma + 8);
cpu               171 drivers/media/pci/tw68/tw68-risc.c 	BUG_ON((buf->jmp - buf->cpu + 2) * sizeof(buf->cpu[0]) > buf->size);
cpu               218 drivers/media/pci/tw68/tw68-risc.c 		  core->name, buf, buf->cpu, buf->jmp);
cpu               219 drivers/media/pci/tw68/tw68-risc.c 	for (addr = buf->cpu; addr <= buf->jmp; addr += 2)
cpu               420 drivers/media/pci/tw68/tw68-video.c 		buf->cpu[0] |= cpu_to_le32(RISC_INT_BIT);
cpu               488 drivers/media/pci/tw68/tw68-video.c 	pci_free_consistent(dev->pci, buf->size, buf->cpu, buf->dma);
cpu               116 drivers/media/pci/tw68/tw68.h 	__le32         *cpu;
cpu               310 drivers/misc/sgi-gru/grufile.c static unsigned long gru_chiplet_cpu_to_mmr(int chiplet, int cpu, int *corep)
cpu               321 drivers/misc/sgi-gru/grufile.c 	core = uv_cpu_core_number(cpu) + UV_MAX_INT_CORES * uv_cpu_socket_number(cpu);
cpu               322 drivers/misc/sgi-gru/grufile.c 	if (core >= GRU_NUM_TFM || uv_cpu_ht_number(cpu))
cpu               356 drivers/misc/sgi-gru/grufile.c 			irq_handler_t irq_handler, int cpu, int blade)
cpu               362 drivers/misc/sgi-gru/grufile.c 	mmr = gru_chiplet_cpu_to_mmr(chiplet, cpu, &core);
cpu               387 drivers/misc/sgi-gru/grufile.c static void gru_chiplet_teardown_tlb_irq(int chiplet, int cpu, int blade)
cpu               395 drivers/misc/sgi-gru/grufile.c 	mmr = gru_chiplet_cpu_to_mmr(chiplet, cpu, &core);
cpu               406 drivers/misc/sgi-gru/grufile.c 			irq_handler_t irq_handler, int cpu, int blade)
cpu               412 drivers/misc/sgi-gru/grufile.c 	mmr = gru_chiplet_cpu_to_mmr(chiplet, cpu, &core);
cpu               416 drivers/misc/sgi-gru/grufile.c 	irq = uv_setup_irq(irq_name, cpu, blade, mmr, UV_AFFINITY_CPU);
cpu               434 drivers/misc/sgi-gru/grufile.c static void gru_chiplet_teardown_tlb_irq(int chiplet, int cpu, int blade)
cpu               439 drivers/misc/sgi-gru/grufile.c 	mmr = gru_chiplet_cpu_to_mmr(chiplet, cpu, &core);
cpu               454 drivers/misc/sgi-gru/grufile.c 	int cpu;
cpu               456 drivers/misc/sgi-gru/grufile.c 	for_each_online_cpu(cpu) {
cpu               457 drivers/misc/sgi-gru/grufile.c 		blade = uv_cpu_to_blade_id(cpu);
cpu               458 drivers/misc/sgi-gru/grufile.c 		gru_chiplet_teardown_tlb_irq(0, cpu, blade);
cpu               459 drivers/misc/sgi-gru/grufile.c 		gru_chiplet_teardown_tlb_irq(1, cpu, blade);
cpu               472 drivers/misc/sgi-gru/grufile.c 	int cpu;
cpu               475 drivers/misc/sgi-gru/grufile.c 	for_each_online_cpu(cpu) {
cpu               476 drivers/misc/sgi-gru/grufile.c 		blade = uv_cpu_to_blade_id(cpu);
cpu               477 drivers/misc/sgi-gru/grufile.c 		ret = gru_chiplet_setup_tlb_irq(0, "GRU0_TLB", gru0_intr, cpu, blade);
cpu               481 drivers/misc/sgi-gru/grufile.c 		ret = gru_chiplet_setup_tlb_irq(1, "GRU1_TLB", gru1_intr, cpu, blade);
cpu                47 drivers/misc/sgi-gru/grumain.c 	int cpu = smp_processor_id();
cpu                50 drivers/misc/sgi-gru/grumain.c 	core = uv_cpu_core_number(cpu);
cpu                51 drivers/misc/sgi-gru/grumain.c 	id = core + UV_MAX_INT_CORES * uv_cpu_socket_number(cpu);
cpu               112 drivers/misc/sgi-xp/xpc_uv.c xpc_get_gru_mq_irq_uv(struct xpc_gru_mq_uv *mq, int cpu, char *irq_name)
cpu               117 drivers/misc/sgi-xp/xpc_uv.c 	mq->irq = uv_setup_irq(irq_name, cpu, mq->mmr_blade, mq->mmr_offset,
cpu               132 drivers/misc/sgi-xp/xpc_uv.c 	mq->mmr_value = (unsigned long)cpu_physical_id(cpu) << 32 | mq->irq;
cpu               209 drivers/misc/sgi-xp/xpc_uv.c xpc_create_gru_mq_uv(unsigned int mq_size, int cpu, char *irq_name,
cpu               242 drivers/misc/sgi-xp/xpc_uv.c 	mq->mmr_blade = uv_cpu_to_blade_id(cpu);
cpu               244 drivers/misc/sgi-xp/xpc_uv.c 	nid = cpu_to_node(cpu);
cpu               261 drivers/misc/sgi-xp/xpc_uv.c 	ret = xpc_get_gru_mq_irq_uv(mq, cpu, irq_name);
cpu               272 drivers/misc/sgi-xp/xpc_uv.c 	nasid = UV_PNODE_TO_NASID(uv_cpu_to_pnode(cpu));
cpu              1743 drivers/misc/sgi-xp/xpc_uv.c 	int cpu;
cpu              1747 drivers/misc/sgi-xp/xpc_uv.c 	for_each_cpu(cpu, cpumask_of_node(nid)) {
cpu              1760 drivers/misc/sgi-xp/xpc_uv.c 	for_each_cpu(cpu, cpumask_of_node(nid)) {
cpu               161 drivers/net/ethernet/amazon/ena/ena_netdev.c 	ring->cpu = 0;
cpu               221 drivers/net/ethernet/amazon/ena/ena_netdev.c 	node = cpu_to_node(ena_irq->cpu);
cpu               255 drivers/net/ethernet/amazon/ena/ena_netdev.c 	tx_ring->cpu = ena_irq->cpu;
cpu               370 drivers/net/ethernet/amazon/ena/ena_netdev.c 	node = cpu_to_node(ena_irq->cpu);
cpu               399 drivers/net/ethernet/amazon/ena/ena_netdev.c 	rx_ring->cpu = ena_irq->cpu;
cpu              1213 drivers/net/ethernet/amazon/ena/ena_netdev.c 	int cpu = get_cpu();
cpu              1217 drivers/net/ethernet/amazon/ena/ena_netdev.c 	if (likely(tx_ring->cpu == cpu))
cpu              1220 drivers/net/ethernet/amazon/ena/ena_netdev.c 	numa_node = cpu_to_node(cpu);
cpu              1228 drivers/net/ethernet/amazon/ena/ena_netdev.c 	tx_ring->cpu = cpu;
cpu              1229 drivers/net/ethernet/amazon/ena/ena_netdev.c 	rx_ring->cpu = cpu;
cpu              1381 drivers/net/ethernet/amazon/ena/ena_netdev.c 	u32 cpu;
cpu              1391 drivers/net/ethernet/amazon/ena/ena_netdev.c 	cpu = cpumask_first(cpu_online_mask);
cpu              1392 drivers/net/ethernet/amazon/ena/ena_netdev.c 	adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].cpu = cpu;
cpu              1393 drivers/net/ethernet/amazon/ena/ena_netdev.c 	cpumask_set_cpu(cpu,
cpu              1400 drivers/net/ethernet/amazon/ena/ena_netdev.c 	int irq_idx, i, cpu;
cpu              1406 drivers/net/ethernet/amazon/ena/ena_netdev.c 		cpu = i % num_online_cpus();
cpu              1414 drivers/net/ethernet/amazon/ena/ena_netdev.c 		adapter->irq_tbl[irq_idx].cpu = cpu;
cpu              1416 drivers/net/ethernet/amazon/ena/ena_netdev.c 		cpumask_set_cpu(cpu,
cpu              1650 drivers/net/ethernet/amazon/ena/ena_netdev.c 	ctx.numa_node = cpu_to_node(tx_ring->cpu);
cpu              1717 drivers/net/ethernet/amazon/ena/ena_netdev.c 	ctx.numa_node = cpu_to_node(rx_ring->cpu);
cpu               148 drivers/net/ethernet/amazon/ena/ena_netdev.h 	int cpu;
cpu               276 drivers/net/ethernet/amazon/ena/ena_netdev.h 	int cpu;
cpu               195 drivers/net/ethernet/amd/xgbe/xgbe-drv.c 	unsigned int cpu;
cpu               201 drivers/net/ethernet/amd/xgbe/xgbe-drv.c 		cpu = cpumask_local_spread(i, dev_to_node(pdata->dev));
cpu               204 drivers/net/ethernet/amd/xgbe/xgbe-drv.c 		node = cpu_to_node(cpu);
cpu               217 drivers/net/ethernet/amd/xgbe/xgbe-drv.c 		cpumask_set_cpu(cpu, &channel->affinity_mask);
cpu               245 drivers/net/ethernet/amd/xgbe/xgbe-drv.c 			  "%s: cpu=%u, node=%d\n", channel->name, cpu, node);
cpu               129 drivers/net/ethernet/aquantia/atlantic/aq_ring.h 	unsigned int cpu;
cpu               119 drivers/net/ethernet/aquantia/atlantic/aq_vec.c 	self->aq_ring_param.cpu =
cpu               122 drivers/net/ethernet/aquantia/atlantic/aq_vec.c 	cpumask_set_cpu(self->aq_ring_param.cpu,
cpu               540 drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c 	hw_atl_rdm_cpu_id_set(self, aq_ring_param->cpu, aq_ring->idx);
cpu               572 drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c 	hw_atl_tdm_cpu_id_set(self, aq_ring_param->cpu, aq_ring->idx);
cpu               608 drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c 	hw_atl_rdm_cpu_id_set(self, aq_ring_param->cpu, aq_ring->idx);
cpu               640 drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c 	hw_atl_tdm_cpu_id_set(self, aq_ring_param->cpu, aq_ring->idx);
cpu               327 drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c 	int sqs, cpu;
cpu               339 drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c 		for_each_possible_cpu(cpu)
cpu               340 drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c 			tmp_stats += ((u64 *)per_cpu_ptr(nic->drv_stats, cpu))
cpu              1099 drivers/net/ethernet/cavium/thunder/nicvf_main.c 	int vec, cpu;
cpu              1110 drivers/net/ethernet/cavium/thunder/nicvf_main.c 			cpu = nicvf_netdev_qidx(nic, vec) + 1;
cpu              1112 drivers/net/ethernet/cavium/thunder/nicvf_main.c 			cpu = 0;
cpu              1114 drivers/net/ethernet/cavium/thunder/nicvf_main.c 		cpumask_set_cpu(cpumask_local_spread(cpu, nic->node),
cpu              1454 drivers/net/ethernet/cavium/thunder/nicvf_main.c 	int cpu, err, qidx;
cpu              1527 drivers/net/ethernet/cavium/thunder/nicvf_main.c 		for_each_possible_cpu(cpu)
cpu              1528 drivers/net/ethernet/cavium/thunder/nicvf_main.c 			memset(per_cpu_ptr(nic->drv_stats, cpu), 0,
cpu              1664 drivers/net/ethernet/cavium/thunder/nicvf_main.c 	int qidx, cpu;
cpu              1703 drivers/net/ethernet/cavium/thunder/nicvf_main.c 		for_each_possible_cpu(cpu) {
cpu              1704 drivers/net/ethernet/cavium/thunder/nicvf_main.c 			drv_stats = per_cpu_ptr(nic->drv_stats, cpu);
cpu               979 drivers/net/ethernet/chelsio/cxgb/sge.c 	int cpu;
cpu               982 drivers/net/ethernet/chelsio/cxgb/sge.c 	for_each_possible_cpu(cpu) {
cpu               983 drivers/net/ethernet/chelsio/cxgb/sge.c 		struct sge_port_stats *st = per_cpu_ptr(sge->port_stats[port], cpu);
cpu               123 drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c 	unsigned int cpu;
cpu               129 drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c 	cpu = get_cpu();
cpu               130 drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c 	pool = per_cpu_ptr(ppm->pool, cpu);
cpu               149 drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c 		 __func__, cpu, i, count, i + cpu * ppm->pool_index_max,
cpu               152 drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c 	i += cpu * ppm->pool_index_max;
cpu               197 drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c 		unsigned int cpu;
cpu               200 drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c 		cpu = i / ppm->pool_index_max;
cpu               203 drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c 		pool = per_cpu_ptr(ppm->pool, cpu);
cpu               212 drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c 			 __func__, cpu, i, pool->next);
cpu               355 drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c 	unsigned int cpu;
cpu               374 drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c 	for_each_possible_cpu(cpu) {
cpu               375 drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c 		struct cxgbi_ppm_pool *ppool = per_cpu_ptr(pools, cpu);
cpu              2260 drivers/net/ethernet/cisco/enic/enic_main.c 		(*rss_cpu_buf_va).cpu[i/4].b[i%4] = i % enic->rq_count;
cpu                41 drivers/net/ethernet/cisco/enic/vnic_rss.h 	} cpu[32];
cpu               781 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c 	int cpu;
cpu               783 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c 	for_each_cpu_and(cpu, cpus, cpu_online_mask) {
cpu               784 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c 		portal = qman_get_affine_portal(cpu);
cpu               894 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c 	int egress_cnt = 0, conf_cnt = 0, num_portals = 0, portal_cnt = 0, cpu;
cpu               899 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c 	for_each_cpu_and(cpu, affine_cpus, cpu_online_mask)
cpu               900 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c 		channels[num_portals++] = qman_affine_channel(cpu);
cpu              2649 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c 	int cpu;
cpu              2651 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c 	for_each_possible_cpu(cpu) {
cpu              2652 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c 		percpu_priv = per_cpu_ptr(priv->percpu_priv, cpu);
cpu              2665 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c 	int cpu;
cpu              2667 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c 	for_each_possible_cpu(cpu) {
cpu              2668 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c 		percpu_priv = per_cpu_ptr(priv->percpu_priv, cpu);
cpu               558 drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c 	int cpu, res;
cpu               572 drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c 	for_each_cpu_and(cpu, cpus, cpu_online_mask) {
cpu               573 drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c 		portal = qman_get_affine_portal(cpu);
cpu               582 drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c 		needs_revert[cpu] = true;
cpu               589 drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c 	for_each_cpu_and(cpu, cpus, cpu_online_mask) {
cpu               590 drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c 		if (!needs_revert[cpu])
cpu               592 drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c 		portal = qman_get_affine_portal(cpu);
cpu              2272 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c 						    int cpu)
cpu              2278 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c 		if (priv->channel[i]->nctx.desired_cpu == cpu)
cpu              2284 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c 	dev_warn(dev, "No affine channel found for cpu %d\n", cpu);
cpu              1236 drivers/net/ethernet/hisilicon/hns/hns_enet.c 	int cpu;
cpu              1244 drivers/net/ethernet/hisilicon/hns/hns_enet.c 			cpu = ring_idx;
cpu              1246 drivers/net/ethernet/hisilicon/hns/hns_enet.c 			cpu = ring_idx - q_num;
cpu              1249 drivers/net/ethernet/hisilicon/hns/hns_enet.c 			cpu = ring_idx * 2;
cpu              1251 drivers/net/ethernet/hisilicon/hns/hns_enet.c 			cpu = (ring_idx - q_num) * 2 + 1;
cpu              1255 drivers/net/ethernet/hisilicon/hns/hns_enet.c 	cpumask_set_cpu(cpu, mask);
cpu              1257 drivers/net/ethernet/hisilicon/hns/hns_enet.c 	return cpu;
cpu              1282 drivers/net/ethernet/hisilicon/hns/hns_enet.c 	int cpu;
cpu              1305 drivers/net/ethernet/hisilicon/hns/hns_enet.c 		cpu = hns_nic_init_affinity_mask(h->q_num, i,
cpu              1308 drivers/net/ethernet/hisilicon/hns/hns_enet.c 		if (cpu_online(cpu))
cpu               183 drivers/net/ethernet/intel/e1000/e1000.h 	int cpu;
cpu              3091 drivers/net/ethernet/intel/i40e/i40e_main.c 	int cpu;
cpu              3100 drivers/net/ethernet/intel/i40e/i40e_main.c 	cpu = cpumask_local_spread(ring->q_vector->v_idx, -1);
cpu              3101 drivers/net/ethernet/intel/i40e/i40e_main.c 	netif_set_xps_queue(ring->netdev, get_cpu_mask(cpu),
cpu              3776 drivers/net/ethernet/intel/i40e/i40e_main.c 	int cpu;
cpu              3818 drivers/net/ethernet/intel/i40e/i40e_main.c 		cpu = cpumask_local_spread(q_vector->v_idx, -1);
cpu              3819 drivers/net/ethernet/intel/i40e/i40e_main.c 		irq_set_affinity_hint(irq_num, get_cpu_mask(cpu));
cpu              11155 drivers/net/ethernet/intel/i40e/i40e_main.c static int i40e_vsi_alloc_q_vector(struct i40e_vsi *vsi, int v_idx, int cpu)
cpu               420 drivers/net/ethernet/intel/iavf/iavf_main.c 	int cpu;
cpu               464 drivers/net/ethernet/intel/iavf/iavf_main.c 		cpu = cpumask_local_spread(q_vector->v_idx, -1);
cpu               465 drivers/net/ethernet/intel/iavf/iavf_main.c 		irq_set_affinity_hint(irq_num, get_cpu_mask(cpu));
cpu               295 drivers/net/ethernet/intel/igb/igb.h 	int cpu;			/* CPU for DCA */
cpu              6595 drivers/net/ethernet/intel/igb/igb_main.c 			      int cpu)
cpu              6598 drivers/net/ethernet/intel/igb/igb_main.c 	u32 txctrl = dca3_get_tag(tx_ring->dev, cpu);
cpu              6616 drivers/net/ethernet/intel/igb/igb_main.c 			      int cpu)
cpu              6619 drivers/net/ethernet/intel/igb/igb_main.c 	u32 rxctrl = dca3_get_tag(&adapter->pdev->dev, cpu);
cpu              6637 drivers/net/ethernet/intel/igb/igb_main.c 	int cpu = get_cpu();
cpu              6639 drivers/net/ethernet/intel/igb/igb_main.c 	if (q_vector->cpu == cpu)
cpu              6643 drivers/net/ethernet/intel/igb/igb_main.c 		igb_update_tx_dca(adapter, q_vector->tx.ring, cpu);
cpu              6646 drivers/net/ethernet/intel/igb/igb_main.c 		igb_update_rx_dca(adapter, q_vector->rx.ring, cpu);
cpu              6648 drivers/net/ethernet/intel/igb/igb_main.c 	q_vector->cpu = cpu;
cpu              6665 drivers/net/ethernet/intel/igb/igb_main.c 		adapter->q_vector[i]->cpu = -1;
cpu               450 drivers/net/ethernet/intel/ixgbe/ixgbe.h 	int cpu;	    /* CPU for DCA */
cpu               597 drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c static void ixgbe_fcoe_dma_pool_free(struct ixgbe_fcoe *fcoe, unsigned int cpu)
cpu               601 drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c 	ddp_pool = per_cpu_ptr(fcoe->ddp_pool, cpu);
cpu               608 drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c 				     unsigned int cpu)
cpu               614 drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c 	snprintf(pool_name, 32, "ixgbe_fcoe_ddp_%u", cpu);
cpu               621 drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c 	ddp_pool = per_cpu_ptr(fcoe->ddp_pool, cpu);
cpu               715 drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c 	int cpu, i, ddp_max;
cpu               729 drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c 	for_each_possible_cpu(cpu)
cpu               730 drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c 		ixgbe_fcoe_dma_pool_free(fcoe, cpu);
cpu               756 drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c 	unsigned int cpu;
cpu               778 drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c 	for_each_possible_cpu(cpu) {
cpu               779 drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c 		int err = ixgbe_fcoe_dma_pool_alloc(fcoe, dev, cpu);
cpu               783 drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c 		e_err(drv, "failed to alloc DDP pool on cpu:%d\n", cpu);
cpu               838 drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c 	int cpu = -1;
cpu               849 drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c 				cpu = v_idx;
cpu               850 drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c 				node = cpu_to_node(cpu);
cpu               865 drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c 	if (cpu != -1)
cpu               866 drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c 		cpumask_set_cpu(cpu, &q_vector->affinity_mask);
cpu               871 drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c 	q_vector->cpu = -1;
cpu              1276 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 				int cpu)
cpu              1283 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 		txctrl = dca3_get_tag(tx_ring->dev, cpu);
cpu              1313 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 				int cpu)
cpu              1320 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 		rxctrl = dca3_get_tag(rx_ring->dev, cpu);
cpu              1347 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	int cpu = get_cpu();
cpu              1349 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	if (q_vector->cpu == cpu)
cpu              1353 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 		ixgbe_update_tx_dca(adapter, ring, cpu);
cpu              1356 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 		ixgbe_update_rx_dca(adapter, ring, cpu);
cpu              1358 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	q_vector->cpu = cpu;
cpu              1376 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 		adapter->q_vector[i]->cpu = -1;
cpu              7202 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 			unsigned int cpu;
cpu              7204 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 			for_each_possible_cpu(cpu) {
cpu              7205 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 				ddp_pool = per_cpu_ptr(fcoe->ddp_pool, cpu);
cpu               122 drivers/net/ethernet/marvell/mvneta.c #define MVNETA_CPU_MAP(cpu)                      (0x2540 + ((cpu) << 2))
cpu               705 drivers/net/ethernet/marvell/mvneta.c 	int cpu;
cpu               707 drivers/net/ethernet/marvell/mvneta.c 	for_each_possible_cpu(cpu) {
cpu               716 drivers/net/ethernet/marvell/mvneta.c 		cpu_stats = per_cpu_ptr(pp->stats, cpu);
cpu              1371 drivers/net/ethernet/marvell/mvneta.c 	int cpu;
cpu              1391 drivers/net/ethernet/marvell/mvneta.c 	for_each_present_cpu(cpu) {
cpu              1396 drivers/net/ethernet/marvell/mvneta.c 				if ((rxq % max_cpu) == cpu)
cpu              1400 drivers/net/ethernet/marvell/mvneta.c 				if ((txq % max_cpu) == cpu)
cpu              1408 drivers/net/ethernet/marvell/mvneta.c 				txq_map = (cpu == pp->rxq_def) ?
cpu              1416 drivers/net/ethernet/marvell/mvneta.c 		mvreg_write(pp, MVNETA_CPU_MAP(cpu), rxq_map | txq_map);
cpu              2528 drivers/net/ethernet/marvell/mvneta.c 	int cpu = smp_processor_id();
cpu              2534 drivers/net/ethernet/marvell/mvneta.c 		__netif_tx_lock(nq, cpu);
cpu              2986 drivers/net/ethernet/marvell/mvneta.c 	int cpu;
cpu              3029 drivers/net/ethernet/marvell/mvneta.c 		cpu = txq->id % num_present_cpus();
cpu              3031 drivers/net/ethernet/marvell/mvneta.c 		cpu = pp->rxq_def % num_present_cpus();
cpu              3032 drivers/net/ethernet/marvell/mvneta.c 	cpumask_set_cpu(cpu, &txq->affinity_mask);
cpu              3184 drivers/net/ethernet/marvell/mvneta.c 	int cpu;
cpu              3196 drivers/net/ethernet/marvell/mvneta.c 		for_each_online_cpu(cpu) {
cpu              3198 drivers/net/ethernet/marvell/mvneta.c 				per_cpu_ptr(pp->ports, cpu);
cpu              3219 drivers/net/ethernet/marvell/mvneta.c 	unsigned int cpu;
cpu              3224 drivers/net/ethernet/marvell/mvneta.c 		for_each_online_cpu(cpu) {
cpu              3226 drivers/net/ethernet/marvell/mvneta.c 				per_cpu_ptr(pp->ports, cpu);
cpu              3681 drivers/net/ethernet/marvell/mvneta.c 	int elected_cpu = 0, max_cpu, cpu, i = 0;
cpu              3691 drivers/net/ethernet/marvell/mvneta.c 	for_each_online_cpu(cpu) {
cpu              3696 drivers/net/ethernet/marvell/mvneta.c 			if ((rxq % max_cpu) == cpu)
cpu              3699 drivers/net/ethernet/marvell/mvneta.c 		if (cpu == elected_cpu)
cpu              3710 drivers/net/ethernet/marvell/mvneta.c 			txq_map = (cpu == elected_cpu) ?
cpu              3713 drivers/net/ethernet/marvell/mvneta.c 			txq_map = mvreg_read(pp, MVNETA_CPU_MAP(cpu)) &
cpu              3716 drivers/net/ethernet/marvell/mvneta.c 		mvreg_write(pp, MVNETA_CPU_MAP(cpu), rxq_map | txq_map);
cpu              3721 drivers/net/ethernet/marvell/mvneta.c 		smp_call_function_single(cpu, mvneta_percpu_unmask_interrupt,
cpu              3728 drivers/net/ethernet/marvell/mvneta.c static int mvneta_cpu_online(unsigned int cpu, struct hlist_node *node)
cpu              3733 drivers/net/ethernet/marvell/mvneta.c 	struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu);
cpu              3752 drivers/net/ethernet/marvell/mvneta.c 		if (other_cpu != cpu) {
cpu              3786 drivers/net/ethernet/marvell/mvneta.c static int mvneta_cpu_down_prepare(unsigned int cpu, struct hlist_node *node)
cpu              3790 drivers/net/ethernet/marvell/mvneta.c 	struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu);
cpu              3808 drivers/net/ethernet/marvell/mvneta.c static int mvneta_cpu_dead(unsigned int cpu, struct hlist_node *node)
cpu              4178 drivers/net/ethernet/marvell/mvneta.c 	int cpu;
cpu              4187 drivers/net/ethernet/marvell/mvneta.c 		for_each_online_cpu(cpu) {
cpu              4189 drivers/net/ethernet/marvell/mvneta.c 				per_cpu_ptr(pp->ports, cpu);
cpu              4215 drivers/net/ethernet/marvell/mvneta.c 		for_each_online_cpu(cpu) {
cpu              4217 drivers/net/ethernet/marvell/mvneta.c 				per_cpu_ptr(pp->ports, cpu);
cpu              4498 drivers/net/ethernet/marvell/mvneta.c 	int cpu;
cpu              4676 drivers/net/ethernet/marvell/mvneta.c 		for_each_present_cpu(cpu) {
cpu              4678 drivers/net/ethernet/marvell/mvneta.c 				per_cpu_ptr(pp->ports, cpu);
cpu               198 drivers/net/ethernet/marvell/mvpp2/mvpp2.h #define MVPP2_AGGR_TXQ_DESC_ADDR_REG(cpu)	(0x2100 + 4 * (cpu))
cpu               200 drivers/net/ethernet/marvell/mvpp2/mvpp2.h #define MVPP2_AGGR_TXQ_DESC_SIZE_REG(cpu)	(0x2140 + 4 * (cpu))
cpu               202 drivers/net/ethernet/marvell/mvpp2/mvpp2.h #define MVPP2_AGGR_TXQ_STATUS_REG(cpu)		(0x2180 + 4 * (cpu))
cpu               204 drivers/net/ethernet/marvell/mvpp2/mvpp2.h #define MVPP2_AGGR_TXQ_INDEX_REG(cpu)		(0x21c0 + 4 * (cpu))
cpu              1446 drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c 	int nrxqs, cpu, cpus = num_possible_cpus();
cpu              1452 drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c 	cpu = rxq / nrxqs;
cpu              1454 drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c 	if (!cpu_online(cpu))
cpu                90 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c static inline u32 mvpp2_cpu_to_thread(struct mvpp2 *priv, int cpu)
cpu                92 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 	return cpu % priv->nthreads;
cpu              2689 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 	int queue, err, cpu;
cpu              2698 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 		cpu = queue % num_present_cpus();
cpu              2699 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 		netif_set_xps_queue(port->dev, cpumask_of(cpu), queue);
cpu              3570 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 			unsigned int cpu;
cpu              3572 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 			for_each_present_cpu(cpu) {
cpu              3573 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 				if (mvpp2_cpu_to_thread(port->priv, cpu) ==
cpu              3575 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 					cpumask_set_cpu(cpu, qv->mask);
cpu              3938 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 	unsigned int cpu;
cpu              3940 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 	for_each_possible_cpu(cpu) {
cpu              3947 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 		cpu_stats = per_cpu_ptr(port->stats, cpu);
cpu              1348 drivers/net/ethernet/mediatek/mtk_eth_soc.c 	u32 cpu, dma;
cpu              1350 drivers/net/ethernet/mediatek/mtk_eth_soc.c 	cpu = mtk_r32(eth, MTK_QTX_CRX_PTR);
cpu              1353 drivers/net/ethernet/mediatek/mtk_eth_soc.c 	desc = mtk_qdma_phys_to_virt(ring, cpu);
cpu              1355 drivers/net/ethernet/mediatek/mtk_eth_soc.c 	while ((cpu != dma) && budget) {
cpu              1381 drivers/net/ethernet/mediatek/mtk_eth_soc.c 		cpu = next_cpu;
cpu              1384 drivers/net/ethernet/mediatek/mtk_eth_soc.c 	mtk_w32(eth, cpu, MTK_QTX_CRX_PTR);
cpu              1396 drivers/net/ethernet/mediatek/mtk_eth_soc.c 	u32 cpu, dma;
cpu              1398 drivers/net/ethernet/mediatek/mtk_eth_soc.c 	cpu = ring->cpu_idx;
cpu              1401 drivers/net/ethernet/mediatek/mtk_eth_soc.c 	while ((cpu != dma) && budget) {
cpu              1402 drivers/net/ethernet/mediatek/mtk_eth_soc.c 		tx_buf = &ring->buf[cpu];
cpu              1415 drivers/net/ethernet/mediatek/mtk_eth_soc.c 		desc = &ring->dma[cpu];
cpu              1419 drivers/net/ethernet/mediatek/mtk_eth_soc.c 		cpu = NEXT_DESP_IDX(cpu, ring->dma_size);
cpu              1422 drivers/net/ethernet/mediatek/mtk_eth_soc.c 	ring->cpu_idx = cpu;
cpu               741 drivers/net/ethernet/mellanox/mlx5/core/en.h 	int                        cpu;
cpu               258 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 				       GFP_KERNEL, cpu_to_node(c->cpu));
cpu               347 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 			      int wq_sz, int cpu)
cpu               352 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 				   GFP_KERNEL, cpu_to_node(cpu));
cpu               391 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	rqp->wq.db_numa_node = cpu_to_node(c->cpu);
cpu               501 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 				      GFP_KERNEL, cpu_to_node(c->cpu));
cpu               507 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 		err = mlx5e_init_di_list(rq, wq_sz, c->cpu);
cpu               551 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 		pp_params.nid       = cpu_to_node(c->cpu);
cpu              1014 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	param->wq.db_numa_node = cpu_to_node(c->cpu);
cpu              1020 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	err = mlx5e_alloc_xdpsq_db(sq, cpu_to_node(c->cpu));
cpu              1076 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	param->wq.db_numa_node = cpu_to_node(c->cpu);
cpu              1082 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	err = mlx5e_alloc_icosq_db(sq, cpu_to_node(c->cpu));
cpu              1168 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	param->wq.db_numa_node = cpu_to_node(c->cpu);
cpu              1174 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	err = mlx5e_alloc_txqsq_db(sq, cpu_to_node(c->cpu));
cpu              1580 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	param->wq.buf_numa_node = cpu_to_node(c->cpu);
cpu              1581 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	param->wq.db_numa_node  = cpu_to_node(c->cpu);
cpu              1835 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 		int cpu = cpumask_first(mlx5_comp_irq_get_affinity_mask(c->mdev, irq));
cpu              1837 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 		cpumask_set_cpu(cpu, c->xps_cpumask);
cpu              1968 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	int cpu = cpumask_first(mlx5_comp_irq_get_affinity_mask(priv->mdev, ix));
cpu              1980 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	c = kvzalloc_node(sizeof(*c), GFP_KERNEL, cpu_to_node(cpu));
cpu              1988 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	c->cpu      = cpu;
cpu              2065 drivers/net/ethernet/mscc/ocelot.c 	int i, ret, cpu = ocelot->num_phys_ports;
cpu              2146 drivers/net/ethernet/mscc/ocelot.c 	ocelot_write_rix(ocelot, 0, ANA_PGID_PGID, cpu);
cpu              2147 drivers/net/ethernet/mscc/ocelot.c 	ocelot_write_rix(ocelot, BIT(cpu), ANA_PGID_PGID, PGID_CPU);
cpu              2149 drivers/net/ethernet/mscc/ocelot.c 			 ANA_PORT_PORT_CFG_PORTID_VAL(cpu),
cpu              2150 drivers/net/ethernet/mscc/ocelot.c 			 ANA_PORT_PORT_CFG, cpu);
cpu              2168 drivers/net/ethernet/mscc/ocelot.c 			 QSYS_SWITCH_PORT_MODE, cpu);
cpu              2170 drivers/net/ethernet/mscc/ocelot.c 			 SYS_PORT_MODE_INCL_INJ_HDR(1), SYS_PORT_MODE, cpu);
cpu               190 drivers/net/ethernet/myricom/myri10ge/myri10ge.c 	int cpu;
cpu              1085 drivers/net/ethernet/myricom/myri10ge/myri10ge.c myri10ge_write_dca(struct myri10ge_slice_state *ss, int cpu, int tag)
cpu              1093 drivers/net/ethernet/myricom/myri10ge/myri10ge.c 	int cpu = get_cpu();
cpu              1096 drivers/net/ethernet/myricom/myri10ge/myri10ge.c 	if (cpu != ss->cpu) {
cpu              1097 drivers/net/ethernet/myricom/myri10ge/myri10ge.c 		tag = dca3_get_tag(&ss->mgp->pdev->dev, cpu);
cpu              1099 drivers/net/ethernet/myricom/myri10ge/myri10ge.c 			myri10ge_write_dca(ss, cpu, tag);
cpu              1100 drivers/net/ethernet/myricom/myri10ge/myri10ge.c 		ss->cpu = cpu;
cpu              1126 drivers/net/ethernet/myricom/myri10ge/myri10ge.c 		mgp->ss[i].cpu = -1;
cpu              1733 drivers/net/ethernet/nvidia/forcedeth.c static void nv_get_stats(int cpu, struct fe_priv *np,
cpu              1736 drivers/net/ethernet/nvidia/forcedeth.c 	struct nv_txrx_stats *src = per_cpu_ptr(np->txrx_stats, cpu);
cpu              1778 drivers/net/ethernet/nvidia/forcedeth.c 	int cpu;
cpu              1791 drivers/net/ethernet/nvidia/forcedeth.c 	for_each_online_cpu(cpu)
cpu              1792 drivers/net/ethernet/nvidia/forcedeth.c 		nv_get_stats(cpu, np, storage);
cpu               200 drivers/net/ethernet/pensando/ionic/ionic_dev.h 	unsigned int cpu;
cpu               412 drivers/net/ethernet/pensando/ionic/ionic_lif.c 		new->intr.cpu = new->intr.index % num_online_cpus();
cpu               413 drivers/net/ethernet/pensando/ionic/ionic_lif.c 		if (cpu_online(new->intr.cpu))
cpu               414 drivers/net/ethernet/pensando/ionic/ionic_lif.c 			cpumask_set_cpu(new->intr.cpu,
cpu               109 drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c 	unsigned int cpu, start;
cpu               113 drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c 	for_each_possible_cpu(cpu) {
cpu               114 drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c 		pcpu_ptr = per_cpu_ptr(priv->pcpu_stats, cpu);
cpu              1389 drivers/net/ethernet/sfc/efx.c 	int cpu;
cpu              1401 drivers/net/ethernet/sfc/efx.c 		for_each_online_cpu(cpu) {
cpu              1402 drivers/net/ethernet/sfc/efx.c 			if (!cpumask_test_cpu(cpu, thread_mask)) {
cpu              1405 drivers/net/ethernet/sfc/efx.c 					   topology_sibling_cpumask(cpu));
cpu              1567 drivers/net/ethernet/sfc/efx.c 	unsigned int cpu;
cpu              1570 drivers/net/ethernet/sfc/efx.c 		cpu = cpumask_local_spread(channel->channel,
cpu              1572 drivers/net/ethernet/sfc/efx.c 		irq_set_affinity_hint(channel->irq, cpumask_of(cpu));
cpu              1328 drivers/net/ethernet/sfc/falcon/efx.c 	int cpu;
cpu              1340 drivers/net/ethernet/sfc/falcon/efx.c 		for_each_online_cpu(cpu) {
cpu              1341 drivers/net/ethernet/sfc/falcon/efx.c 			if (!cpumask_test_cpu(cpu, thread_mask)) {
cpu              1344 drivers/net/ethernet/sfc/falcon/efx.c 					   topology_sibling_cpumask(cpu));
cpu               134 drivers/net/ethernet/sfc/falcon/selftest.c 	int cpu;
cpu               155 drivers/net/ethernet/sfc/falcon/selftest.c 		cpu = ef4_nic_irq_test_irq_cpu(efx);
cpu               156 drivers/net/ethernet/sfc/falcon/selftest.c 		if (cpu >= 0)
cpu               166 drivers/net/ethernet/sfc/falcon/selftest.c 		  INT_MODE(efx), cpu);
cpu               791 drivers/net/ethernet/sfc/falcon/selftest.c 	int cpu;
cpu               794 drivers/net/ethernet/sfc/falcon/selftest.c 		cpu = ef4_nic_event_test_irq_cpu(channel);
cpu               795 drivers/net/ethernet/sfc/falcon/selftest.c 		if (cpu < 0)
cpu               802 drivers/net/ethernet/sfc/falcon/selftest.c 				  channel->channel, cpu);
cpu               134 drivers/net/ethernet/sfc/selftest.c 	int cpu;
cpu               155 drivers/net/ethernet/sfc/selftest.c 		cpu = efx_nic_irq_test_irq_cpu(efx);
cpu               156 drivers/net/ethernet/sfc/selftest.c 		if (cpu >= 0)
cpu               166 drivers/net/ethernet/sfc/selftest.c 		  INT_MODE(efx), cpu);
cpu               791 drivers/net/ethernet/sfc/selftest.c 	int cpu;
cpu               794 drivers/net/ethernet/sfc/selftest.c 		cpu = efx_nic_event_test_irq_cpu(channel);
cpu               795 drivers/net/ethernet/sfc/selftest.c 		if (cpu < 0)
cpu               802 drivers/net/ethernet/sfc/selftest.c 				  channel->channel, cpu);
cpu              1411 drivers/net/hyperv/netvsc_drv.c 	int i, j, cpu;
cpu              1448 drivers/net/hyperv/netvsc_drv.c 	for_each_present_cpu(cpu) {
cpu              1449 drivers/net/hyperv/netvsc_drv.c 		struct netvsc_ethtool_pcpu_stats *this_sum = &pcpu_sum[cpu];
cpu              1463 drivers/net/hyperv/netvsc_drv.c 	int i, cpu;
cpu              1491 drivers/net/hyperv/netvsc_drv.c 		for_each_present_cpu(cpu) {
cpu              1493 drivers/net/hyperv/netvsc_drv.c 				sprintf(p, pcpu_stats[i].name, cpu);
cpu              2156 drivers/net/macsec.c 	int cpu;
cpu              2158 drivers/net/macsec.c 	for_each_possible_cpu(cpu) {
cpu              2159 drivers/net/macsec.c 		const struct macsec_tx_sa_stats *stats = per_cpu_ptr(pstats, cpu);
cpu              2177 drivers/net/macsec.c 	int cpu;
cpu              2179 drivers/net/macsec.c 	for_each_possible_cpu(cpu) {
cpu              2180 drivers/net/macsec.c 		const struct macsec_rx_sa_stats *stats = per_cpu_ptr(pstats, cpu);
cpu              2203 drivers/net/macsec.c 	int cpu;
cpu              2205 drivers/net/macsec.c 	for_each_possible_cpu(cpu) {
cpu              2210 drivers/net/macsec.c 		stats = per_cpu_ptr(pstats, cpu);
cpu              2267 drivers/net/macsec.c 	int cpu;
cpu              2269 drivers/net/macsec.c 	for_each_possible_cpu(cpu) {
cpu              2274 drivers/net/macsec.c 		stats = per_cpu_ptr(pstats, cpu);
cpu              2307 drivers/net/macsec.c 	int cpu;
cpu              2309 drivers/net/macsec.c 	for_each_possible_cpu(cpu) {
cpu              2314 drivers/net/macsec.c 		stats = per_cpu_ptr(pstats, cpu);
cpu              2933 drivers/net/macsec.c 	int cpu;
cpu              2938 drivers/net/macsec.c 	for_each_possible_cpu(cpu) {
cpu              2943 drivers/net/macsec.c 		stats = per_cpu_ptr(dev->tstats, cpu);
cpu              1012 drivers/net/ppp/ppp_generic.c 	int cpu;
cpu              1033 drivers/net/ppp/ppp_generic.c 	for_each_possible_cpu(cpu)
cpu              1034 drivers/net/ppp/ppp_generic.c 		(*per_cpu_ptr(ppp->xmit_recursion, cpu)) = 0;
cpu               130 drivers/net/usb/qmi_wwan.c 	int cpu;
cpu               134 drivers/net/usb/qmi_wwan.c 	for_each_possible_cpu(cpu) {
cpu               139 drivers/net/usb/qmi_wwan.c 		stats64 = per_cpu_ptr(priv->stats64, cpu);
cpu               990 drivers/net/usb/usbnet.c 	int cpu;
cpu               994 drivers/net/usb/usbnet.c 	for_each_possible_cpu(cpu) {
cpu               999 drivers/net/usb/usbnet.c 		stats64 = per_cpu_ptr(dev->stats64, cpu);
cpu               287 drivers/net/veth.c 	int cpu;
cpu               291 drivers/net/veth.c 	for_each_possible_cpu(cpu) {
cpu               292 drivers/net/veth.c 		struct pcpu_lstats *stats = per_cpu_ptr(dev->lstats, cpu);
cpu              1937 drivers/net/virtio_net.c 	int i, j, cpu;
cpu              1951 drivers/net/virtio_net.c 	cpu = cpumask_next(-1, cpu_online_mask);
cpu              1957 drivers/net/virtio_net.c 			cpumask_set_cpu(cpu, mask);
cpu              1958 drivers/net/virtio_net.c 			cpu = cpumask_next_wrap(cpu, cpu_online_mask,
cpu              1971 drivers/net/virtio_net.c static int virtnet_cpu_online(unsigned int cpu, struct hlist_node *node)
cpu              1979 drivers/net/virtio_net.c static int virtnet_cpu_dead(unsigned int cpu, struct hlist_node *node)
cpu              1987 drivers/net/virtio_net.c static int virtnet_cpu_down_prep(unsigned int cpu, struct hlist_node *node)
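
virtnet_cpu_online(), virtnet_cpu_dead() and virtnet_cpu_down_prep() above are per-instance CPU hotplug callbacks. Such callbacks are generally wired up through the cpuhp multi-instance API; the sketch below shows only that shape, with invented names and a single dynamic online state (the real driver also registers a CPU-dead state and keeps its own bookkeeping).

/* Hedged sketch of cpuhp multi-instance callbacks; struct my_dev,
 * the state name and the callback bodies are all illustrative. */
#include <linux/cpu.h>
#include <linux/cpuhotplug.h>
#include <linux/init.h>
#include <linux/list.h>

struct my_dev {
	struct hlist_node node;		/* links this instance into the state */
	/* ... per-device queue/CPU mapping ... */
};

static enum cpuhp_state my_online_state;

static int my_cpu_online(unsigned int cpu, struct hlist_node *node)
{
	struct my_dev *dev = hlist_entry(node, struct my_dev, node);

	/* re-plumb queue affinities for the newly onlined CPU */
	(void)dev;
	return 0;
}

static int my_cpu_down_prep(unsigned int cpu, struct hlist_node *node)
{
	struct my_dev *dev = hlist_entry(node, struct my_dev, node);

	/* undo per-CPU wiring before the CPU is taken down */
	(void)dev;
	return 0;
}

static int __init my_hotplug_init(void)		/* once, at module init */
{
	int ret;

	ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
				      "drivers/example:online",
				      my_cpu_online, my_cpu_down_prep);
	if (ret < 0)
		return ret;
	my_online_state = ret;
	return 0;
}

static int my_register_device(struct my_dev *dev)	/* per instance */
{
	return cpuhp_state_add_instance(my_online_state, &dev->node);
}
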
cpu               990 drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c 	struct brcmf_core_priv *cpu;
cpu              1004 drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c 		cpu = container_of(core, struct brcmf_core_priv, pub);
cpu              1007 drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c 		val = chip->ops->read32(chip->ctx, cpu->wrapbase + BCMA_IOCTL);
cpu               786 drivers/net/wireless/intel/iwlwifi/pcie/trans.c 					   int cpu,
cpu               793 drivers/net/wireless/intel/iwlwifi/pcie/trans.c 	if (cpu == 1) {
cpu               836 drivers/net/wireless/intel/iwlwifi/pcie/trans.c 		if (cpu == 1)
cpu               843 drivers/net/wireless/intel/iwlwifi/pcie/trans.c 		if (cpu == 1)
cpu               856 drivers/net/wireless/intel/iwlwifi/pcie/trans.c 				      int cpu,
cpu               862 drivers/net/wireless/intel/iwlwifi/pcie/trans.c 	if (cpu == 1)
cpu              1699 drivers/net/wireless/intel/iwlwifi/pcie/trans.c 	int iter_rx_q, i, ret, cpu, offset;
cpu              1710 drivers/net/wireless/intel/iwlwifi/pcie/trans.c 		cpu = cpumask_next(i - offset, cpu_online_mask);
cpu              1711 drivers/net/wireless/intel/iwlwifi/pcie/trans.c 		cpumask_set_cpu(cpu, &trans_pcie->affinity_mask[i]);
cpu               120 drivers/net/wireless/quantenna/qtnfmac/core.c 	int cpu;
cpu               127 drivers/net/wireless/quantenna/qtnfmac/core.c 	for_each_possible_cpu(cpu) {
cpu               132 drivers/net/wireless/quantenna/qtnfmac/core.c 		stats64 = per_cpu_ptr(vif->stats64, cpu);
cpu              1099 drivers/net/xen-netfront.c 	int cpu;
cpu              1101 drivers/net/xen-netfront.c 	for_each_possible_cpu(cpu) {
cpu              1102 drivers/net/xen-netfront.c 		struct netfront_stats *rx_stats = per_cpu_ptr(np->rx_stats, cpu);
cpu              1103 drivers/net/xen-netfront.c 		struct netfront_stats *tx_stats = per_cpu_ptr(np->tx_stats, cpu);
cpu               899 drivers/nvdimm/region_devs.c 	unsigned int cpu, lane;
cpu               901 drivers/nvdimm/region_devs.c 	cpu = get_cpu();
cpu               905 drivers/nvdimm/region_devs.c 		lane = cpu % nd_region->num_lanes;
cpu               906 drivers/nvdimm/region_devs.c 		ndl_count = per_cpu_ptr(nd_region->lane, cpu);
cpu               911 drivers/nvdimm/region_devs.c 		lane = cpu;
cpu               920 drivers/nvdimm/region_devs.c 		unsigned int cpu = get_cpu();
cpu               923 drivers/nvdimm/region_devs.c 		ndl_count = per_cpu_ptr(nd_region->lane, cpu);
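
The region_devs.c entries above reserve an I/O "lane" by calling get_cpu() (which also disables preemption), deriving the lane from the CPU number, and bumping a per-CPU nesting count. Reduced to a hedged sketch with invented names, and omitting the spinlock the real code takes when there are fewer lanes than CPUs:

/* Sketch of per-CPU lane selection under get_cpu()/put_cpu(). */
#include <linux/percpu.h>
#include <linux/smp.h>

struct my_region {
	unsigned int num_lanes;
	unsigned int __percpu *lane_count;	/* per-CPU nesting depth */
};

static unsigned int my_acquire_lane(struct my_region *r)
{
	unsigned int cpu = get_cpu();		/* disables preemption */
	unsigned int lane = cpu % r->num_lanes;

	(*per_cpu_ptr(r->lane_count, cpu))++;
	return lane;
}

static void my_release_lane(struct my_region *r)
{
	unsigned int cpu = smp_processor_id();	/* still preempt-disabled */

	(*per_cpu_ptr(r->lane_count, cpu))--;
	put_cpu();				/* re-enables preemption */
}
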
cpu                87 drivers/nvme/target/tcp.c 	int			cpu;
cpu               492 drivers/nvme/target/tcp.c 	queue_work_on(cmd->queue->cpu, nvmet_tcp_wq, &cmd->queue->io_work);
cpu              1206 drivers/nvme/target/tcp.c 		queue_work_on(queue->cpu, nvmet_tcp_wq, &queue->io_work);
cpu              1366 drivers/nvme/target/tcp.c 		queue_work_on(queue->cpu, nvmet_tcp_wq, &queue->io_work);
cpu              1386 drivers/nvme/target/tcp.c 		queue_work_on(queue->cpu, nvmet_tcp_wq, &queue->io_work);
cpu              1504 drivers/nvme/target/tcp.c 	queue->cpu = port->last_cpu;
cpu              1515 drivers/nvme/target/tcp.c 	queue_work_on(queue->cpu, nvmet_tcp_wq, &queue->io_work);
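
The nvme/target/tcp.c entries above give every queue a home CPU when it is set up (round-robining over the online CPUs) and then always schedule that queue's io_work there with queue_work_on(). A minimal hedged sketch of that assignment and kick path, with invented structure names and a simplified cursor:

/* Sketch: round-robin a home CPU per queue, run its work there. */
#include <linux/cpumask.h>
#include <linux/workqueue.h>

struct my_queue {
	int			cpu;		/* home CPU for this queue */
	struct work_struct	io_work;	/* initialised elsewhere */
};

static int my_last_cpu = -1;			/* hypothetical cursor */

static void my_assign_queue_cpu(struct my_queue *q)
{
	unsigned int cpu = cpumask_next(my_last_cpu, cpu_online_mask);

	if (cpu >= nr_cpu_ids)			/* wrap around */
		cpu = cpumask_first(cpu_online_mask);
	my_last_cpu = cpu;
	q->cpu = cpu;
}

static void my_kick_queue(struct my_queue *q, struct workqueue_struct *wq)
{
	queue_work_on(q->cpu, wq, &q->io_work);
}
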
cpu               365 drivers/of/base.c bool __weak arch_match_cpu_phys_id(int cpu, u64 phys_id)
cpu               367 drivers/of/base.c 	return (u32)phys_id == cpu;
cpu               376 drivers/of/base.c 			const char *prop_name, int cpu, unsigned int *thread)
cpu               384 drivers/of/base.c 	if (!cell && !ac && arch_match_cpu_phys_id(cpu, 0))
cpu               391 drivers/of/base.c 		if (arch_match_cpu_phys_id(cpu, hwid)) {
cpu               408 drivers/of/base.c 					      int cpu, unsigned int *thread)
cpu               417 drivers/of/base.c 					   cpu, thread))
cpu               420 drivers/of/base.c 	return __of_find_n_match_cpu_property(cpun, "reg", cpu, thread);
cpu               442 drivers/of/base.c struct device_node *of_get_cpu_node(int cpu, unsigned int *thread)
cpu               447 drivers/of/base.c 		if (arch_find_n_match_cpu_physical_id(cpun, cpu, thread))
cpu               464 drivers/of/base.c 	int cpu;
cpu               468 drivers/of/base.c 	for_each_possible_cpu(cpu) {
cpu               469 drivers/of/base.c 		np = of_cpu_device_node_get(cpu);
cpu               473 drivers/of/base.c 			return cpu;
cpu              2225 drivers/of/base.c int of_find_last_cache_level(unsigned int cpu)
cpu              2228 drivers/of/base.c 	struct device_node *prev = NULL, *np = of_cpu_device_node_get(cpu);
cpu               112 drivers/opp/cpu.c 	int cpu;
cpu               116 drivers/opp/cpu.c 	for_each_cpu(cpu, cpumask) {
cpu               117 drivers/opp/cpu.c 		if (cpu == last_cpu)
cpu               120 drivers/opp/cpu.c 		cpu_dev = get_cpu_device(cpu);
cpu               123 drivers/opp/cpu.c 			       cpu);
cpu               161 drivers/opp/cpu.c 	int cpu, ret = 0;
cpu               167 drivers/opp/cpu.c 	for_each_cpu(cpu, cpumask) {
cpu               168 drivers/opp/cpu.c 		if (cpu == cpu_dev->id)
cpu               171 drivers/opp/cpu.c 		dev = get_cpu_device(cpu);
cpu               174 drivers/opp/cpu.c 				__func__, cpu);
cpu               181 drivers/opp/cpu.c 				__func__, cpu);
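
The drivers/opp/cpu.c entries above iterate a cpumask and resolve each CPU number to its struct device with get_cpu_device() before touching its OPP table. The underlying loop, as a small hedged sketch (the actual work on the device is left as a comment):

/* Sketch: walk a cpumask and fetch each CPU's struct device. */
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/device.h>
#include <linux/printk.h>

static void my_for_each_cpu_device(const struct cpumask *cpumask)
{
	struct device *cpu_dev;
	int cpu;

	for_each_cpu(cpu, cpumask) {
		cpu_dev = get_cpu_device(cpu);
		if (!cpu_dev) {
			pr_err("%s: failed to get cpu%d device\n",
			       __func__, cpu);
			continue;
		}
		/* operate on cpu_dev here */
	}
}
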
cpu               871 drivers/opp/of.c 	int cpu, ret;
cpu               876 drivers/opp/of.c 	for_each_cpu(cpu, cpumask) {
cpu               877 drivers/opp/of.c 		cpu_dev = get_cpu_device(cpu);
cpu               880 drivers/opp/of.c 			       cpu);
cpu               892 drivers/opp/of.c 				 __func__, cpu, ret);
cpu               902 drivers/opp/of.c 	_dev_pm_opp_cpumask_remove_table(cpumask, cpu);
cpu               929 drivers/opp/of.c 	int cpu, ret = 0;
cpu               944 drivers/opp/of.c 	for_each_possible_cpu(cpu) {
cpu               945 drivers/opp/of.c 		if (cpu == cpu_dev->id)
cpu               948 drivers/opp/of.c 		cpu_np = of_cpu_device_node_get(cpu);
cpu               951 drivers/opp/of.c 				__func__, cpu);
cpu               967 drivers/opp/of.c 			cpumask_set_cpu(cpu, cpumask);
cpu              1054 drivers/opp/of.c 					 int cpu)
cpu              1064 drivers/opp/of.c 	cpu_dev = get_cpu_device(cpu);
cpu              1106 drivers/opp/of.c 	int ret, nr_opp, cpu = cpumask_first(cpus);
cpu              1111 drivers/opp/of.c 	cpu_dev = get_cpu_device(cpu);
cpu               456 drivers/oprofile/buffer_sync.c static void mark_done(int cpu)
cpu               460 drivers/oprofile/buffer_sync.c 	cpumask_set_cpu(cpu, marked_cpus);
cpu               493 drivers/oprofile/buffer_sync.c void sync_buffer(int cpu)
cpu               510 drivers/oprofile/buffer_sync.c 	add_cpu_switch(cpu);
cpu               512 drivers/oprofile/buffer_sync.c 	op_cpu_buffer_reset(cpu);
cpu               513 drivers/oprofile/buffer_sync.c 	available = op_cpu_buffer_entries(cpu);
cpu               516 drivers/oprofile/buffer_sync.c 		sample = op_cpu_buffer_read_entry(&entry, cpu);
cpu               564 drivers/oprofile/buffer_sync.c 	mark_done(cpu);
cpu                20 drivers/oprofile/buffer_sync.h void sync_buffer(int cpu);
cpu                87 drivers/oprofile/cpu_buffer.c 		b->cpu = i;
cpu               163 drivers/oprofile/cpu_buffer.c struct op_sample *op_cpu_buffer_read_entry(struct op_entry *entry, int cpu)
cpu               166 drivers/oprofile/cpu_buffer.c 	e = ring_buffer_consume(op_ring_buffer, cpu, NULL, NULL);
cpu               178 drivers/oprofile/cpu_buffer.c unsigned long op_cpu_buffer_entries(int cpu)
cpu               180 drivers/oprofile/cpu_buffer.c 	return ring_buffer_entries_cpu(op_ring_buffer, cpu);
cpu               456 drivers/oprofile/cpu_buffer.c 	if (b->cpu != smp_processor_id() && !cpu_online(b->cpu)) {
cpu               460 drivers/oprofile/cpu_buffer.c 	sync_buffer(b->cpu);
cpu                50 drivers/oprofile/cpu_buffer.h 	int cpu;
cpu                62 drivers/oprofile/cpu_buffer.h static inline void op_cpu_buffer_reset(int cpu)
cpu                64 drivers/oprofile/cpu_buffer.h 	struct oprofile_cpu_buffer *cpu_buf = &per_cpu(op_cpu_buffer, cpu);
cpu                80 drivers/oprofile/cpu_buffer.h struct op_sample *op_cpu_buffer_read_entry(struct op_entry *entry, int cpu);
cpu                81 drivers/oprofile/cpu_buffer.h unsigned long op_cpu_buffer_entries(int cpu);
cpu                37 drivers/oprofile/nmi_timer_int.c static int nmi_timer_start_cpu(int cpu)
cpu                39 drivers/oprofile/nmi_timer_int.c 	struct perf_event *event = per_cpu(nmi_timer_events, cpu);
cpu                42 drivers/oprofile/nmi_timer_int.c 		event = perf_event_create_kernel_counter(&nmi_timer_attr, cpu, NULL,
cpu                46 drivers/oprofile/nmi_timer_int.c 		per_cpu(nmi_timer_events, cpu) = event;
cpu                55 drivers/oprofile/nmi_timer_int.c static void nmi_timer_stop_cpu(int cpu)
cpu                57 drivers/oprofile/nmi_timer_int.c 	struct perf_event *event = per_cpu(nmi_timer_events, cpu);
cpu                63 drivers/oprofile/nmi_timer_int.c static int nmi_timer_cpu_online(unsigned int cpu)
cpu                65 drivers/oprofile/nmi_timer_int.c 	nmi_timer_start_cpu(cpu);
cpu                68 drivers/oprofile/nmi_timer_int.c static int nmi_timer_cpu_predown(unsigned int cpu)
cpu                70 drivers/oprofile/nmi_timer_int.c 	nmi_timer_stop_cpu(cpu);
cpu                76 drivers/oprofile/nmi_timer_int.c 	int cpu;
cpu                80 drivers/oprofile/nmi_timer_int.c 	for_each_online_cpu(cpu)
cpu                81 drivers/oprofile/nmi_timer_int.c 		nmi_timer_start_cpu(cpu);
cpu                89 drivers/oprofile/nmi_timer_int.c 	int cpu;
cpu                92 drivers/oprofile/nmi_timer_int.c 	for_each_online_cpu(cpu)
cpu                93 drivers/oprofile/nmi_timer_int.c 		nmi_timer_stop_cpu(cpu);
cpu               103 drivers/oprofile/nmi_timer_int.c 	int cpu;
cpu               106 drivers/oprofile/nmi_timer_int.c 	for_each_possible_cpu(cpu) {
cpu               107 drivers/oprofile/nmi_timer_int.c 		event = per_cpu(nmi_timer_events, cpu);
cpu               111 drivers/oprofile/nmi_timer_int.c 		per_cpu(nmi_timer_events, cpu) = NULL;
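
The nmi_timer_int.c entries above keep one kernel-owned perf event per CPU, creating it when a CPU comes up and releasing it on teardown. A hedged sketch of that lifecycle follows; the attribute values and the empty overflow handler are placeholders, not the profiler's real configuration.

/* Sketch: one kernel-owned perf counter per CPU; handler body is a stub. */
#include <linux/cpu.h>
#include <linux/percpu.h>
#include <linux/perf_event.h>

static DEFINE_PER_CPU(struct perf_event *, my_events);

static struct perf_event_attr my_attr = {
	.type		= PERF_TYPE_HARDWARE,
	.config		= PERF_COUNT_HW_CPU_CYCLES,
	.size		= sizeof(struct perf_event_attr),
	.sample_period	= 1000000,	/* placeholder period */
	.pinned		= 1,
};

static void my_overflow(struct perf_event *event,
			struct perf_sample_data *data, struct pt_regs *regs)
{
	/* per-sample work would go here */
}

static int my_start_cpu(int cpu)
{
	struct perf_event *event;

	event = perf_event_create_kernel_counter(&my_attr, cpu, NULL,
						 my_overflow, NULL);
	if (IS_ERR(event))
		return PTR_ERR(event);
	per_cpu(my_events, cpu) = event;
	return 0;
}

static void my_stop_cpu(int cpu)
{
	struct perf_event *event = per_cpu(my_events, cpu);

	if (event) {
		perf_event_release_kernel(event);
		per_cpu(my_events, cpu) = NULL;
	}
}
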
cpu                40 drivers/oprofile/oprofile_perf.c 	u32 cpu = smp_processor_id();
cpu                43 drivers/oprofile/oprofile_perf.c 		if (per_cpu(perf_events, cpu)[id] == event)
cpu                50 drivers/oprofile/oprofile_perf.c 				"on cpu %u\n", cpu);
cpu                75 drivers/oprofile/oprofile_perf.c static int op_create_counter(int cpu, int event)
cpu                79 drivers/oprofile/oprofile_perf.c 	if (!counter_config[event].enabled || per_cpu(perf_events, cpu)[event])
cpu                83 drivers/oprofile/oprofile_perf.c 						  cpu, NULL,
cpu                92 drivers/oprofile/oprofile_perf.c 				"on CPU %d\n", event, cpu);
cpu                96 drivers/oprofile/oprofile_perf.c 	per_cpu(perf_events, cpu)[event] = pevent;
cpu               101 drivers/oprofile/oprofile_perf.c static void op_destroy_counter(int cpu, int event)
cpu               103 drivers/oprofile/oprofile_perf.c 	struct perf_event *pevent = per_cpu(perf_events, cpu)[event];
cpu               107 drivers/oprofile/oprofile_perf.c 		per_cpu(perf_events, cpu)[event] = NULL;
cpu               117 drivers/oprofile/oprofile_perf.c 	int cpu, event, ret = 0;
cpu               119 drivers/oprofile/oprofile_perf.c 	for_each_online_cpu(cpu) {
cpu               121 drivers/oprofile/oprofile_perf.c 			ret = op_create_counter(cpu, event);
cpu               135 drivers/oprofile/oprofile_perf.c 	int cpu, event;
cpu               137 drivers/oprofile/oprofile_perf.c 	for_each_online_cpu(cpu)
cpu               139 drivers/oprofile/oprofile_perf.c 			op_destroy_counter(cpu, event);
cpu               257 drivers/oprofile/oprofile_perf.c 	int cpu, id;
cpu               260 drivers/oprofile/oprofile_perf.c 	for_each_possible_cpu(cpu) {
cpu               262 drivers/oprofile/oprofile_perf.c 			event = per_cpu(perf_events, cpu)[id];
cpu               267 drivers/oprofile/oprofile_perf.c 		kfree(per_cpu(perf_events, cpu));
cpu               276 drivers/oprofile/oprofile_perf.c 	int cpu, ret = 0;
cpu               300 drivers/oprofile/oprofile_perf.c 	for_each_possible_cpu(cpu) {
cpu               301 drivers/oprofile/oprofile_perf.c 		per_cpu(perf_events, cpu) = kcalloc(num_counters,
cpu               303 drivers/oprofile/oprofile_perf.c 		if (!per_cpu(perf_events, cpu)) {
cpu               305 drivers/oprofile/oprofile_perf.c 					"for cpu %d\n", num_counters, cpu);
cpu                56 drivers/oprofile/timer_int.c static void __oprofile_hrtimer_stop(int cpu)
cpu                58 drivers/oprofile/timer_int.c 	struct hrtimer *hrtimer = &per_cpu(oprofile_hrtimer, cpu);
cpu                68 drivers/oprofile/timer_int.c 	int cpu;
cpu                71 drivers/oprofile/timer_int.c 	for_each_online_cpu(cpu)
cpu                72 drivers/oprofile/timer_int.c 		__oprofile_hrtimer_stop(cpu);
cpu                77 drivers/oprofile/timer_int.c static int oprofile_timer_online(unsigned int cpu)
cpu                85 drivers/oprofile/timer_int.c static int oprofile_timer_prep_down(unsigned int cpu)
cpu                87 drivers/oprofile/timer_int.c 	__oprofile_hrtimer_stop(cpu);
cpu              1197 drivers/pci/controller/pci-hyperv.c 	int cpu, nr_bank;
cpu              1259 drivers/pci/controller/pci-hyperv.c 		for_each_cpu_and(cpu, dest, cpu_online_mask) {
cpu              1261 drivers/pci/controller/pci-hyperv.c 				(1ULL << hv_cpu_number_to_vp_number(cpu));
cpu              1320 drivers/pci/controller/pci-hyperv.c 	int cpu;
cpu              1332 drivers/pci/controller/pci-hyperv.c 	cpu = cpumask_first_and(affinity, cpu_online_mask);
cpu              1334 drivers/pci/controller/pci-hyperv.c 		hv_cpu_number_to_vp_number(cpu);
cpu               374 drivers/pci/controller/pci-xgene-msi.c static int xgene_msi_hwirq_alloc(unsigned int cpu)
cpu               382 drivers/pci/controller/pci-xgene-msi.c 	for (i = cpu; i < NR_HW_IRQS; i += msi->num_cpus) {
cpu               401 drivers/pci/controller/pci-xgene-msi.c 			cpumask_set_cpu(cpu, mask);
cpu               421 drivers/pci/controller/pci-xgene-msi.c static int xgene_msi_hwirq_free(unsigned int cpu)
cpu               427 drivers/pci/controller/pci-xgene-msi.c 	for (i = cpu; i < NR_HW_IRQS; i += msi->num_cpus) {
cpu               466 drivers/pci/controller/pcie-iproc-msi.c static void iproc_msi_irq_free(struct iproc_msi *msi, unsigned int cpu)
cpu               470 drivers/pci/controller/pcie-iproc-msi.c 	for (i = cpu; i < msi->nr_irqs; i += msi->nr_cpus) {
cpu               476 drivers/pci/controller/pcie-iproc-msi.c static int iproc_msi_irq_setup(struct iproc_msi *msi, unsigned int cpu)
cpu               482 drivers/pci/controller/pcie-iproc-msi.c 	for (i = cpu; i < msi->nr_irqs; i += msi->nr_cpus) {
cpu               489 drivers/pci/controller/pcie-iproc-msi.c 			cpumask_set_cpu(cpu, mask);
cpu               503 drivers/pci/controller/pcie-iproc-msi.c 			iproc_msi_irq_free(msi, cpu);
cpu               515 drivers/pci/controller/pcie-iproc-msi.c 	unsigned int cpu;
cpu               619 drivers/pci/controller/pcie-iproc-msi.c 	for_each_online_cpu(cpu) {
cpu               620 drivers/pci/controller/pcie-iproc-msi.c 		ret = iproc_msi_irq_setup(msi, cpu);
cpu               630 drivers/pci/controller/pcie-iproc-msi.c 	for_each_online_cpu(cpu)
cpu               631 drivers/pci/controller/pcie-iproc-msi.c 		iproc_msi_irq_free(msi, cpu);
cpu               651 drivers/pci/controller/pcie-iproc-msi.c 	unsigned int i, cpu;
cpu               658 drivers/pci/controller/pcie-iproc-msi.c 	for_each_online_cpu(cpu)
cpu               659 drivers/pci/controller/pcie-iproc-msi.c 		iproc_msi_irq_free(msi, cpu);
cpu               334 drivers/pci/pci-driver.c 	int error, node, cpu;
cpu               353 drivers/pci/pci-driver.c 		cpu = nr_cpu_ids;
cpu               355 drivers/pci/pci-driver.c 		cpu = cpumask_any_and(cpumask_of_node(node), cpu_online_mask);
cpu               357 drivers/pci/pci-driver.c 	if (cpu < nr_cpu_ids)
cpu               358 drivers/pci/pci-driver.c 		error = work_on_cpu(cpu, local_pci_probe, &ddi);
cpu               103 drivers/perf/arm-cci.c 	int cpu;
cpu              1339 drivers/perf/arm-cci.c 	if (event->cpu < 0)
cpu              1341 drivers/perf/arm-cci.c 	event->cpu = cci_pmu->cpu;
cpu              1368 drivers/perf/arm-cci.c 	return cpumap_print_to_pagebuf(true, buf, cpumask_of(cci_pmu->cpu));
cpu              1444 drivers/perf/arm-cci.c static int cci_pmu_offline_cpu(unsigned int cpu)
cpu              1448 drivers/perf/arm-cci.c 	if (!g_cci_pmu || cpu != g_cci_pmu->cpu)
cpu              1451 drivers/perf/arm-cci.c 	target = cpumask_any_but(cpu_online_mask, cpu);
cpu              1455 drivers/perf/arm-cci.c 	perf_pmu_migrate_context(&g_cci_pmu->pmu, cpu, target);
cpu              1456 drivers/perf/arm-cci.c 	g_cci_pmu->cpu = target;
cpu              1688 drivers/perf/arm-cci.c 	cci_pmu->cpu = raw_smp_processor_id();
cpu               163 drivers/perf/arm-ccn.c 	unsigned int cpu;
cpu               555 drivers/perf/arm-ccn.c 	return cpumap_print_to_pagebuf(true, buf, cpumask_of(ccn->dt.cpu));
cpu               742 drivers/perf/arm-ccn.c 	if (event->cpu < 0) {
cpu               755 drivers/perf/arm-ccn.c 	event->cpu = ccn->dt.cpu;
cpu              1205 drivers/perf/arm-ccn.c static int arm_ccn_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
cpu              1211 drivers/perf/arm-ccn.c 	if (cpu != dt->cpu)
cpu              1213 drivers/perf/arm-ccn.c 	target = cpumask_any_but(cpu_online_mask, cpu);
cpu              1216 drivers/perf/arm-ccn.c 	perf_pmu_migrate_context(&dt->pmu, cpu, target);
cpu              1217 drivers/perf/arm-ccn.c 	dt->cpu = target;
cpu              1219 drivers/perf/arm-ccn.c 		WARN_ON(irq_set_affinity_hint(ccn->irq, cpumask_of(dt->cpu)));
cpu              1295 drivers/perf/arm-ccn.c 	ccn->dt.cpu = raw_smp_processor_id();
cpu              1299 drivers/perf/arm-ccn.c 		err = irq_set_affinity_hint(ccn->irq, cpumask_of(ccn->dt.cpu));
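
arm-cci and arm-ccn above, and most of the PMU drivers that follow (arm_smmuv3_pmu, fsl_imx8_ddr_perf, qcom_l2/l3, thunderx2, xgene), repeat the same offline-CPU handoff: when the CPU currently servicing an uncore PMU goes down, pick any other online CPU, migrate the perf context, and re-point the IRQ. A reduced hedged sketch, with an invented wrapper struct:

/* Sketch of the uncore-PMU "owning CPU went offline" handoff. */
#include <linux/cpumask.h>
#include <linux/interrupt.h>
#include <linux/perf_event.h>

struct my_uncore_pmu {		/* illustrative, not a real driver struct */
	struct pmu	pmu;
	unsigned int	cpu;	/* CPU currently servicing this PMU */
	int		irq;
};

static int my_pmu_offline_cpu(unsigned int cpu, struct my_uncore_pmu *up)
{
	unsigned int target;

	if (cpu != up->cpu)
		return 0;		/* not the owning CPU, nothing to do */

	target = cpumask_any_but(cpu_online_mask, cpu);
	if (target >= nr_cpu_ids)
		return 0;		/* no other CPU left online */

	perf_pmu_migrate_context(&up->pmu, cpu, target);
	up->cpu = target;
	WARN_ON(irq_set_affinity_hint(up->irq, cpumask_of(target)));
	return 0;
}
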
cpu               233 drivers/perf/arm_dsu_pmu.c static int dsu_pmu_get_online_cpu_any_but(struct dsu_pmu *dsu_pmu, int cpu)
cpu               239 drivers/perf/arm_dsu_pmu.c 	return cpumask_any_but(&online_supported, cpu);
cpu               557 drivers/perf/arm_dsu_pmu.c 	if (event->cpu < 0 || event->attach_state & PERF_ATTACH_TASK) {
cpu               567 drivers/perf/arm_dsu_pmu.c 	if (!cpumask_test_cpu(event->cpu, &dsu_pmu->associated_cpus)) {
cpu               578 drivers/perf/arm_dsu_pmu.c 	event->cpu = cpumask_first(&dsu_pmu->active_cpu);
cpu               579 drivers/perf/arm_dsu_pmu.c 	if (event->cpu >= nr_cpu_ids)
cpu               610 drivers/perf/arm_dsu_pmu.c 	int i = 0, n, cpu;
cpu               620 drivers/perf/arm_dsu_pmu.c 		cpu = of_cpu_node_to_id(cpu_node);
cpu               627 drivers/perf/arm_dsu_pmu.c 		if (cpu < 0)
cpu               629 drivers/perf/arm_dsu_pmu.c 		cpumask_set_cpu(cpu, mask);
cpu               656 drivers/perf/arm_dsu_pmu.c static void dsu_pmu_set_active_cpu(int cpu, struct dsu_pmu *dsu_pmu)
cpu               658 drivers/perf/arm_dsu_pmu.c 	cpumask_set_cpu(cpu, &dsu_pmu->active_cpu);
cpu               660 drivers/perf/arm_dsu_pmu.c 		pr_warn("Failed to set irq affinity to %d\n", cpu);
cpu               767 drivers/perf/arm_dsu_pmu.c static int dsu_pmu_cpu_online(unsigned int cpu, struct hlist_node *node)
cpu               772 drivers/perf/arm_dsu_pmu.c 	if (!cpumask_test_cpu(cpu, &dsu_pmu->associated_cpus))
cpu               780 drivers/perf/arm_dsu_pmu.c 	dsu_pmu_set_active_cpu(cpu, dsu_pmu);
cpu               785 drivers/perf/arm_dsu_pmu.c static int dsu_pmu_cpu_teardown(unsigned int cpu, struct hlist_node *node)
cpu               791 drivers/perf/arm_dsu_pmu.c 	if (!cpumask_test_and_clear_cpu(cpu, &dsu_pmu->active_cpu))
cpu               794 drivers/perf/arm_dsu_pmu.c 	dst = dsu_pmu_get_online_cpu_any_but(dsu_pmu, cpu);
cpu               801 drivers/perf/arm_dsu_pmu.c 	perf_pmu_migrate_context(&dsu_pmu->pmu, cpu, dst);
cpu               433 drivers/perf/arm_pmu.c 	if (event->cpu != -1 &&
cpu               434 drivers/perf/arm_pmu.c 		!cpumask_test_cpu(event->cpu, &armpmu->supported_cpus))
cpu               480 drivers/perf/arm_pmu.c 	unsigned int cpu = smp_processor_id();
cpu               483 drivers/perf/arm_pmu.c 	ret = cpumask_test_cpu(cpu, &armpmu->supported_cpus);
cpu               537 drivers/perf/arm_pmu.c 	int cpu, count = 0;
cpu               539 drivers/perf/arm_pmu.c 	for_each_possible_cpu(cpu) {
cpu               540 drivers/perf/arm_pmu.c 		if (per_cpu(cpu_irq, cpu) == irq)
cpu               547 drivers/perf/arm_pmu.c void armpmu_free_irq(int irq, int cpu)
cpu               549 drivers/perf/arm_pmu.c 	if (per_cpu(cpu_irq, cpu) == 0)
cpu               551 drivers/perf/arm_pmu.c 	if (WARN_ON(irq != per_cpu(cpu_irq, cpu)))
cpu               555 drivers/perf/arm_pmu.c 		free_irq(irq, per_cpu_ptr(&cpu_armpmu, cpu));
cpu               559 drivers/perf/arm_pmu.c 	per_cpu(cpu_irq, cpu) = 0;
cpu               562 drivers/perf/arm_pmu.c int armpmu_request_irq(int irq, int cpu)
cpu               572 drivers/perf/arm_pmu.c 		err = irq_force_affinity(irq, cpumask_of(cpu));
cpu               576 drivers/perf/arm_pmu.c 				irq, cpu);
cpu               586 drivers/perf/arm_pmu.c 				  per_cpu_ptr(&cpu_armpmu, cpu));
cpu               595 drivers/perf/arm_pmu.c 	per_cpu(cpu_irq, cpu) = irq;
cpu               603 drivers/perf/arm_pmu.c static int armpmu_get_cpu_irq(struct arm_pmu *pmu, int cpu)
cpu               606 drivers/perf/arm_pmu.c 	return per_cpu(hw_events->irq, cpu);
cpu               615 drivers/perf/arm_pmu.c static int arm_perf_starting_cpu(unsigned int cpu, struct hlist_node *node)
cpu               620 drivers/perf/arm_pmu.c 	if (!cpumask_test_cpu(cpu, &pmu->supported_cpus))
cpu               625 drivers/perf/arm_pmu.c 	per_cpu(cpu_armpmu, cpu) = pmu;
cpu               627 drivers/perf/arm_pmu.c 	irq = armpmu_get_cpu_irq(pmu, cpu);
cpu               638 drivers/perf/arm_pmu.c static int arm_perf_teardown_cpu(unsigned int cpu, struct hlist_node *node)
cpu               643 drivers/perf/arm_pmu.c 	if (!cpumask_test_cpu(cpu, &pmu->supported_cpus))
cpu               646 drivers/perf/arm_pmu.c 	irq = armpmu_get_cpu_irq(pmu, cpu);
cpu               654 drivers/perf/arm_pmu.c 	per_cpu(cpu_armpmu, cpu) = NULL;
cpu               784 drivers/perf/arm_pmu.c 	int cpu;
cpu               822 drivers/perf/arm_pmu.c 	for_each_possible_cpu(cpu) {
cpu               825 drivers/perf/arm_pmu.c 		events = per_cpu_ptr(pmu->hw_events, cpu);
cpu                21 drivers/perf/arm_pmu_acpi.c static int arm_pmu_acpi_register_irq(int cpu)
cpu                26 drivers/perf/arm_pmu_acpi.c 	gicc = acpi_cpu_get_madt_gicc(cpu);
cpu                59 drivers/perf/arm_pmu_acpi.c static void arm_pmu_acpi_unregister_irq(int cpu)
cpu                64 drivers/perf/arm_pmu_acpi.c 	gicc = acpi_cpu_get_madt_gicc(cpu);
cpu                93 drivers/perf/arm_pmu_acpi.c 	int cpu, hetid, irq, ret;
cpu               101 drivers/perf/arm_pmu_acpi.c 	for_each_possible_cpu(cpu) {
cpu               104 drivers/perf/arm_pmu_acpi.c 		gicc = acpi_cpu_get_madt_gicc(cpu);
cpu               112 drivers/perf/arm_pmu_acpi.c 			hetid = find_acpi_cpu_topology_hetero_id(cpu);
cpu               115 drivers/perf/arm_pmu_acpi.c 			   (hetid != find_acpi_cpu_topology_hetero_id(cpu))) {
cpu               143 drivers/perf/arm_pmu_acpi.c 	int irq, cpu, irq_cpu, err;
cpu               145 drivers/perf/arm_pmu_acpi.c 	for_each_possible_cpu(cpu) {
cpu               146 drivers/perf/arm_pmu_acpi.c 		irq = arm_pmu_acpi_register_irq(cpu);
cpu               150 drivers/perf/arm_pmu_acpi.c 				cpu, err);
cpu               153 drivers/perf/arm_pmu_acpi.c 			pr_warn("No ACPI PMU IRQ for CPU%d\n", cpu);
cpu               161 drivers/perf/arm_pmu_acpi.c 		per_cpu(pmu_irqs, cpu) = irq;
cpu               162 drivers/perf/arm_pmu_acpi.c 		armpmu_request_irq(irq, cpu);
cpu               168 drivers/perf/arm_pmu_acpi.c 	for_each_possible_cpu(cpu) {
cpu               169 drivers/perf/arm_pmu_acpi.c 		irq = per_cpu(pmu_irqs, cpu);
cpu               173 drivers/perf/arm_pmu_acpi.c 		arm_pmu_acpi_unregister_irq(cpu);
cpu               192 drivers/perf/arm_pmu_acpi.c 	int cpu;
cpu               194 drivers/perf/arm_pmu_acpi.c 	for_each_possible_cpu(cpu) {
cpu               195 drivers/perf/arm_pmu_acpi.c 		pmu = per_cpu(probed_pmus, cpu);
cpu               221 drivers/perf/arm_pmu_acpi.c 	int cpu;
cpu               226 drivers/perf/arm_pmu_acpi.c 	for_each_cpu(cpu, &pmu->supported_cpus) {
cpu               227 drivers/perf/arm_pmu_acpi.c 		int other_irq = per_cpu(hw_events->irq, cpu);
cpu               252 drivers/perf/arm_pmu_acpi.c static int arm_pmu_acpi_cpu_starting(unsigned int cpu)
cpu               259 drivers/perf/arm_pmu_acpi.c 	if (per_cpu(probed_pmus, cpu))
cpu               262 drivers/perf/arm_pmu_acpi.c 	irq = per_cpu(pmu_irqs, cpu);
cpu               268 drivers/perf/arm_pmu_acpi.c 	per_cpu(probed_pmus, cpu) = pmu;
cpu               272 drivers/perf/arm_pmu_acpi.c 		per_cpu(hw_events->irq, cpu) = irq;
cpu               275 drivers/perf/arm_pmu_acpi.c 	cpumask_set_cpu(cpu, &pmu->supported_cpus);
cpu               290 drivers/perf/arm_pmu_acpi.c 	int cpu, ret;
cpu               305 drivers/perf/arm_pmu_acpi.c 	for_each_possible_cpu(cpu) {
cpu               306 drivers/perf/arm_pmu_acpi.c 		struct arm_pmu *pmu = per_cpu(probed_pmus, cpu);
cpu               317 drivers/perf/arm_pmu_acpi.c 			pr_warn("Unable to initialise PMU for CPU%d\n", cpu);
cpu               324 drivers/perf/arm_pmu_acpi.c 			pr_warn("Unable to allocate PMU name for CPU%d\n", cpu);
cpu               330 drivers/perf/arm_pmu_acpi.c 			pr_warn("Failed to register PMU for CPU%d\n", cpu);
cpu                28 drivers/perf/arm_pmu_platform.c 	int cpu = get_cpu();
cpu                32 drivers/perf/arm_pmu_platform.c 	pr_info("probing PMU on CPU %d\n", cpu);
cpu                47 drivers/perf/arm_pmu_platform.c 	int cpu, ret;
cpu                54 drivers/perf/arm_pmu_platform.c 	for_each_cpu(cpu, &pmu->supported_cpus)
cpu                55 drivers/perf/arm_pmu_platform.c 		per_cpu(hw_events->irq, cpu) = irq;
cpu                68 drivers/perf/arm_pmu_platform.c 	int cpu;
cpu                85 drivers/perf/arm_pmu_platform.c 	cpu = of_cpu_node_to_id(dn);
cpu                86 drivers/perf/arm_pmu_platform.c 	if (cpu < 0) {
cpu                88 drivers/perf/arm_pmu_platform.c 		cpu = nr_cpu_ids;
cpu                93 drivers/perf/arm_pmu_platform.c 	return cpu;
cpu               131 drivers/perf/arm_pmu_platform.c 		int cpu, irq;
cpu               142 drivers/perf/arm_pmu_platform.c 		cpu = pmu_parse_irq_affinity(pdev->dev.of_node, i);
cpu               143 drivers/perf/arm_pmu_platform.c 		if (cpu < 0)
cpu               144 drivers/perf/arm_pmu_platform.c 			return cpu;
cpu               145 drivers/perf/arm_pmu_platform.c 		if (cpu >= nr_cpu_ids)
cpu               148 drivers/perf/arm_pmu_platform.c 		if (per_cpu(hw_events->irq, cpu)) {
cpu               153 drivers/perf/arm_pmu_platform.c 		per_cpu(hw_events->irq, cpu) = irq;
cpu               154 drivers/perf/arm_pmu_platform.c 		cpumask_set_cpu(cpu, &pmu->supported_cpus);
cpu               163 drivers/perf/arm_pmu_platform.c 	int cpu, err = 0;
cpu               165 drivers/perf/arm_pmu_platform.c 	for_each_cpu(cpu, &armpmu->supported_cpus) {
cpu               166 drivers/perf/arm_pmu_platform.c 		int irq = per_cpu(hw_events->irq, cpu);
cpu               170 drivers/perf/arm_pmu_platform.c 		err = armpmu_request_irq(irq, cpu);
cpu               180 drivers/perf/arm_pmu_platform.c 	int cpu;
cpu               183 drivers/perf/arm_pmu_platform.c 	for_each_cpu(cpu, &armpmu->supported_cpus) {
cpu               184 drivers/perf/arm_pmu_platform.c 		int irq = per_cpu(hw_events->irq, cpu);
cpu               186 drivers/perf/arm_pmu_platform.c 		armpmu_free_irq(irq, cpu);
cpu               358 drivers/perf/arm_smmuv3_pmu.c 	if (event->cpu < 0) {
cpu               397 drivers/perf/arm_smmuv3_pmu.c 	event->cpu = smmu_pmu->on_cpu;
cpu               585 drivers/perf/arm_smmuv3_pmu.c static int smmu_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
cpu               591 drivers/perf/arm_smmuv3_pmu.c 	if (cpu != smmu_pmu->on_cpu)
cpu               594 drivers/perf/arm_smmuv3_pmu.c 	target = cpumask_any_but(cpu_online_mask, cpu);
cpu               598 drivers/perf/arm_smmuv3_pmu.c 	perf_pmu_migrate_context(&smmu_pmu->pmu, cpu, target);
cpu               669 drivers/perf/arm_spe_pmu.c 	if (event->cpu >= 0 &&
cpu               670 drivers/perf/arm_spe_pmu.c 	    !cpumask_test_cpu(event->cpu, &spe_pmu->supported_cpus))
cpu               792 drivers/perf/arm_spe_pmu.c 	int cpu = event->cpu == -1 ? smp_processor_id() : event->cpu;
cpu               794 drivers/perf/arm_spe_pmu.c 	if (!cpumask_test_cpu(cpu, &spe_pmu->supported_cpus))
cpu               820 drivers/perf/arm_spe_pmu.c 	int i, cpu = event->cpu;
cpu               837 drivers/perf/arm_spe_pmu.c 	if (cpu == -1)
cpu               838 drivers/perf/arm_spe_pmu.c 		cpu = raw_smp_processor_id();
cpu               840 drivers/perf/arm_spe_pmu.c 	buf = kzalloc_node(sizeof(*buf), GFP_KERNEL, cpu_to_node(cpu));
cpu              1071 drivers/perf/arm_spe_pmu.c static int arm_spe_pmu_cpu_startup(unsigned int cpu, struct hlist_node *node)
cpu              1076 drivers/perf/arm_spe_pmu.c 	if (!cpumask_test_cpu(cpu, &spe_pmu->supported_cpus))
cpu              1083 drivers/perf/arm_spe_pmu.c static int arm_spe_pmu_cpu_teardown(unsigned int cpu, struct hlist_node *node)
cpu              1088 drivers/perf/arm_spe_pmu.c 	if (!cpumask_test_cpu(cpu, &spe_pmu->supported_cpus))
cpu                71 drivers/perf/fsl_imx8_ddr_perf.c 	unsigned int cpu;
cpu                87 drivers/perf/fsl_imx8_ddr_perf.c 	return cpumap_print_to_pagebuf(true, buf, cpumask_of(pmu->cpu));
cpu               268 drivers/perf/fsl_imx8_ddr_perf.c 	if (event->cpu < 0) {
cpu               297 drivers/perf/fsl_imx8_ddr_perf.c 	event->cpu = pmu->cpu;
cpu               518 drivers/perf/fsl_imx8_ddr_perf.c static int ddr_perf_offline_cpu(unsigned int cpu, struct hlist_node *node)
cpu               523 drivers/perf/fsl_imx8_ddr_perf.c 	if (cpu != pmu->cpu)
cpu               526 drivers/perf/fsl_imx8_ddr_perf.c 	target = cpumask_any_but(cpu_online_mask, cpu);
cpu               530 drivers/perf/fsl_imx8_ddr_perf.c 	perf_pmu_migrate_context(&pmu->pmu, cpu, target);
cpu               531 drivers/perf/fsl_imx8_ddr_perf.c 	pmu->cpu = target;
cpu               533 drivers/perf/fsl_imx8_ddr_perf.c 	WARN_ON(irq_set_affinity_hint(pmu->irq, cpumask_of(pmu->cpu)));
cpu               569 drivers/perf/fsl_imx8_ddr_perf.c 	pmu->cpu = raw_smp_processor_id();
cpu               608 drivers/perf/fsl_imx8_ddr_perf.c 	ret = irq_set_affinity_hint(pmu->irq, cpumask_of(pmu->cpu));
cpu               146 drivers/perf/hisilicon/hisi_uncore_pmu.c 	if (event->cpu < 0)
cpu               171 drivers/perf/hisilicon/hisi_uncore_pmu.c 	event->cpu = hisi_pmu->on_cpu;
cpu               383 drivers/perf/hisilicon/hisi_uncore_pmu.c int hisi_uncore_pmu_online_cpu(unsigned int cpu, struct hlist_node *node)
cpu               391 drivers/perf/hisilicon/hisi_uncore_pmu.c 	cpumask_set_cpu(cpu, &hisi_pmu->associated_cpus);
cpu               398 drivers/perf/hisilicon/hisi_uncore_pmu.c 	hisi_pmu->on_cpu = cpu;
cpu               401 drivers/perf/hisilicon/hisi_uncore_pmu.c 	WARN_ON(irq_set_affinity(hisi_pmu->irq, cpumask_of(cpu)));
cpu               406 drivers/perf/hisilicon/hisi_uncore_pmu.c int hisi_uncore_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
cpu               413 drivers/perf/hisilicon/hisi_uncore_pmu.c 	if (!cpumask_test_and_clear_cpu(cpu, &hisi_pmu->associated_cpus))
cpu               417 drivers/perf/hisilicon/hisi_uncore_pmu.c 	if (hisi_pmu->on_cpu != cpu)
cpu               426 drivers/perf/hisilicon/hisi_uncore_pmu.c 	target = cpumask_any_but(&pmu_online_cpus, cpu);
cpu               430 drivers/perf/hisilicon/hisi_uncore_pmu.c 	perf_pmu_migrate_context(&hisi_pmu->pmu, cpu, target);
cpu                97 drivers/perf/hisilicon/hisi_uncore_pmu.h int hisi_uncore_pmu_online_cpu(unsigned int cpu, struct hlist_node *node);
cpu                98 drivers/perf/hisilicon/hisi_uncore_pmu.h int hisi_uncore_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node);
cpu               206 drivers/perf/qcom_l2_pmu.c 	struct l2cache_pmu *l2cache_pmu, int cpu)
cpu               208 drivers/perf/qcom_l2_pmu.c 	return *per_cpu_ptr(l2cache_pmu->pmu_cluster, cpu);
cpu               498 drivers/perf/qcom_l2_pmu.c 	if (event->cpu < 0) {
cpu               530 drivers/perf/qcom_l2_pmu.c 	cluster = get_cluster_pmu(l2cache_pmu, event->cpu);
cpu               534 drivers/perf/qcom_l2_pmu.c 			"CPU%d not associated with L2 cluster\n", event->cpu);
cpu               540 drivers/perf/qcom_l2_pmu.c 	    (cluster->on_cpu != event->group_leader->cpu)) {
cpu               543 drivers/perf/qcom_l2_pmu.c 			 event->cpu, event->group_leader->cpu);
cpu               578 drivers/perf/qcom_l2_pmu.c 	event->cpu = cluster->on_cpu;
cpu               593 drivers/perf/qcom_l2_pmu.c 	cluster = get_cluster_pmu(to_l2cache_pmu(event->pmu), event->cpu);
cpu               637 drivers/perf/qcom_l2_pmu.c 	cluster = get_cluster_pmu(to_l2cache_pmu(event->pmu), event->cpu);
cpu               663 drivers/perf/qcom_l2_pmu.c 	cluster = get_cluster_pmu(to_l2cache_pmu(event->pmu), event->cpu);
cpu               780 drivers/perf/qcom_l2_pmu.c 	struct l2cache_pmu *l2cache_pmu, int cpu)
cpu               802 drivers/perf/qcom_l2_pmu.c 			 "CPU%d associated with cluster %d\n", cpu,
cpu               804 drivers/perf/qcom_l2_pmu.c 		cpumask_set_cpu(cpu, &cluster->cluster_cpus);
cpu               805 drivers/perf/qcom_l2_pmu.c 		*per_cpu_ptr(l2cache_pmu->pmu_cluster, cpu) = cluster;
cpu               812 drivers/perf/qcom_l2_pmu.c static int l2cache_pmu_online_cpu(unsigned int cpu, struct hlist_node *node)
cpu               818 drivers/perf/qcom_l2_pmu.c 	cluster = get_cluster_pmu(l2cache_pmu, cpu);
cpu               821 drivers/perf/qcom_l2_pmu.c 		cluster = l2_cache_associate_cpu_with_cluster(l2cache_pmu, cpu);
cpu               824 drivers/perf/qcom_l2_pmu.c 			WARN_ONCE(1, "No L2 cache cluster for CPU%d\n", cpu);
cpu               837 drivers/perf/qcom_l2_pmu.c 	cluster->on_cpu = cpu;
cpu               838 drivers/perf/qcom_l2_pmu.c 	cpumask_set_cpu(cpu, &l2cache_pmu->cpumask);
cpu               841 drivers/perf/qcom_l2_pmu.c 	WARN_ON(irq_set_affinity(cluster->irq, cpumask_of(cpu)));
cpu               847 drivers/perf/qcom_l2_pmu.c static int l2cache_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
cpu               855 drivers/perf/qcom_l2_pmu.c 	cluster = get_cluster_pmu(l2cache_pmu, cpu);
cpu               860 drivers/perf/qcom_l2_pmu.c 	if (cluster->on_cpu != cpu)
cpu               864 drivers/perf/qcom_l2_pmu.c 	cpumask_clear_cpu(cpu, &l2cache_pmu->cpumask);
cpu               870 drivers/perf/qcom_l2_pmu.c 	target = cpumask_any_but(&cluster_online_cpus, cpu);
cpu               876 drivers/perf/qcom_l2_pmu.c 	perf_pmu_migrate_context(&l2cache_pmu->pmu, cpu, target);
cpu               499 drivers/perf/qcom_l3_pmu.c 	if (event->cpu < 0)
cpu               519 drivers/perf/qcom_l3_pmu.c 	event->cpu = cpumask_first(&l3pmu->cpumask);
cpu               706 drivers/perf/qcom_l3_pmu.c static int qcom_l3_cache_pmu_online_cpu(unsigned int cpu, struct hlist_node *node)
cpu               712 drivers/perf/qcom_l3_pmu.c 		cpumask_set_cpu(cpu, &l3pmu->cpumask);
cpu               717 drivers/perf/qcom_l3_pmu.c static int qcom_l3_cache_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
cpu               722 drivers/perf/qcom_l3_pmu.c 	if (!cpumask_test_and_clear_cpu(cpu, &l3pmu->cpumask))
cpu               724 drivers/perf/qcom_l3_pmu.c 	target = cpumask_any_but(cpu_online_mask, cpu);
cpu               727 drivers/perf/qcom_l3_pmu.c 	perf_pmu_migrate_context(&l3pmu->pmu, cpu, target);
cpu                70 drivers/perf/thunderx2_pmu.c 	int cpu;
cpu               186 drivers/perf/thunderx2_pmu.c 	return cpumap_print_to_pagebuf(true, buf, cpumask_of(tx2_pmu->cpu));
cpu               427 drivers/perf/thunderx2_pmu.c 	if (event->cpu < 0)
cpu               431 drivers/perf/thunderx2_pmu.c 	if (tx2_pmu->cpu >= nr_cpu_ids)
cpu               433 drivers/perf/thunderx2_pmu.c 	event->cpu = tx2_pmu->cpu;
cpu               577 drivers/perf/thunderx2_pmu.c 	int ret, cpu;
cpu               579 drivers/perf/thunderx2_pmu.c 	cpu = cpumask_any_and(cpumask_of_node(tx2_pmu->node),
cpu               582 drivers/perf/thunderx2_pmu.c 	tx2_pmu->cpu = cpu;
cpu               716 drivers/perf/thunderx2_pmu.c static int tx2_uncore_pmu_online_cpu(unsigned int cpu,
cpu               727 drivers/perf/thunderx2_pmu.c 	if ((tx2_pmu->cpu >= nr_cpu_ids) &&
cpu               728 drivers/perf/thunderx2_pmu.c 		(tx2_pmu->node == cpu_to_node(cpu)))
cpu               729 drivers/perf/thunderx2_pmu.c 		tx2_pmu->cpu = cpu;
cpu               734 drivers/perf/thunderx2_pmu.c static int tx2_uncore_pmu_offline_cpu(unsigned int cpu,
cpu               744 drivers/perf/thunderx2_pmu.c 	if (cpu != tx2_pmu->cpu)
cpu               749 drivers/perf/thunderx2_pmu.c 	cpumask_clear_cpu(cpu, &cpu_online_mask_temp);
cpu               754 drivers/perf/thunderx2_pmu.c 	tx2_pmu->cpu = new_cpu;
cpu               757 drivers/perf/thunderx2_pmu.c 	perf_pmu_migrate_context(&tx2_pmu->pmu, cpu, new_cpu);
cpu               128 drivers/perf/xgene_pmu.c 	cpumask_t cpu;
cpu               612 drivers/perf/xgene_pmu.c 	return cpumap_print_to_pagebuf(true, buf, &pmu_dev->parent->cpu);
cpu               908 drivers/perf/xgene_pmu.c 	if (event->cpu < 0)
cpu               919 drivers/perf/xgene_pmu.c 	event->cpu = cpumask_first(&pmu_dev->parent->cpu);
cpu              1794 drivers/perf/xgene_pmu.c static int xgene_pmu_online_cpu(unsigned int cpu, struct hlist_node *node)
cpu              1799 drivers/perf/xgene_pmu.c 	if (cpumask_empty(&xgene_pmu->cpu))
cpu              1800 drivers/perf/xgene_pmu.c 		cpumask_set_cpu(cpu, &xgene_pmu->cpu);
cpu              1803 drivers/perf/xgene_pmu.c 	WARN_ON(irq_set_affinity(xgene_pmu->irq, &xgene_pmu->cpu));
cpu              1808 drivers/perf/xgene_pmu.c static int xgene_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
cpu              1815 drivers/perf/xgene_pmu.c 	if (!cpumask_test_and_clear_cpu(cpu, &xgene_pmu->cpu))
cpu              1817 drivers/perf/xgene_pmu.c 	target = cpumask_any_but(cpu_online_mask, cpu);
cpu              1822 drivers/perf/xgene_pmu.c 		perf_pmu_migrate_context(&ctx->pmu_dev->pmu, cpu, target);
cpu              1825 drivers/perf/xgene_pmu.c 		perf_pmu_migrate_context(&ctx->pmu_dev->pmu, cpu, target);
cpu              1828 drivers/perf/xgene_pmu.c 		perf_pmu_migrate_context(&ctx->pmu_dev->pmu, cpu, target);
cpu              1831 drivers/perf/xgene_pmu.c 		perf_pmu_migrate_context(&ctx->pmu_dev->pmu, cpu, target);
cpu              1834 drivers/perf/xgene_pmu.c 	cpumask_set_cpu(target, &xgene_pmu->cpu);
cpu              1836 drivers/perf/xgene_pmu.c 	WARN_ON(irq_set_affinity(xgene_pmu->irq, &xgene_pmu->cpu));
cpu              1466 drivers/pinctrl/tegra/pinctrl-tegra114.c 	FUNCTION(cpu),
cpu              1626 drivers/pinctrl/tegra/pinctrl-tegra124.c 	FUNCTION(cpu),
cpu              1202 drivers/pinctrl/tegra/pinctrl-tegra210.c 	FUNCTION(cpu),
cpu                19 drivers/platform/mips/cpu_hwmon.c int loongson3_cpu_temp(int cpu)
cpu                23 drivers/platform/mips/cpu_hwmon.c 	reg = LOONGSON_CHIPTEMP(cpu);
cpu               483 drivers/platform/x86/compal-laptop.c TEMPERATURE_SHOW_TEMP_AND_LABEL(cpu,        TEMP_CPU,        "CPU_TEMP");
cpu               623 drivers/platform/x86/intel_ips.c static bool cpu_exceeded(struct ips_driver *ips, int cpu)
cpu               630 drivers/platform/x86/intel_ips.c 	avg = cpu ? ips->ctv2_avg_temp : ips->ctv1_avg_temp;
cpu               857 drivers/platform/x86/intel_ips.c static u16 read_ctv(struct ips_driver *ips, int cpu)
cpu               859 drivers/platform/x86/intel_ips.c 	int reg = cpu ? THM_CTV2 : THM_CTV1;
cpu                68 drivers/platform/x86/intel_speed_select_if/isst_if_common.c 	int cpu;
cpu                76 drivers/platform/x86/intel_speed_select_if/isst_if_common.c static int isst_store_new_cmd(int cmd, u32 cpu, int mbox_cmd_type, u32 param,
cpu                85 drivers/platform/x86/intel_speed_select_if/isst_if_common.c 	sst_cmd->cpu = cpu;
cpu               123 drivers/platform/x86/intel_speed_select_if/isst_if_common.c int isst_store_cmd(int cmd, int sub_cmd, u32 cpu, int mbox_cmd_type,
cpu               133 drivers/platform/x86/intel_speed_select_if/isst_if_common.c 		if (sst_cmd->cmd == full_cmd && sst_cmd->cpu == cpu &&
cpu               142 drivers/platform/x86/intel_speed_select_if/isst_if_common.c 	ret = isst_store_new_cmd(full_cmd, cpu, mbox_cmd_type, param, data);
cpu               159 drivers/platform/x86/intel_speed_select_if/isst_if_common.c 	mbox_cmd.logical_cpu = sst_cmd->cpu;
cpu               183 drivers/platform/x86/intel_speed_select_if/isst_if_common.c 			wrmsrl_safe_on_cpu(sst_cmd->cpu, sst_cmd->cmd,
cpu               190 drivers/platform/x86/intel_speed_select_if/isst_if_common.c static void isst_restore_msr_local(int cpu)
cpu               202 drivers/platform/x86/intel_speed_select_if/isst_if_common.c 			if (!sst_cmd->mbox_cmd_type && sst_cmd->cpu == cpu)
cpu               298 drivers/platform/x86/intel_speed_select_if/isst_if_common.c struct pci_dev *isst_if_get_pci_dev(int cpu, int bus_no, int dev, int fn)
cpu               302 drivers/platform/x86/intel_speed_select_if/isst_if_common.c 	if (bus_no < 0 || bus_no > 1 || cpu < 0 || cpu >= nr_cpu_ids ||
cpu               303 drivers/platform/x86/intel_speed_select_if/isst_if_common.c 	    cpu >= num_possible_cpus())
cpu               306 drivers/platform/x86/intel_speed_select_if/isst_if_common.c 	bus_number = isst_cpu_info[cpu].bus_info[bus_no];
cpu               314 drivers/platform/x86/intel_speed_select_if/isst_if_common.c static int isst_if_cpu_online(unsigned int cpu)
cpu               322 drivers/platform/x86/intel_speed_select_if/isst_if_common.c 		isst_cpu_info[cpu].bus_info[0] = -1;
cpu               323 drivers/platform/x86/intel_speed_select_if/isst_if_common.c 		isst_cpu_info[cpu].bus_info[1] = -1;
cpu               325 drivers/platform/x86/intel_speed_select_if/isst_if_common.c 		isst_cpu_info[cpu].bus_info[0] = data & 0xff;
cpu               326 drivers/platform/x86/intel_speed_select_if/isst_if_common.c 		isst_cpu_info[cpu].bus_info[1] = (data >> 8) & 0xff;
cpu               331 drivers/platform/x86/intel_speed_select_if/isst_if_common.c 		isst_cpu_info[cpu].punit_cpu_id = -1;
cpu               334 drivers/platform/x86/intel_speed_select_if/isst_if_common.c 	isst_cpu_info[cpu].punit_cpu_id = data;
cpu               336 drivers/platform/x86/intel_speed_select_if/isst_if_common.c 	isst_restore_msr_local(cpu);
cpu                63 drivers/platform/x86/intel_speed_select_if/isst_if_common.h struct pci_dev *isst_if_get_pci_dev(int cpu, int bus, int dev, int fn);
cpu                66 drivers/platform/x86/intel_speed_select_if/isst_if_common.h int isst_store_cmd(int cmd, int sub_command, u32 cpu, int mbox_cmd,
cpu                35 drivers/platform/x86/intel_turbo_max_3.c static int get_oc_core_priority(unsigned int cpu)
cpu                46 drivers/platform/x86/intel_turbo_max_3.c 		pr_debug("cpu %d OC mailbox write failed\n", cpu);
cpu                53 drivers/platform/x86/intel_turbo_max_3.c 			pr_debug("cpu %d OC mailbox read failed\n", cpu);
cpu                58 drivers/platform/x86/intel_turbo_max_3.c 			pr_debug("cpu %d OC mailbox still processing\n", cpu);
cpu                64 drivers/platform/x86/intel_turbo_max_3.c 			pr_debug("cpu %d OC mailbox cmd failed\n", cpu);
cpu                70 drivers/platform/x86/intel_turbo_max_3.c 		pr_debug("cpu %d max_ratio %d\n", cpu, ret);
cpu                90 drivers/platform/x86/intel_turbo_max_3.c static int itmt_legacy_cpu_online(unsigned int cpu)
cpu                95 drivers/platform/x86/intel_turbo_max_3.c 	priority = get_oc_core_priority(cpu);
cpu                99 drivers/platform/x86/intel_turbo_max_3.c 	sched_set_itmt_core_prio(priority, cpu);
cpu                56 drivers/pnp/pnpbios/bioscalls.c #define Q2_SET_SEL(cpu, selname, address, size) \
cpu                58 drivers/pnp/pnpbios/bioscalls.c 	struct desc_struct *gdt = get_cpu_gdt_rw((cpu)); \
cpu                89 drivers/pnp/pnpbios/bioscalls.c 	int cpu;
cpu                98 drivers/pnp/pnpbios/bioscalls.c 	cpu = get_cpu();
cpu                99 drivers/pnp/pnpbios/bioscalls.c 	save_desc_40 = get_cpu_gdt_rw(cpu)[0x40 / 8];
cpu               100 drivers/pnp/pnpbios/bioscalls.c 	get_cpu_gdt_rw(cpu)[0x40 / 8] = bad_bios_desc;
cpu               138 drivers/pnp/pnpbios/bioscalls.c 	get_cpu_gdt_rw(cpu)[0x40 / 8] = save_desc_40;
cpu                31 drivers/power/reset/sc27xx-poweroff.c 	int cpu = smp_processor_id();
cpu                33 drivers/power/reset/sc27xx-poweroff.c 	freeze_secondary_cpus(cpu);
cpu                86 drivers/powercap/idle_inject.c 	unsigned int cpu;
cpu                88 drivers/powercap/idle_inject.c 	for_each_cpu_and(cpu, to_cpumask(ii_dev->cpumask), cpu_online_mask) {
cpu                89 drivers/powercap/idle_inject.c 		iit = per_cpu_ptr(&idle_inject_thread, cpu);
cpu               128 drivers/powercap/idle_inject.c static void idle_inject_fn(unsigned int cpu)
cpu               133 drivers/powercap/idle_inject.c 	ii_dev = per_cpu(idle_inject_device, cpu);
cpu               134 drivers/powercap/idle_inject.c 	iit = per_cpu_ptr(&idle_inject_thread, cpu);
cpu               217 drivers/powercap/idle_inject.c 	unsigned int cpu;
cpu               239 drivers/powercap/idle_inject.c 	for_each_cpu(cpu, to_cpumask(ii_dev->cpumask)) {
cpu               240 drivers/powercap/idle_inject.c 		iit = per_cpu_ptr(&idle_inject_thread, cpu);
cpu               256 drivers/powercap/idle_inject.c static void idle_inject_setup(unsigned int cpu)
cpu               269 drivers/powercap/idle_inject.c static int idle_inject_should_run(unsigned int cpu)
cpu               272 drivers/powercap/idle_inject.c 		per_cpu_ptr(&idle_inject_thread, cpu);
cpu               291 drivers/powercap/idle_inject.c 	int cpu, cpu_rb;
cpu               301 drivers/powercap/idle_inject.c 	for_each_cpu(cpu, to_cpumask(ii_dev->cpumask)) {
cpu               303 drivers/powercap/idle_inject.c 		if (per_cpu(idle_inject_device, cpu)) {
cpu               304 drivers/powercap/idle_inject.c 			pr_err("cpu%d is already registered\n", cpu);
cpu               308 drivers/powercap/idle_inject.c 		per_cpu(idle_inject_device, cpu) = ii_dev;
cpu               315 drivers/powercap/idle_inject.c 		if (cpu == cpu_rb)
cpu               335 drivers/powercap/idle_inject.c 	unsigned int cpu;
cpu               339 drivers/powercap/idle_inject.c 	for_each_cpu(cpu, to_cpumask(ii_dev->cpumask))
cpu               340 drivers/powercap/idle_inject.c 		per_cpu(idle_inject_device, cpu) = NULL;
cpu                94 drivers/powercap/intel_rapl_common.c 	int (*check_unit)(struct rapl_package *rp, int cpu);
cpu               644 drivers/powercap/intel_rapl_common.c 	int cpu;
cpu               653 drivers/powercap/intel_rapl_common.c 	cpu = rd->rp->lead_cpu;
cpu               668 drivers/powercap/intel_rapl_common.c 	if (rd->rp->priv->read_raw(cpu, &ra)) {
cpu               669 drivers/powercap/intel_rapl_common.c 		pr_debug("failed to read reg 0x%llx on cpu %d\n", ra.reg, cpu);
cpu               689 drivers/powercap/intel_rapl_common.c 	int cpu;
cpu               694 drivers/powercap/intel_rapl_common.c 	cpu = rd->rp->lead_cpu;
cpu               705 drivers/powercap/intel_rapl_common.c 	ret = rd->rp->priv->write_raw(cpu, &ra);
cpu               721 drivers/powercap/intel_rapl_common.c static int rapl_check_unit_core(struct rapl_package *rp, int cpu)
cpu               728 drivers/powercap/intel_rapl_common.c 	if (rp->priv->read_raw(cpu, &ra)) {
cpu               730 drivers/powercap/intel_rapl_common.c 		       rp->priv->reg_unit, cpu);
cpu               749 drivers/powercap/intel_rapl_common.c static int rapl_check_unit_atom(struct rapl_package *rp, int cpu)
cpu               756 drivers/powercap/intel_rapl_common.c 	if (rp->priv->read_raw(cpu, &ra)) {
cpu               758 drivers/powercap/intel_rapl_common.c 		       rp->priv->reg_unit, cpu);
cpu              1146 drivers/powercap/intel_rapl_common.c static int rapl_check_domain(int cpu, int domain, struct rapl_package *rp)
cpu              1169 drivers/powercap/intel_rapl_common.c 	if (rp->priv->read_raw(cpu, &ra) || !ra.value)
cpu              1209 drivers/powercap/intel_rapl_common.c static int rapl_detect_domains(struct rapl_package *rp, int cpu)
cpu              1216 drivers/powercap/intel_rapl_common.c 		if (!rapl_check_domain(cpu, i, rp)) {
cpu              1273 drivers/powercap/intel_rapl_common.c struct rapl_package *rapl_find_package_domain(int cpu, struct rapl_if_priv *priv)
cpu              1275 drivers/powercap/intel_rapl_common.c 	int id = topology_logical_die_id(cpu);
cpu              1289 drivers/powercap/intel_rapl_common.c struct rapl_package *rapl_add_package(int cpu, struct rapl_if_priv *priv)
cpu              1291 drivers/powercap/intel_rapl_common.c 	int id = topology_logical_die_id(cpu);
cpu              1293 drivers/powercap/intel_rapl_common.c 	struct cpuinfo_x86 *c = &cpu_data(cpu);
cpu              1305 drivers/powercap/intel_rapl_common.c 	rp->lead_cpu = cpu;
cpu              1316 drivers/powercap/intel_rapl_common.c 	if (rapl_detect_domains(rp, cpu) || rapl_defaults->check_unit(rp, cpu)) {
cpu                55 drivers/powercap/intel_rapl_msr.c static int rapl_cpu_online(unsigned int cpu)
cpu                59 drivers/powercap/intel_rapl_msr.c 	rp = rapl_find_package_domain(cpu, &rapl_msr_priv);
cpu                61 drivers/powercap/intel_rapl_msr.c 		rp = rapl_add_package(cpu, &rapl_msr_priv);
cpu                65 drivers/powercap/intel_rapl_msr.c 	cpumask_set_cpu(cpu, &rp->cpumask);
cpu                69 drivers/powercap/intel_rapl_msr.c static int rapl_cpu_down_prep(unsigned int cpu)
cpu                74 drivers/powercap/intel_rapl_msr.c 	rp = rapl_find_package_domain(cpu, &rapl_msr_priv);
cpu                78 drivers/powercap/intel_rapl_msr.c 	cpumask_clear_cpu(cpu, &rp->cpumask);
cpu                82 drivers/powercap/intel_rapl_msr.c 	else if (rp->lead_cpu == cpu)
cpu                87 drivers/powercap/intel_rapl_msr.c static int rapl_msr_read_raw(int cpu, struct reg_action *ra)
cpu                91 drivers/powercap/intel_rapl_msr.c 	if (rdmsrl_safe_on_cpu(cpu, msr, &ra->value)) {
cpu                92 drivers/powercap/intel_rapl_msr.c 		pr_debug("failed to read msr 0x%x on cpu %d\n", msr, cpu);
cpu               115 drivers/powercap/intel_rapl_msr.c static int rapl_msr_write_raw(int cpu, struct reg_action *ra)
cpu               119 drivers/powercap/intel_rapl_msr.c 	ret = smp_call_function_single(cpu, rapl_msr_update_func, ra, 1);
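
The intel_rapl_msr.c entries above read RAPL registers with rdmsrl_safe_on_cpu() on the package's lead CPU and perform writes by bouncing an update function there via smp_call_function_single(). The read half, as an x86-only hedged sketch (MY_MSR is a placeholder register number, not a real RAPL definition):

/* x86-only sketch: fault-tolerant MSR read on a chosen CPU. */
#include <linux/errno.h>
#include <linux/printk.h>
#include <linux/smp.h>
#include <asm/msr.h>

#define MY_MSR	0x123	/* placeholder MSR number, illustration only */

static int my_read_msr_on_cpu(int cpu, u64 *value)
{
	if (rdmsrl_safe_on_cpu(cpu, MY_MSR, value)) {
		pr_debug("failed to read msr 0x%x on cpu %d\n", MY_MSR, cpu);
		return -EIO;
	}
	return 0;
}
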
cpu               198 drivers/ps3/ps3-lpm.c u32 ps3_read_phys_ctr(u32 cpu, u32 phys_ctr)
cpu               242 drivers/ps3/ps3-lpm.c void ps3_write_phys_ctr(u32 cpu, u32 phys_ctr, u32 val)
cpu               303 drivers/ps3/ps3-lpm.c u32 ps3_read_ctr(u32 cpu, u32 ctr)
cpu               308 drivers/ps3/ps3-lpm.c 	val = ps3_read_phys_ctr(cpu, phys_ctr);
cpu               310 drivers/ps3/ps3-lpm.c 	if (ps3_get_ctr_size(cpu, phys_ctr) == 16)
cpu               324 drivers/ps3/ps3-lpm.c void ps3_write_ctr(u32 cpu, u32 ctr, u32 val)
cpu               331 drivers/ps3/ps3-lpm.c 	if (ps3_get_ctr_size(cpu, phys_ctr) == 16) {
cpu               332 drivers/ps3/ps3-lpm.c 		phys_val = ps3_read_phys_ctr(cpu, phys_ctr);
cpu               340 drivers/ps3/ps3-lpm.c 	ps3_write_phys_ctr(cpu, phys_ctr, val);
cpu               350 drivers/ps3/ps3-lpm.c u32 ps3_read_pm07_control(u32 cpu, u32 ctr)
cpu               362 drivers/ps3/ps3-lpm.c void ps3_write_pm07_control(u32 cpu, u32 ctr, u32 val)
cpu               387 drivers/ps3/ps3-lpm.c u32 ps3_read_pm(u32 cpu, enum pm_reg_name reg)
cpu               439 drivers/ps3/ps3-lpm.c void ps3_write_pm(u32 cpu, enum pm_reg_name reg, u32 val)
cpu               509 drivers/ps3/ps3-lpm.c u32 ps3_get_ctr_size(u32 cpu, u32 phys_ctr)
cpu               519 drivers/ps3/ps3-lpm.c 	pm_ctrl = ps3_read_pm(cpu, pm_control);
cpu               528 drivers/ps3/ps3-lpm.c void ps3_set_ctr_size(u32 cpu, u32 phys_ctr, u32 ctr_size)
cpu               538 drivers/ps3/ps3-lpm.c 	pm_ctrl = ps3_read_pm(cpu, pm_control);
cpu               543 drivers/ps3/ps3-lpm.c 		ps3_write_pm(cpu, pm_control, pm_ctrl);
cpu               548 drivers/ps3/ps3-lpm.c 		ps3_write_pm(cpu, pm_control, pm_ctrl);
cpu               826 drivers/ps3/ps3-lpm.c u32 ps3_get_hw_thread_id(int cpu)
cpu               828 drivers/ps3/ps3-lpm.c 	return get_hard_smp_processor_id(cpu);
cpu               838 drivers/ps3/ps3-lpm.c void ps3_enable_pm(u32 cpu)
cpu               882 drivers/ps3/ps3-lpm.c void ps3_disable_pm(u32 cpu)
cpu              1030 drivers/ps3/ps3-lpm.c u32 ps3_get_and_clear_pm_interrupts(u32 cpu)
cpu              1032 drivers/ps3/ps3-lpm.c 	return ps3_read_pm(cpu, pm_status);
cpu              1043 drivers/ps3/ps3-lpm.c void ps3_enable_pm_interrupts(u32 cpu, u32 thread, u32 mask)
cpu              1046 drivers/ps3/ps3-lpm.c 		ps3_write_pm(cpu, pm_status, mask);
cpu              1056 drivers/ps3/ps3-lpm.c void ps3_disable_pm_interrupts(u32 cpu)
cpu              1058 drivers/ps3/ps3-lpm.c 	ps3_get_and_clear_pm_interrupts(cpu);
cpu              1059 drivers/ps3/ps3-lpm.c 	ps3_write_pm(cpu, pm_status, 0);
cpu                39 drivers/ptp/ptp_kvm.c 	int cpu;
cpu                45 drivers/ptp/ptp_kvm.c 	cpu = smp_processor_id();
cpu                46 drivers/ptp/ptp_kvm.c 	src = &hv_clock[cpu].pvti;
cpu                48 drivers/s390/char/sclp_config.c 	int cpu;
cpu                54 drivers/s390/char/sclp_config.c 	for_each_online_cpu(cpu) {
cpu                55 drivers/s390/char/sclp_config.c 		dev = get_cpu_device(cpu);
cpu                26 drivers/s390/char/sclp_early.c 	u16 boot_cpu_address, cpu;
cpu                71 drivers/s390/char/sclp_early.c 	for (cpu = 0; cpu < sccb->ncpurl; cpue++, cpu++) {
cpu               364 drivers/sbus/char/envctrl.c static int envctrl_read_cpu_info(int cpu, struct i2c_child_t *pchild,
cpu               374 drivers/sbus/char/envctrl.c 			if (++j == cpu) {
cpu               380 drivers/sbus/char/envctrl.c 	if (j != cpu)
cpu              1352 drivers/scsi/aacraid/aacraid.h 	__le32	cpu;
cpu                99 drivers/scsi/aha1740.c static inline dma_addr_t ecb_cpu_to_dma (struct Scsi_Host *host, void *cpu)
cpu               104 drivers/scsi/aha1740.c 	offset = (char *) cpu - (char *) hdata->ecb;
cpu              2621 drivers/scsi/bnx2fc/bnx2fc_fcoe.c static int bnx2fc_cpu_online(unsigned int cpu)
cpu              2626 drivers/scsi/bnx2fc/bnx2fc_fcoe.c 	p = &per_cpu(bnx2fc_percpu, cpu);
cpu              2629 drivers/scsi/bnx2fc/bnx2fc_fcoe.c 					(void *)p, cpu_to_node(cpu),
cpu              2630 drivers/scsi/bnx2fc/bnx2fc_fcoe.c 					"bnx2fc_thread/%d", cpu);
cpu              2635 drivers/scsi/bnx2fc/bnx2fc_fcoe.c 	kthread_bind(thread, cpu);
cpu              2641 drivers/scsi/bnx2fc/bnx2fc_fcoe.c static int bnx2fc_cpu_offline(unsigned int cpu)
cpu              2647 drivers/scsi/bnx2fc/bnx2fc_fcoe.c 	BNX2FC_MISC_DBG("destroying io thread for CPU %d\n", cpu);
cpu              2650 drivers/scsi/bnx2fc/bnx2fc_fcoe.c 	p = &per_cpu(bnx2fc_percpu, cpu);
cpu              2691 drivers/scsi/bnx2fc/bnx2fc_fcoe.c 	unsigned int cpu = 0;
cpu              2734 drivers/scsi/bnx2fc/bnx2fc_fcoe.c 	for_each_possible_cpu(cpu) {
cpu              2735 drivers/scsi/bnx2fc/bnx2fc_fcoe.c 		p = &per_cpu(bnx2fc_percpu, cpu);
cpu              1006 drivers/scsi/bnx2fc/bnx2fc_hwi.c 	unsigned int cpu = wqe % num_possible_cpus();
cpu              1010 drivers/scsi/bnx2fc/bnx2fc_hwi.c 	fps = &per_cpu(bnx2fc_percpu, cpu);
cpu               411 drivers/scsi/bnx2i/bnx2i_init.c static int bnx2i_cpu_online(unsigned int cpu)
cpu               416 drivers/scsi/bnx2i/bnx2i_init.c 	p = &per_cpu(bnx2i_percpu, cpu);
cpu               419 drivers/scsi/bnx2i/bnx2i_init.c 					cpu_to_node(cpu),
cpu               420 drivers/scsi/bnx2i/bnx2i_init.c 					"bnx2i_thread/%d", cpu);
cpu               425 drivers/scsi/bnx2i/bnx2i_init.c 	kthread_bind(thread, cpu);
cpu               431 drivers/scsi/bnx2i/bnx2i_init.c static int bnx2i_cpu_offline(unsigned int cpu)
cpu               438 drivers/scsi/bnx2i/bnx2i_init.c 	p = &per_cpu(bnx2i_percpu, cpu);
cpu               469 drivers/scsi/bnx2i/bnx2i_init.c 	unsigned cpu = 0;
cpu               494 drivers/scsi/bnx2i/bnx2i_init.c 	for_each_possible_cpu(cpu) {
cpu               495 drivers/scsi/bnx2i/bnx2i_init.c 		p = &per_cpu(bnx2i_percpu, cpu);
cpu              1482 drivers/scsi/bnx2i/bnx2i_iscsi.c 	unsigned cpu = 0;
cpu              1491 drivers/scsi/bnx2i/bnx2i_iscsi.c 		for_each_online_cpu(cpu) {
cpu              1492 drivers/scsi/bnx2i/bnx2i_iscsi.c 			p = &per_cpu(bnx2i_percpu, cpu);
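The bnx2fc/bnx2i entries above show the per-CPU worker-thread variant of the hotplug pattern: on CPU online a kthread is created on the CPU's NUMA node, bound to that CPU and woken; on offline it is stopped. A sketch of the create-and-bind step under those assumptions; the example_ names and the thread's idle loop are illustrative only:

    #include <linux/kthread.h>
    #include <linux/sched.h>
    #include <linux/cpu.h>
    #include <linux/topology.h>
    #include <linux/err.h>

    static int example_thread_fn(void *data)
    {
            /* placeholder work loop: sleep until kthread_stop() is called */
            while (!kthread_should_stop()) {
                    set_current_state(TASK_INTERRUPTIBLE);
                    schedule();
            }
            __set_current_state(TASK_RUNNING);
            return 0;
    }

    static int example_cpu_online(unsigned int cpu)
    {
            struct task_struct *thread;

            thread = kthread_create_on_node(example_thread_fn, NULL,
                                            cpu_to_node(cpu), "example_io/%u", cpu);
            if (IS_ERR(thread))
                    return PTR_ERR(thread);

            kthread_bind(thread, cpu);      /* pin the worker to its CPU */
            wake_up_process(thread);
            return 0;
    }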
cpu              1887 drivers/scsi/csiostor/csio_scsi.c 	int cpu = smp_processor_id();
cpu              1889 drivers/scsi/csiostor/csio_scsi.c 	struct csio_scsi_qset *sqset = &hw->sqset[ln->portid][cpu];
cpu              1286 drivers/scsi/fcoe/fcoe.c static void fcoe_thread_cleanup_local(unsigned int cpu)
cpu              1291 drivers/scsi/fcoe/fcoe.c 	p = per_cpu_ptr(&fcoe_percpu, cpu);
cpu              1345 drivers/scsi/fcoe/fcoe.c 	unsigned int cpu;
cpu              1406 drivers/scsi/fcoe/fcoe.c 		cpu = ntohs(fh->fh_ox_id) & fc_cpu_mask;
cpu              1409 drivers/scsi/fcoe/fcoe.c 			cpu = fcoe_select_cpu();
cpu              1411 drivers/scsi/fcoe/fcoe.c 			cpu = ntohs(fh->fh_rx_id) & fc_cpu_mask;
cpu              1414 drivers/scsi/fcoe/fcoe.c 	if (cpu >= nr_cpu_ids)
cpu              1417 drivers/scsi/fcoe/fcoe.c 	fps = &per_cpu(fcoe_percpu, cpu);
cpu              1433 drivers/scsi/fcoe/fcoe.c 	schedule_work_on(cpu, &fps->work);
cpu              2341 drivers/scsi/fcoe/fcoe.c 	unsigned int cpu;
cpu              2343 drivers/scsi/fcoe/fcoe.c 	for_each_possible_cpu(cpu) {
cpu              2344 drivers/scsi/fcoe/fcoe.c 		pp = &per_cpu(fcoe_percpu, cpu);
cpu              2474 drivers/scsi/fcoe/fcoe.c 	unsigned int cpu;
cpu              2491 drivers/scsi/fcoe/fcoe.c 	for_each_possible_cpu(cpu) {
cpu              2492 drivers/scsi/fcoe/fcoe.c 		p = per_cpu_ptr(&fcoe_percpu, cpu);
cpu              2525 drivers/scsi/fcoe/fcoe.c 	unsigned int cpu;
cpu              2541 drivers/scsi/fcoe/fcoe.c 	for_each_possible_cpu(cpu)
cpu              2542 drivers/scsi/fcoe/fcoe.c 		fcoe_thread_cleanup_local(cpu);
cpu               172 drivers/scsi/fcoe/fcoe_transport.c 	unsigned int cpu;
cpu               183 drivers/scsi/fcoe/fcoe_transport.c 	for_each_possible_cpu(cpu) {
cpu               184 drivers/scsi/fcoe/fcoe_transport.c 		stats = per_cpu_ptr(lport->stats, cpu);
cpu               440 drivers/scsi/hisi_sas/hisi_sas_main.c 		int cpu = raw_smp_processor_id();
cpu               441 drivers/scsi/hisi_sas/hisi_sas_main.c 		unsigned int dq_index = hisi_hba->reply_map[cpu];
cpu              2350 drivers/scsi/hisi_sas/hisi_sas_v3_hw.c 	int queue, cpu;
cpu              2360 drivers/scsi/hisi_sas/hisi_sas_v3_hw.c 		for_each_cpu(cpu, mask)
cpu              2361 drivers/scsi/hisi_sas/hisi_sas_v3_hw.c 			hisi_hba->reply_map[cpu] = queue;
cpu              2366 drivers/scsi/hisi_sas/hisi_sas_v3_hw.c 	for_each_possible_cpu(cpu)
cpu              2367 drivers/scsi/hisi_sas/hisi_sas_v3_hw.c 		hisi_hba->reply_map[cpu] = cpu % hisi_hba->queue_count;
cpu              2861 drivers/scsi/hpsa.c 	int cpu;
cpu              2864 drivers/scsi/hpsa.c 	cpu = get_cpu();
cpu              2865 drivers/scsi/hpsa.c 	lockup_detected = per_cpu_ptr(h->lockup_detected, cpu);
cpu              7441 drivers/scsi/hpsa.c 	unsigned int queue, cpu;
cpu              7448 drivers/scsi/hpsa.c 		for_each_cpu(cpu, mask)
cpu              7449 drivers/scsi/hpsa.c 			h->reply_map[cpu] = queue;
cpu              7454 drivers/scsi/hpsa.c 	for_each_possible_cpu(cpu)
cpu              7455 drivers/scsi/hpsa.c 		h->reply_map[cpu] = 0;
cpu              8210 drivers/scsi/hpsa.c 	int cpu;
cpu              8212 drivers/scsi/hpsa.c 	for_each_online_cpu(cpu) {
cpu              8214 drivers/scsi/hpsa.c 		lockup_detected = per_cpu_ptr(h->lockup_detected, cpu);
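The hisi_sas, hpsa and (further down) megaraid_sas entries build a CPU-to-queue reply map from the MSI-X affinity masks, falling back to a flat assignment when no affinity information is available. A sketch of that idea, assuming the mask comes from pci_irq_get_affinity(); real drivers may offset the vector index for pre-vectors, and hpsa's fallback maps every CPU to queue 0 rather than spreading round-robin:

    #include <linux/pci.h>
    #include <linux/cpumask.h>

    static void example_setup_reply_map(struct pci_dev *pdev, unsigned int *reply_map,
                                        unsigned int nr_queues)
    {
            const struct cpumask *mask;
            unsigned int queue, cpu;

            for (queue = 0; queue < nr_queues; queue++) {
                    mask = pci_irq_get_affinity(pdev, queue);
                    if (!mask)
                            goto fallback;
                    for_each_cpu(cpu, mask)
                            reply_map[cpu] = queue;
            }
            return;

    fallback:
            /* no affinity data (e.g. MSI-X not in use): flat round-robin spread */
            for_each_possible_cpu(cpu)
                    reply_map[cpu] = cpu % nr_queues;
    }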
cpu               812 drivers/scsi/libfc/fc_exch.c 	unsigned int cpu;
cpu               824 drivers/scsi/libfc/fc_exch.c 	cpu = get_cpu();
cpu               825 drivers/scsi/libfc/fc_exch.c 	pool = per_cpu_ptr(mp->pool, cpu);
cpu               872 drivers/scsi/libfc/fc_exch.c 	ep->oxid = ep->xid = (index << fc_cpu_order | cpu) + mp->min_xid;
cpu               928 drivers/scsi/libfc/fc_exch.c 	u16 cpu = xid & fc_cpu_mask;
cpu               933 drivers/scsi/libfc/fc_exch.c 	if (cpu >= nr_cpu_ids || !cpu_possible(cpu)) {
cpu               935 drivers/scsi/libfc/fc_exch.c 		       lport->host->host_no, lport->port_id, xid, cpu);
cpu               940 drivers/scsi/libfc/fc_exch.c 		pool = per_cpu_ptr(mp->pool, cpu);
cpu              1966 drivers/scsi/libfc/fc_exch.c 	unsigned int cpu;
cpu              1969 drivers/scsi/libfc/fc_exch.c 		for_each_possible_cpu(cpu)
cpu              1971 drivers/scsi/libfc/fc_exch.c 					   per_cpu_ptr(ema->mp->pool, cpu),
cpu              2454 drivers/scsi/libfc/fc_exch.c 	unsigned int cpu;
cpu              2506 drivers/scsi/libfc/fc_exch.c 	for_each_possible_cpu(cpu) {
cpu              2507 drivers/scsi/libfc/fc_exch.c 		pool = per_cpu_ptr(mp->pool, cpu);
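The fc_exch.c entries above encode the allocating CPU in the low bits of the exchange id ((index << fc_cpu_order | cpu) + min_xid), so the completion path can recover the owning per-CPU pool straight from the xid. A self-consistent sketch of that encoding; the constants are examples, and unlike this sketch the driver masks the xid directly because it keeps min_xid aligned to the CPU field:

    #include <linux/types.h>

    #define EX_CPU_ORDER    3                       /* example: up to 8 per-CPU pools */
    #define EX_CPU_MASK     ((1U << EX_CPU_ORDER) - 1)

    /* allocate: put the owning CPU in the low bits of the exchange id */
    static inline u16 example_xid_encode(u16 index, unsigned int cpu, u16 min_xid)
    {
            return (u16)(((index << EX_CPU_ORDER) | cpu) + min_xid);
    }

    /* complete: recover the owning CPU so the right per-CPU pool is used */
    static inline unsigned int example_xid_to_cpu(u16 xid, u16 min_xid)
    {
            return (xid - min_xid) & EX_CPU_MASK;
    }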
cpu               294 drivers/scsi/libfc/fc_lport.c 	unsigned int cpu;
cpu               303 drivers/scsi/libfc/fc_lport.c 	for_each_possible_cpu(cpu) {
cpu               306 drivers/scsi/libfc/fc_lport.c 		stats = per_cpu_ptr(lport->stats, cpu);
cpu              8656 drivers/scsi/lpfc/lpfc_init.c 	int cpu;
cpu              8658 drivers/scsi/lpfc/lpfc_init.c 	cpu = lpfc_find_cpu_handle(phba, idx, LPFC_FIND_BY_HDWQ);
cpu              8664 drivers/scsi/lpfc/lpfc_init.c 					      LPFC_CQE_EXP_COUNT, cpu);
cpu              8669 drivers/scsi/lpfc/lpfc_init.c 					      phba->sli4_hba.cq_ecount, cpu);
cpu              8677 drivers/scsi/lpfc/lpfc_init.c 	qdesc->chann = cpu;
cpu              8687 drivers/scsi/lpfc/lpfc_init.c 					      LPFC_WQE_EXP_COUNT, cpu);
cpu              8691 drivers/scsi/lpfc/lpfc_init.c 					      phba->sli4_hba.wq_ecount, cpu);
cpu              8700 drivers/scsi/lpfc/lpfc_init.c 	qdesc->chann = cpu;
cpu              8724 drivers/scsi/lpfc/lpfc_init.c 	int idx, cpu, eqcpu;
cpu              8813 drivers/scsi/lpfc/lpfc_init.c 	for_each_present_cpu(cpu) {
cpu              8818 drivers/scsi/lpfc/lpfc_init.c 		cpup = &phba->sli4_hba.cpu_map[cpu];
cpu              8828 drivers/scsi/lpfc/lpfc_init.c 					      phba->sli4_hba.eq_ecount, cpu);
cpu              8837 drivers/scsi/lpfc/lpfc_init.c 		qdesc->chann = cpu; /* First CPU this EQ is affinitized to */
cpu              8850 drivers/scsi/lpfc/lpfc_init.c 	for_each_present_cpu(cpu) {
cpu              8851 drivers/scsi/lpfc/lpfc_init.c 		cpup = &phba->sli4_hba.cpu_map[cpu];
cpu              8876 drivers/scsi/lpfc/lpfc_init.c 			cpu = lpfc_find_cpu_handle(phba, idx,
cpu              8882 drivers/scsi/lpfc/lpfc_init.c 						      cpu);
cpu              8891 drivers/scsi/lpfc/lpfc_init.c 			qdesc->chann = cpu;
cpu              8900 drivers/scsi/lpfc/lpfc_init.c 	cpu = lpfc_find_cpu_handle(phba, 0, LPFC_FIND_BY_EQ);
cpu              8904 drivers/scsi/lpfc/lpfc_init.c 				      phba->sli4_hba.cq_ecount, cpu);
cpu              8916 drivers/scsi/lpfc/lpfc_init.c 				      phba->sli4_hba.cq_ecount, cpu);
cpu              8923 drivers/scsi/lpfc/lpfc_init.c 	qdesc->chann = cpu;
cpu              8935 drivers/scsi/lpfc/lpfc_init.c 				      phba->sli4_hba.mq_ecount, cpu);
cpu              8941 drivers/scsi/lpfc/lpfc_init.c 	qdesc->chann = cpu;
cpu              8951 drivers/scsi/lpfc/lpfc_init.c 				      phba->sli4_hba.wq_ecount, cpu);
cpu              8957 drivers/scsi/lpfc/lpfc_init.c 	qdesc->chann = cpu;
cpu              8965 drivers/scsi/lpfc/lpfc_init.c 					      phba->sli4_hba.cq_ecount, cpu);
cpu              8971 drivers/scsi/lpfc/lpfc_init.c 		qdesc->chann = cpu;
cpu              8978 drivers/scsi/lpfc/lpfc_init.c 					      phba->sli4_hba.wq_ecount, cpu);
cpu              8984 drivers/scsi/lpfc/lpfc_init.c 		qdesc->chann = cpu;
cpu              8996 drivers/scsi/lpfc/lpfc_init.c 				      phba->sli4_hba.rq_ecount, cpu);
cpu              9007 drivers/scsi/lpfc/lpfc_init.c 				      phba->sli4_hba.rq_ecount, cpu);
cpu              9018 drivers/scsi/lpfc/lpfc_init.c 			cpu = lpfc_find_cpu_handle(phba, idx,
cpu              9025 drivers/scsi/lpfc/lpfc_init.c 						      cpu);
cpu              9038 drivers/scsi/lpfc/lpfc_init.c 						   cpu_to_node(cpu));
cpu              9054 drivers/scsi/lpfc/lpfc_init.c 						      cpu);
cpu              9360 drivers/scsi/lpfc/lpfc_init.c 	int qidx, cpu;
cpu              9425 drivers/scsi/lpfc/lpfc_init.c 		for_each_present_cpu(cpu) {
cpu              9426 drivers/scsi/lpfc/lpfc_init.c 			cpup = &phba->sli4_hba.cpu_map[cpu];
cpu              9460 drivers/scsi/lpfc/lpfc_init.c 		cpu = lpfc_find_cpu_handle(phba, qidx, LPFC_FIND_BY_HDWQ);
cpu              9461 drivers/scsi/lpfc/lpfc_init.c 		cpup = &phba->sli4_hba.cpu_map[cpu];
cpu              10582 drivers/scsi/lpfc/lpfc_init.c 	int cpu;
cpu              10585 drivers/scsi/lpfc/lpfc_init.c 	for_each_present_cpu(cpu) {
cpu              10586 drivers/scsi/lpfc/lpfc_init.c 		cpup = &phba->sli4_hba.cpu_map[cpu];
cpu              10596 drivers/scsi/lpfc/lpfc_init.c 			return cpu;
cpu              10600 drivers/scsi/lpfc/lpfc_init.c 			return cpu;
cpu              10614 drivers/scsi/lpfc/lpfc_init.c lpfc_find_hyper(struct lpfc_hba *phba, int cpu,
cpu              10625 drivers/scsi/lpfc/lpfc_init.c 		    (cpu != idx))
cpu              10645 drivers/scsi/lpfc/lpfc_init.c 	int i, cpu, idx, next_idx, new_cpu, start_cpu, first_cpu;
cpu              10656 drivers/scsi/lpfc/lpfc_init.c 	for_each_possible_cpu(cpu) {
cpu              10657 drivers/scsi/lpfc/lpfc_init.c 		cpup = &phba->sli4_hba.cpu_map[cpu];
cpu              10672 drivers/scsi/lpfc/lpfc_init.c 	for_each_present_cpu(cpu) {
cpu              10673 drivers/scsi/lpfc/lpfc_init.c 		cpup = &phba->sli4_hba.cpu_map[cpu];
cpu              10675 drivers/scsi/lpfc/lpfc_init.c 		cpuinfo = &cpu_data(cpu);
cpu              10678 drivers/scsi/lpfc/lpfc_init.c 		if (lpfc_find_hyper(phba, cpu, cpup->phys_id, cpup->core_id))
cpu              10683 drivers/scsi/lpfc/lpfc_init.c 		cpup->core_id = cpu;
cpu              10688 drivers/scsi/lpfc/lpfc_init.c 				cpu, cpup->phys_id, cpup->core_id, cpup->flag);
cpu              10729 drivers/scsi/lpfc/lpfc_init.c 				cpu = cpumask_first(cpu_present_mask);
cpu              10730 drivers/scsi/lpfc/lpfc_init.c 				cpup = &phba->sli4_hba.cpu_map[cpu];
cpu              10740 drivers/scsi/lpfc/lpfc_init.c 		for_each_cpu_and(cpu, maskp, cpu_present_mask) {
cpu              10742 drivers/scsi/lpfc/lpfc_init.c 			cpup = &phba->sli4_hba.cpu_map[cpu];
cpu              10756 drivers/scsi/lpfc/lpfc_init.c 					cpu, cpup->irq, cpup->eq, cpup->flag);
cpu              10768 drivers/scsi/lpfc/lpfc_init.c 	for_each_present_cpu(cpu) {
cpu              10769 drivers/scsi/lpfc/lpfc_init.c 		cpup = &phba->sli4_hba.cpu_map[cpu];
cpu              10812 drivers/scsi/lpfc/lpfc_init.c 					cpu, cpup->irq, new_cpu, cpup->phys_id);
cpu              10819 drivers/scsi/lpfc/lpfc_init.c 	for_each_present_cpu(cpu) {
cpu              10820 drivers/scsi/lpfc/lpfc_init.c 		cpup = &phba->sli4_hba.cpu_map[cpu];
cpu              10865 drivers/scsi/lpfc/lpfc_init.c 					cpu, cpup->irq, new_cpu,
cpu              10874 drivers/scsi/lpfc/lpfc_init.c 	for_each_present_cpu(cpu) {
cpu              10875 drivers/scsi/lpfc/lpfc_init.c 		cpup = &phba->sli4_hba.cpu_map[cpu];
cpu              10887 drivers/scsi/lpfc/lpfc_init.c 				cpu, cpup->phys_id, cpup->core_id,
cpu              10901 drivers/scsi/lpfc/lpfc_init.c 	for_each_present_cpu(cpu) {
cpu              10902 drivers/scsi/lpfc/lpfc_init.c 		cpup = &phba->sli4_hba.cpu_map[cpu];
cpu              10965 drivers/scsi/lpfc/lpfc_init.c 				cpu, cpup->phys_id, cpup->core_id,
cpu              10983 drivers/scsi/lpfc/lpfc_init.c lpfc_cpuhp_get_eq(struct lpfc_hba *phba, unsigned int cpu,
cpu              11002 drivers/scsi/lpfc/lpfc_init.c 		if (!cpumask_and(&tmp, maskp, cpumask_of(cpu)))
cpu              11089 drivers/scsi/lpfc/lpfc_init.c static int lpfc_cpu_offline(unsigned int cpu, struct hlist_node *node)
cpu              11104 drivers/scsi/lpfc/lpfc_init.c 	lpfc_cpuhp_get_eq(phba, cpu, &eqlist);
cpu              11115 drivers/scsi/lpfc/lpfc_init.c static int lpfc_cpu_online(unsigned int cpu, struct hlist_node *node)
cpu              11132 drivers/scsi/lpfc/lpfc_init.c 		if (n == cpu)
cpu              1148 drivers/scsi/lpfc/lpfc_nvme.c 		uint32_t cpu;
cpu              1150 drivers/scsi/lpfc/lpfc_nvme.c 		cpu = raw_smp_processor_id();
cpu              1151 drivers/scsi/lpfc/lpfc_nvme.c 		if (cpu < LPFC_CHECK_CPU_CNT) {
cpu              1152 drivers/scsi/lpfc/lpfc_nvme.c 			if (lpfc_ncmd->cpu != cpu)
cpu              1157 drivers/scsi/lpfc/lpfc_nvme.c 						 cpu, lpfc_ncmd->cpu);
cpu              1158 drivers/scsi/lpfc/lpfc_nvme.c 			phba->sli4_hba.hdwq[idx].cpucheck_cmpl_io[cpu]++;
cpu              1501 drivers/scsi/lpfc/lpfc_nvme.c 	int idx, cpu;
cpu              1629 drivers/scsi/lpfc/lpfc_nvme.c 		cpu = raw_smp_processor_id();
cpu              1630 drivers/scsi/lpfc/lpfc_nvme.c 		idx = phba->sli4_hba.cpu_map[cpu].hdwq;
cpu              1709 drivers/scsi/lpfc/lpfc_nvme.c 		cpu = raw_smp_processor_id();
cpu              1710 drivers/scsi/lpfc/lpfc_nvme.c 		if (cpu < LPFC_CHECK_CPU_CNT) {
cpu              1711 drivers/scsi/lpfc/lpfc_nvme.c 			lpfc_ncmd->cpu = cpu;
cpu              1712 drivers/scsi/lpfc/lpfc_nvme.c 			if (idx != cpu)
cpu              1717 drivers/scsi/lpfc/lpfc_nvme.c 						lpfc_ncmd->cpu,
cpu              1719 drivers/scsi/lpfc/lpfc_nvme.c 			phba->sli4_hba.hdwq[idx].cpucheck_xmt_io[cpu]++;
cpu               378 drivers/scsi/lpfc/lpfc_nvmet.c 	int cpu;
cpu               494 drivers/scsi/lpfc/lpfc_nvmet.c 	cpu = raw_smp_processor_id();
cpu               495 drivers/scsi/lpfc/lpfc_nvmet.c 	infop = lpfc_get_ctx_list(phba, cpu, ctxp->idx);
cpu               828 drivers/scsi/lpfc/lpfc_nvmet.c 			if (ctxp->cpu != id)
cpu               832 drivers/scsi/lpfc/lpfc_nvmet.c 						id, ctxp->cpu);
cpu               975 drivers/scsi/lpfc/lpfc_nvmet.c 		ctxp->cpu = id; /* Setup cpu for cmpl check */
cpu              1297 drivers/scsi/lpfc/lpfc_nvmet.c 	int i, j, idx, cpu;
cpu              1361 drivers/scsi/lpfc/lpfc_nvmet.c 	cpu = cpumask_first(cpu_present_mask);
cpu              1416 drivers/scsi/lpfc/lpfc_nvmet.c 		infop = lpfc_get_ctx_list(phba, cpu, idx);
cpu              1426 drivers/scsi/lpfc/lpfc_nvmet.c 			cpu = cpumask_first(cpu_present_mask);
cpu              1429 drivers/scsi/lpfc/lpfc_nvmet.c 		cpu = cpumask_next(cpu, cpu_present_mask);
cpu              1430 drivers/scsi/lpfc/lpfc_nvmet.c 		if (cpu == nr_cpu_ids)
cpu              1431 drivers/scsi/lpfc/lpfc_nvmet.c 			cpu = cpumask_first(cpu_present_mask);
cpu               103 drivers/scsi/lpfc/lpfc_nvmet.h #define lpfc_get_ctx_list(phba, cpu, mrq)  \
cpu               104 drivers/scsi/lpfc/lpfc_nvmet.h 	(phba->sli4_hba.nvmet_ctx_info + ((cpu * phba->cfg_nvmet_mrq) + mrq))
cpu               123 drivers/scsi/lpfc/lpfc_nvmet.h 	uint16_t cpu;
cpu               639 drivers/scsi/lpfc/lpfc_scsi.c 	uint32_t cpu, idx;
cpu               643 drivers/scsi/lpfc/lpfc_scsi.c 	cpu = raw_smp_processor_id();
cpu               648 drivers/scsi/lpfc/lpfc_scsi.c 		idx = phba->sli4_hba.cpu_map[cpu].hdwq;
cpu               669 drivers/scsi/lpfc/lpfc_scsi.c 	lpfc_cmd->cpu = cpu;
cpu              3809 drivers/scsi/lpfc/lpfc_scsi.c 	int cpu;
cpu              3830 drivers/scsi/lpfc/lpfc_scsi.c 		cpu = raw_smp_processor_id();
cpu              3831 drivers/scsi/lpfc/lpfc_scsi.c 		if (cpu < LPFC_CHECK_CPU_CNT && phba->sli4_hba.hdwq)
cpu              3832 drivers/scsi/lpfc/lpfc_scsi.c 			phba->sli4_hba.hdwq[idx].cpucheck_cmpl_io[cpu]++;
cpu              4507 drivers/scsi/lpfc/lpfc_scsi.c 	int cpu;
cpu              4629 drivers/scsi/lpfc/lpfc_scsi.c 		cpu = raw_smp_processor_id();
cpu              4630 drivers/scsi/lpfc/lpfc_scsi.c 		if (cpu < LPFC_CHECK_CPU_CNT) {
cpu              4633 drivers/scsi/lpfc/lpfc_scsi.c 			hdwq->cpucheck_xmt_io[cpu]++;
cpu              14514 drivers/scsi/lpfc/lpfc_sli.c 		      uint32_t entry_size, uint32_t entry_count, int cpu)
cpu              14531 drivers/scsi/lpfc/lpfc_sli.c 			     GFP_KERNEL, cpu_to_node(cpu));
cpu               383 drivers/scsi/lpfc/lpfc_sli.h 	uint16_t cpu;
cpu              1022 drivers/scsi/lpfc/lpfc_sli4.h 					 uint32_t entry_count, int cpu);
cpu              5698 drivers/scsi/megaraid/megaraid_sas_base.c 	unsigned int queue, cpu, low_latency_index_start;
cpu              5707 drivers/scsi/megaraid/megaraid_sas_base.c 		for_each_cpu(cpu, mask)
cpu              5708 drivers/scsi/megaraid/megaraid_sas_base.c 			instance->reply_map[cpu] = queue;
cpu              5714 drivers/scsi/megaraid/megaraid_sas_base.c 	for_each_possible_cpu(cpu) {
cpu              5715 drivers/scsi/megaraid/megaraid_sas_base.c 		instance->reply_map[cpu] = queue;
cpu              2888 drivers/scsi/mpt3sas/mpt3sas_base.c 	unsigned int cpu, nr_cpus, nr_msix, index = 0;
cpu              2935 drivers/scsi/mpt3sas/mpt3sas_base.c 			for_each_cpu_and(cpu, mask, cpu_online_mask) {
cpu              2936 drivers/scsi/mpt3sas/mpt3sas_base.c 				if (cpu >= ioc->cpu_msix_table_sz)
cpu              2938 drivers/scsi/mpt3sas/mpt3sas_base.c 				ioc->cpu_msix_table[cpu] = reply_q->msix_index;
cpu              2945 drivers/scsi/mpt3sas/mpt3sas_base.c 	cpu = cpumask_first(cpu_online_mask);
cpu              2955 drivers/scsi/mpt3sas/mpt3sas_base.c 		if (cpu >= nr_cpus)
cpu              2962 drivers/scsi/mpt3sas/mpt3sas_base.c 			ioc->cpu_msix_table[cpu] = reply_q->msix_index;
cpu              2963 drivers/scsi/mpt3sas/mpt3sas_base.c 			cpu = cpumask_next(cpu, cpu_online_mask);
cpu              1347 drivers/scsi/myrs.c 	if (info->cpu[0].cpu_count) {
cpu              1350 drivers/scsi/myrs.c 			if (tbl[i].type == info->cpu[0].cpu_type) {
cpu              1356 drivers/scsi/myrs.c 	if (info->cpu[1].cpu_count) {
cpu              1359 drivers/scsi/myrs.c 			if (tbl[i].type == info->cpu[1].cpu_type) {
cpu              1368 drivers/scsi/myrs.c 			       info->cpu[0].cpu_name,
cpu              1369 drivers/scsi/myrs.c 			       first_processor, info->cpu[0].cpu_count,
cpu              1370 drivers/scsi/myrs.c 			       info->cpu[1].cpu_name,
cpu              1371 drivers/scsi/myrs.c 			       second_processor, info->cpu[1].cpu_count);
cpu              1374 drivers/scsi/myrs.c 			       info->cpu[0].cpu_name,
cpu              1375 drivers/scsi/myrs.c 			       first_processor, info->cpu[0].cpu_count);
cpu              1378 drivers/scsi/myrs.c 			       info->cpu[1].cpu_name,
cpu              1379 drivers/scsi/myrs.c 			       second_processor, info->cpu[1].cpu_count);
cpu               271 drivers/scsi/myrs.h 	} __packed cpu[2];
cpu               172 drivers/scsi/qedf/qedf.h 	unsigned int cpu;
cpu                78 drivers/scsi/qedf/qedf_els.c 	els_req->cpu = smp_processor_id();
cpu               471 drivers/scsi/qedf/qedf_io.c 	io_req->cpu = 0;
cpu               837 drivers/scsi/qedf/qedf_io.c 		io_log->req_cpu = io_req->cpu;
cpu               841 drivers/scsi/qedf/qedf_io.c 		io_log->req_cpu = io_req->cpu;
cpu               873 drivers/scsi/qedf/qedf_io.c 	io_req->cpu = smp_processor_id();
cpu              2313 drivers/scsi/qedf/qedf_io.c 	io_req->cpu = smp_processor_id();
cpu              2098 drivers/scsi/qedf/qedf_main.c 	unsigned int cpu;
cpu              2152 drivers/scsi/qedf/qedf_main.c 			cpu = 0;
cpu              2154 drivers/scsi/qedf/qedf_main.c 			cpu = io_req->cpu;
cpu              2174 drivers/scsi/qedf/qedf_main.c 		queue_work_on(cpu, qedf_io_wq, &io_work->work);
cpu              2268 drivers/scsi/qedf/qedf_main.c 	int i, rc, cpu;
cpu              2272 drivers/scsi/qedf/qedf_main.c 	cpu = cpumask_first(cpu_online_mask);
cpu              2290 drivers/scsi/qedf/qedf_main.c 		rc = irq_set_affinity_hint(vector, get_cpu_mask(cpu));
cpu              2291 drivers/scsi/qedf/qedf_main.c 		cpu = cpumask_next(cpu, cpu_online_mask);
cpu              1205 drivers/scsi/qedi/qedi_main.c 	int cpu;
cpu              1220 drivers/scsi/qedi/qedi_main.c 	cpu = smp_processor_id();
cpu              1221 drivers/scsi/qedi/qedi_main.c 	p = &per_cpu(qedi_percpu, cpu);
cpu              1343 drivers/scsi/qedi/qedi_main.c 	int i, rc, cpu;
cpu              1346 drivers/scsi/qedi/qedi_main.c 	cpu = cpumask_first(cpu_online_mask);
cpu              1366 drivers/scsi/qedi/qedi_main.c 					   get_cpu_mask(cpu));
cpu              1367 drivers/scsi/qedi/qedi_main.c 		cpu = cpumask_next(cpu, cpu_online_mask);
cpu              1890 drivers/scsi/qedi/qedi_main.c static int qedi_cpu_online(unsigned int cpu)
cpu              1896 drivers/scsi/qedi/qedi_main.c 					cpu_to_node(cpu),
cpu              1897 drivers/scsi/qedi/qedi_main.c 					"qedi_thread/%d", cpu);
cpu              1901 drivers/scsi/qedi/qedi_main.c 	kthread_bind(thread, cpu);
cpu              1907 drivers/scsi/qedi/qedi_main.c static int qedi_cpu_offline(unsigned int cpu)
cpu              2705 drivers/scsi/qedi/qedi_main.c 	int cpu, rc = 0;
cpu              2724 drivers/scsi/qedi/qedi_main.c 	for_each_possible_cpu(cpu) {
cpu              2725 drivers/scsi/qedi/qedi_main.c 		p = &per_cpu(qedi_percpu, cpu);
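The qedf/qedi entries above walk the online CPUs with cpumask_first()/cpumask_next() and hint one CPU per MSI-X vector via irq_set_affinity_hint(). A sketch of that round-robin spread, assuming the caller already has its vector numbers in an array; the wrap-around and the dropped error handling are simplifications not present in the drivers:

    #include <linux/interrupt.h>
    #include <linux/cpumask.h>

    static void example_spread_irqs(const unsigned int *vecs, unsigned int nvecs)
    {
            unsigned int i, cpu;

            cpu = cpumask_first(cpu_online_mask);
            for (i = 0; i < nvecs; i++) {
                    irq_set_affinity_hint(vecs[i], get_cpu_mask(cpu));
                    cpu = cpumask_next(cpu, cpu_online_mask);
                    if (cpu >= nr_cpu_ids)          /* wrap if vectors outnumber CPUs */
                            cpu = cpumask_first(cpu_online_mask);
            }
    }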
cpu              4272 drivers/scsi/qla2xxx/qla_target.c 	int tag, cpu;
cpu              4274 drivers/scsi/qla2xxx/qla_target.c 	tag = sbitmap_queue_get(&se_sess->sess_tag_pool, &cpu);
cpu              4287 drivers/scsi/qla2xxx/qla_target.c 	cmd->se_cmd.map_cpu = cpu;
cpu              5347 drivers/scsi/qla2xxx/qla_target.c 	int tag, cpu;
cpu              5379 drivers/scsi/qla2xxx/qla_target.c 	tag = sbitmap_queue_get(&se_sess->sess_tag_pool, &cpu);
cpu              5407 drivers/scsi/qla2xxx/qla_target.c 	cmd->se_cmd.map_cpu = cpu;
cpu                37 drivers/scsi/sgiwd93.c 	void *cpu;
cpu                76 drivers/scsi/sgiwd93.c 	hcp = hd->cpu;
cpu                98 drivers/scsi/sgiwd93.c 	dma_cache_sync(hd->dev, hd->cpu,
cpu                99 drivers/scsi/sgiwd93.c 		       (unsigned long)(hcp + 1) - (unsigned long)hd->cpu,
cpu               109 drivers/scsi/sgiwd93.c 	pr_debug("dma_setup: datainp<%d> hcp<%p> ", datainp, hdata->cpu);
cpu               178 drivers/scsi/sgiwd93.c 	struct hpc_chunk *hcp = (struct hpc_chunk *)hdata->cpu;
cpu               237 drivers/scsi/sgiwd93.c 	hdata->cpu = dma_alloc_attrs(&pdev->dev, HPC_DMA_SIZE, &hdata->dma,
cpu               239 drivers/scsi/sgiwd93.c 	if (!hdata->cpu) {
cpu               277 drivers/scsi/sgiwd93.c 	dma_free_attrs(&pdev->dev, HPC_DMA_SIZE, hdata->cpu, hdata->dma,
cpu               294 drivers/scsi/sgiwd93.c 	dma_free_attrs(&pdev->dev, HPC_DMA_SIZE, hdata->cpu, hdata->dma,
cpu                21 drivers/sh/intc/chip.c 	unsigned int cpu;
cpu                23 drivers/sh/intc/chip.c 	for (cpu = 0; cpu < SMP_NR(d, _INTC_ADDR_E(handle)); cpu++) {
cpu                25 drivers/sh/intc/chip.c 		if (!cpumask_test_cpu(cpu, irq_data_get_affinity_mask(data)))
cpu                28 drivers/sh/intc/chip.c 		addr = INTC_REG(d, _INTC_ADDR_E(handle), cpu);
cpu                47 drivers/sh/intc/chip.c 	unsigned int cpu;
cpu                51 drivers/sh/intc/chip.c 	for (cpu = 0; cpu < SMP_NR(d, _INTC_ADDR_D(handle)); cpu++) {
cpu                53 drivers/sh/intc/chip.c 		if (!cpumask_test_cpu(cpu, irq_data_get_affinity_mask(data)))
cpu                56 drivers/sh/intc/chip.c 		addr = INTC_REG(d, _INTC_ADDR_D(handle), cpu);
cpu               210 drivers/sh/intc/handle.c 	unsigned int cpu;
cpu               217 drivers/sh/intc/handle.c 		for (cpu = 0; cpu < SMP_NR(d, _INTC_ADDR_E(handle)); cpu++) {
cpu               218 drivers/sh/intc/handle.c 			addr = INTC_REG(d, _INTC_ADDR_E(handle), cpu);
cpu               223 drivers/sh/intc/handle.c 		for (cpu = 0; cpu < SMP_NR(d, _INTC_ADDR_D(handle)); cpu++) {
cpu               224 drivers/sh/intc/handle.c 			addr = INTC_REG(d, _INTC_ADDR_D(handle), cpu);
cpu                56 drivers/soc/fsl/dpio/dpio-driver.c static int dpaa2_dpio_get_cluster_sdest(struct fsl_mc_device *dpio_dev, int cpu)
cpu                73 drivers/soc/fsl/dpio/dpio-driver.c 	return cluster_base + cpu / cluster_size;
cpu                94 drivers/soc/fsl/dpio/dpio-driver.c static int register_dpio_irq_handlers(struct fsl_mc_device *dpio_dev, int cpu)
cpu               116 drivers/soc/fsl/dpio/dpio-driver.c 	cpumask_set_cpu(cpu, &mask);
cpu               120 drivers/soc/fsl/dpio/dpio-driver.c 			irq->msi_desc->irq, cpu);
cpu               187 drivers/soc/fsl/dpio/dpio-driver.c 	desc.cpu = possible_next_cpu;
cpu               190 drivers/soc/fsl/dpio/dpio-driver.c 	sdest = dpaa2_dpio_get_cluster_sdest(dpio_dev, desc.cpu);
cpu               197 drivers/soc/fsl/dpio/dpio-driver.c 				desc.cpu);
cpu               243 drivers/soc/fsl/dpio/dpio-driver.c 	err = register_dpio_irq_handlers(dpio_dev, desc.cpu);
cpu               280 drivers/soc/fsl/dpio/dpio-driver.c 	int err = 0, cpu;
cpu               284 drivers/soc/fsl/dpio/dpio-driver.c 	cpu = dpaa2_io_get_cpu(priv->io);
cpu               290 drivers/soc/fsl/dpio/dpio-driver.c 	cpumask_set_cpu(cpu, cpus_unused_mask);
cpu                49 drivers/soc/fsl/dpio/dpio-service.c 						     int cpu)
cpu                54 drivers/soc/fsl/dpio/dpio-service.c 	if (cpu != DPAA2_IO_ANY_CPU && cpu >= num_possible_cpus())
cpu                61 drivers/soc/fsl/dpio/dpio-service.c 	if (unlikely(cpu < 0))
cpu                62 drivers/soc/fsl/dpio/dpio-service.c 		cpu = smp_processor_id();
cpu                65 drivers/soc/fsl/dpio/dpio-service.c 	return dpio_by_cpu[cpu];
cpu                90 drivers/soc/fsl/dpio/dpio-service.c struct dpaa2_io *dpaa2_io_service_select(int cpu)
cpu                92 drivers/soc/fsl/dpio/dpio-service.c 	if (cpu == DPAA2_IO_ANY_CPU)
cpu                95 drivers/soc/fsl/dpio/dpio-service.c 	return service_select_by_cpu(NULL, cpu);
cpu               118 drivers/soc/fsl/dpio/dpio-service.c 	if (desc->cpu != DPAA2_IO_ANY_CPU && desc->cpu >= num_possible_cpus()) {
cpu               148 drivers/soc/fsl/dpio/dpio-service.c 	if (desc->cpu >= 0 && !dpio_by_cpu[desc->cpu])
cpu               149 drivers/soc/fsl/dpio/dpio-service.c 		dpio_by_cpu[desc->cpu] = obj;
cpu               169 drivers/soc/fsl/dpio/dpio-service.c 	dpio_by_cpu[d->dpio_desc.cpu] = NULL;
cpu               231 drivers/soc/fsl/dpio/dpio-service.c 	return d->dpio_desc.cpu;
cpu               560 drivers/soc/fsl/qbman/bman.c 	snprintf(portal->irqname, MAX_IRQNAME, IRQNAME, c->cpu);
cpu               566 drivers/soc/fsl/qbman/bman.c 	if (dpaa_set_portal_irq_affinity(c->dev, c->irq, c->cpu))
cpu               599 drivers/soc/fsl/qbman/bman.c 	portal = &per_cpu(bman_affine_portal, c->cpu);
cpu               605 drivers/soc/fsl/qbman/bman.c 	cpumask_set_cpu(c->cpu, &affine_mask);
cpu                45 drivers/soc/fsl/qbman/bman_portal.c 			 __func__, pcfg->cpu);
cpu                50 drivers/soc/fsl/qbman/bman_portal.c 	affine_bportals[pcfg->cpu] = p;
cpu                52 drivers/soc/fsl/qbman/bman_portal.c 	dev_info(pcfg->dev, "Portal initialised, cpu %d\n", pcfg->cpu);
cpu                57 drivers/soc/fsl/qbman/bman_portal.c static int bman_offline_cpu(unsigned int cpu)
cpu                59 drivers/soc/fsl/qbman/bman_portal.c 	struct bman_portal *p = affine_bportals[cpu];
cpu                70 drivers/soc/fsl/qbman/bman_portal.c 	cpu = cpumask_any_but(cpu_online_mask, cpu);
cpu                71 drivers/soc/fsl/qbman/bman_portal.c 	irq_set_affinity(pcfg->irq, cpumask_of(cpu));
cpu                75 drivers/soc/fsl/qbman/bman_portal.c static int bman_online_cpu(unsigned int cpu)
cpu                77 drivers/soc/fsl/qbman/bman_portal.c 	struct bman_portal *p = affine_bportals[cpu];
cpu                87 drivers/soc/fsl/qbman/bman_portal.c 	irq_set_affinity(pcfg->irq, cpumask_of(cpu));
cpu               103 drivers/soc/fsl/qbman/bman_portal.c 	int irq, cpu, err, i;
cpu               135 drivers/soc/fsl/qbman/bman_portal.c 	pcfg->cpu = -1;
cpu               158 drivers/soc/fsl/qbman/bman_portal.c 	cpu = cpumask_next_zero(-1, &portal_cpus);
cpu               159 drivers/soc/fsl/qbman/bman_portal.c 	if (cpu >= nr_cpu_ids) {
cpu               166 drivers/soc/fsl/qbman/bman_portal.c 	cpumask_set_cpu(cpu, &portal_cpus);
cpu               168 drivers/soc/fsl/qbman/bman_portal.c 	pcfg->cpu = cpu;
cpu               176 drivers/soc/fsl/qbman/bman_portal.c 	if (!cpu_online(cpu))
cpu               177 drivers/soc/fsl/qbman/bman_portal.c 		bman_offline_cpu(cpu);
cpu                57 drivers/soc/fsl/qbman/bman_priv.h 	int cpu;
cpu               115 drivers/soc/fsl/qbman/dpaa_sys.h 					       int irq, int cpu)
cpu               124 drivers/soc/fsl/qbman/dpaa_sys.h 	if (cpu == -1 || !cpu_online(cpu))
cpu               125 drivers/soc/fsl/qbman/dpaa_sys.h 		cpu = cpumask_any(cpu_online_mask);
cpu               127 drivers/soc/fsl/qbman/dpaa_sys.h 	ret = irq_set_affinity(irq, cpumask_of(cpu));
cpu               129 drivers/soc/fsl/qbman/dpaa_sys.h 		dev_err(dev, "irq_set_affinity() on CPU %d failed\n", cpu);
cpu              1300 drivers/soc/fsl/qbman/qman.c 	snprintf(portal->irqname, MAX_IRQNAME, IRQNAME, c->cpu);
cpu              1307 drivers/soc/fsl/qbman/qman.c 	if (dpaa_set_portal_irq_affinity(c->dev, c->irq, c->cpu))
cpu              1366 drivers/soc/fsl/qbman/qman.c 	portal = &per_cpu(qman_affine_portal, c->cpu);
cpu              1372 drivers/soc/fsl/qbman/qman.c 	cpumask_set_cpu(c->cpu, &affine_mask);
cpu              1373 drivers/soc/fsl/qbman/qman.c 	affine_channels[c->cpu] = c->channel;
cpu              1374 drivers/soc/fsl/qbman/qman.c 	affine_portals[c->cpu] = portal;
cpu              1415 drivers/soc/fsl/qbman/qman.c 	int cpu;
cpu              1418 drivers/soc/fsl/qbman/qman.c 	cpu = pcfg->cpu;
cpu              1423 drivers/soc/fsl/qbman/qman.c 	cpumask_clear_cpu(cpu, &affine_mask);
cpu              1733 drivers/soc/fsl/qbman/qman.c u16 qman_affine_channel(int cpu)
cpu              1735 drivers/soc/fsl/qbman/qman.c 	if (cpu < 0) {
cpu              1738 drivers/soc/fsl/qbman/qman.c 		cpu = portal->config->cpu;
cpu              1741 drivers/soc/fsl/qbman/qman.c 	WARN_ON(!cpumask_test_cpu(cpu, &affine_mask));
cpu              1742 drivers/soc/fsl/qbman/qman.c 	return affine_channels[cpu];
cpu              1746 drivers/soc/fsl/qbman/qman.c struct qman_portal *qman_get_affine_portal(int cpu)
cpu              1748 drivers/soc/fsl/qbman/qman.c 	return affine_portals[cpu];
cpu                45 drivers/soc/fsl/qbman/qman_portal.c static void portal_set_cpu(struct qm_portal_config *pcfg, int cpu)
cpu                77 drivers/soc/fsl/qbman/qman_portal.c 	stash_attr.cpu = cpu;
cpu               111 drivers/soc/fsl/qbman/qman_portal.c 	qman_set_sdest(pcfg->channel, cpu);
cpu               133 drivers/soc/fsl/qbman/qman_portal.c 	portal_set_cpu(pcfg, pcfg->cpu);
cpu               138 drivers/soc/fsl/qbman/qman_portal.c 			 __func__, pcfg->cpu);
cpu               163 drivers/soc/fsl/qbman/qman_portal.c 	dev_info(pcfg->dev, "Portal initialised, cpu %d\n", pcfg->cpu);
cpu               169 drivers/soc/fsl/qbman/qman_portal.c 							unsigned int cpu)
cpu               176 drivers/soc/fsl/qbman/qman_portal.c 		stash_attr.cpu = cpu;
cpu               187 drivers/soc/fsl/qbman/qman_portal.c 	qman_set_sdest(pcfg->channel, cpu);
cpu               190 drivers/soc/fsl/qbman/qman_portal.c static int qman_offline_cpu(unsigned int cpu)
cpu               195 drivers/soc/fsl/qbman/qman_portal.c 	p = affine_portals[cpu];
cpu               200 drivers/soc/fsl/qbman/qman_portal.c 			cpu = cpumask_any_but(cpu_online_mask, cpu);
cpu               201 drivers/soc/fsl/qbman/qman_portal.c 			irq_set_affinity(pcfg->irq, cpumask_of(cpu));
cpu               202 drivers/soc/fsl/qbman/qman_portal.c 			qman_portal_update_sdest(pcfg, cpu);
cpu               208 drivers/soc/fsl/qbman/qman_portal.c static int qman_online_cpu(unsigned int cpu)
cpu               213 drivers/soc/fsl/qbman/qman_portal.c 	p = affine_portals[cpu];
cpu               217 drivers/soc/fsl/qbman/qman_portal.c 			irq_set_affinity(pcfg->irq, cpumask_of(cpu));
cpu               218 drivers/soc/fsl/qbman/qman_portal.c 			qman_portal_update_sdest(pcfg, cpu);
cpu               236 drivers/soc/fsl/qbman/qman_portal.c 	int irq, cpu, err, i;
cpu               276 drivers/soc/fsl/qbman/qman_portal.c 	pcfg->cpu = -1;
cpu               300 drivers/soc/fsl/qbman/qman_portal.c 	cpu = cpumask_next_zero(-1, &portal_cpus);
cpu               301 drivers/soc/fsl/qbman/qman_portal.c 	if (cpu >= nr_cpu_ids) {
cpu               308 drivers/soc/fsl/qbman/qman_portal.c 	cpumask_set_cpu(cpu, &portal_cpus);
cpu               310 drivers/soc/fsl/qbman/qman_portal.c 	pcfg->cpu = cpu;
cpu               323 drivers/soc/fsl/qbman/qman_portal.c 	if (!cpu_online(cpu))
cpu               324 drivers/soc/fsl/qbman/qman_portal.c 		qman_offline_cpu(cpu);
cpu               165 drivers/soc/fsl/qbman/qman_priv.h 	int cpu;
cpu               104 drivers/soc/fsl/qbman/qman_test_stash.c 	int cpu;
cpu               106 drivers/soc/fsl/qbman/qman_test_stash.c 	for_each_cpu(cpu, cpu_online_mask) {
cpu               112 drivers/soc/fsl/qbman/qman_test_stash.c 			"hotpotato%d", cpu);
cpu               117 drivers/soc/fsl/qbman/qman_test_stash.c 		kthread_bind(k, cpu);
cpu               203 drivers/soc/qcom/spm.c static int __init qcom_cpuidle_init(struct device_node *cpu_node, int cpu)
cpu               247 drivers/soc/qcom/spm.c 	fns = devm_kcalloc(get_cpu_device(cpu), state_count, sizeof(*fns),
cpu               258 drivers/soc/qcom/spm.c 		cpumask_set_cpu(cpu, &mask);
cpu               262 drivers/soc/qcom/spm.c 	per_cpu(qcom_idle_ops, cpu) = fns;
cpu               270 drivers/soc/qcom/spm.c 	return per_cpu(cpu_spm_drv, cpu) ? 0 : -ENXIO;
cpu               286 drivers/soc/qcom/spm.c 	int cpu;
cpu               289 drivers/soc/qcom/spm.c 	for_each_possible_cpu(cpu) {
cpu               290 drivers/soc/qcom/spm.c 		cpu_node = of_cpu_device_node_get(cpu);
cpu               304 drivers/soc/qcom/spm.c 			*spm_cpu = cpu;
cpu               326 drivers/soc/qcom/spm.c 	int cpu;
cpu               328 drivers/soc/qcom/spm.c 	drv = spm_get_drv(pdev, &cpu);
cpu               365 drivers/soc/qcom/spm.c 	per_cpu(cpu_spm_drv, cpu) = drv;
cpu                35 drivers/soc/renesas/r9a06g032-smp.c r9a06g032_smp_boot_secondary(unsigned int cpu,
cpu                44 drivers/soc/renesas/r9a06g032-smp.c 	arch_send_wakeup_ipi_mask(cpumask_of(cpu));
cpu               458 drivers/soc/renesas/rcar-sysc.c int rcar_sysc_power_down_cpu(unsigned int cpu)
cpu               460 drivers/soc/renesas/rcar-sysc.c 	return rcar_sysc_power_cpu(cpu, false);
cpu               463 drivers/soc/renesas/rcar-sysc.c int rcar_sysc_power_up_cpu(unsigned int cpu)
cpu               465 drivers/soc/renesas/rcar-sysc.c 	return rcar_sysc_power_cpu(cpu, true);
cpu               430 drivers/soc/ti/knav_qmss_queue.c 	int cpu = 0;
cpu               443 drivers/soc/ti/knav_qmss_queue.c 		for_each_possible_cpu(cpu) {
cpu               444 drivers/soc/ti/knav_qmss_queue.c 			pushes += per_cpu_ptr(qh->stats, cpu)->pushes;
cpu               445 drivers/soc/ti/knav_qmss_queue.c 			pops += per_cpu_ptr(qh->stats, cpu)->pops;
cpu               446 drivers/soc/ti/knav_qmss_queue.c 			push_errors += per_cpu_ptr(qh->stats, cpu)->push_errors;
cpu               447 drivers/soc/ti/knav_qmss_queue.c 			pop_errors += per_cpu_ptr(qh->stats, cpu)->pop_errors;
cpu               448 drivers/soc/ti/knav_qmss_queue.c 			notifies += per_cpu_ptr(qh->stats, cpu)->notifies;
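The knav_qmss_queue entries above, like the fcoe_transport and fc_lport stats code earlier, keep counters in per-CPU storage and sum them over every possible CPU when reporting. A minimal sketch of that pattern; the struct, field and function names are placeholders:

    #include <linux/percpu.h>
    #include <linux/cpumask.h>
    #include <linux/types.h>
    #include <linux/errno.h>

    struct example_stats {
            u64 pushes;
            u64 pops;
    };

    static struct example_stats __percpu *example_stats;

    static int example_stats_init(void)
    {
            example_stats = alloc_percpu(struct example_stats);
            return example_stats ? 0 : -ENOMEM;
    }

    static void example_count_push(void)
    {
            this_cpu_inc(example_stats->pushes);    /* lockless, local CPU only */
    }

    static u64 example_total_pushes(void)
    {
            u64 sum = 0;
            int cpu;

            for_each_possible_cpu(cpu)
                    sum += per_cpu_ptr(example_stats, cpu)->pushes;
            return sum;
    }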
cpu                72 drivers/staging/media/hantro/hantro_drv.c 	if (ctx->jpeg_enc.bounce_buffer.cpu) {
cpu                75 drivers/staging/media/hantro/hantro_drv.c 		       ctx->jpeg_enc.bounce_buffer.cpu, bytesused);
cpu                92 drivers/staging/media/hantro/hantro_g1_mpeg2_dec.c 	hantro_mpeg2_dec_copy_qtable(ctx->mpeg2_dec.qtable.cpu,
cpu               446 drivers/staging/media/hantro/hantro_g1_vp8_dec.c 	if (VP8_FRAME_IS_KEY_FRAME(hdr) && ctx->vp8_dec.segment_map.cpu)
cpu               447 drivers/staging/media/hantro/hantro_g1_vp8_dec.c 		memset(ctx->vp8_dec.segment_map.cpu, 0,
cpu               205 drivers/staging/media/hantro/hantro_h264.c 	struct hantro_h264_dec_priv_tbl *tbl = ctx->h264_dec.priv.cpu;
cpu               228 drivers/staging/media/hantro/hantro_h264.c 	struct hantro_h264_dec_priv_tbl *tbl = ctx->h264_dec.priv.cpu;
cpu               614 drivers/staging/media/hantro/hantro_h264.c 	dma_free_coherent(vpu->dev, priv->size, priv->cpu, priv->dma);
cpu               625 drivers/staging/media/hantro/hantro_h264.c 	priv->cpu = dma_alloc_coherent(vpu->dev, sizeof(*tbl), &priv->dma,
cpu               627 drivers/staging/media/hantro/hantro_h264.c 	if (!priv->cpu)
cpu               631 drivers/staging/media/hantro/hantro_h264.c 	tbl = priv->cpu;
cpu                33 drivers/staging/media/hantro/hantro_hw.h 	void *cpu;
cpu               300 drivers/staging/media/hantro/hantro_jpeg.c 	ctx->jpeg_enc.bounce_buffer.cpu =
cpu               306 drivers/staging/media/hantro/hantro_jpeg.c 	if (!ctx->jpeg_enc.bounce_buffer.cpu)
cpu               316 drivers/staging/media/hantro/hantro_jpeg.c 		       ctx->jpeg_enc.bounce_buffer.cpu,
cpu                43 drivers/staging/media/hantro/hantro_mpeg2.c 	ctx->mpeg2_dec.qtable.cpu =
cpu                48 drivers/staging/media/hantro/hantro_mpeg2.c 	if (!ctx->mpeg2_dec.qtable.cpu)
cpu                59 drivers/staging/media/hantro/hantro_mpeg2.c 			  ctx->mpeg2_dec.qtable.cpu,
cpu                57 drivers/staging/media/hantro/hantro_vp8.c 	dst = ctx->vp8_dec.prob_tbl.cpu;
cpu               111 drivers/staging/media/hantro/hantro_vp8.c 	dst = ctx->vp8_dec.prob_tbl.cpu;
cpu               126 drivers/staging/media/hantro/hantro_vp8.c 	dst = ctx->vp8_dec.prob_tbl.cpu;
cpu               164 drivers/staging/media/hantro/hantro_vp8.c 	aux_buf->cpu = dma_alloc_coherent(vpu->dev, aux_buf->size,
cpu               166 drivers/staging/media/hantro/hantro_vp8.c 	if (!aux_buf->cpu)
cpu               175 drivers/staging/media/hantro/hantro_vp8.c 	aux_buf->cpu = dma_alloc_coherent(vpu->dev, aux_buf->size,
cpu               177 drivers/staging/media/hantro/hantro_vp8.c 	if (!aux_buf->cpu) {
cpu               186 drivers/staging/media/hantro/hantro_vp8.c 			  ctx->vp8_dec.segment_map.cpu,
cpu               198 drivers/staging/media/hantro/hantro_vp8.c 			  vp8_dec->segment_map.cpu, vp8_dec->segment_map.dma);
cpu               200 drivers/staging/media/hantro/hantro_vp8.c 			  vp8_dec->prob_tbl.cpu, vp8_dec->prob_tbl.dma);
cpu                94 drivers/staging/media/hantro/rk3399_vpu_hw_mpeg2_dec.c 	hantro_mpeg2_dec_copy_qtable(ctx->mpeg2_dec.qtable.cpu, quantization);
cpu               525 drivers/staging/media/hantro/rk3399_vpu_hw_vp8_dec.c 	if (VP8_FRAME_IS_KEY_FRAME(hdr) && ctx->vp8_dec.segment_map.cpu)
cpu               526 drivers/staging/media/hantro/rk3399_vpu_hw_vp8_dec.c 		memset(ctx->vp8_dec.segment_map.cpu, 0,
cpu              1468 drivers/staging/qlge/qlge.h 	u32 cpu;		/* Which CPU this should run on. */
cpu              1798 drivers/staging/qlge/qlge_dbg.c 	pr_err("rx_ring->cpu = %d\n", rx_ring->cpu);
cpu              4143 drivers/staging/qlge/qlge_main.c 		rx_ring->cpu = i % cpu_cnt;	/* CPU to run handler on. */
cpu              3575 drivers/target/iscsi/iscsi_target.c 	int ord, cpu;
cpu              3585 drivers/target/iscsi/iscsi_target.c 	for_each_online_cpu(cpu) {
cpu              3587 drivers/target/iscsi/iscsi_target.c 			cpumask_set_cpu(cpu, conn->conn_cpumask);
cpu               176 drivers/target/iscsi/iscsi_target_util.c 	int size, tag, cpu;
cpu               178 drivers/target/iscsi/iscsi_target_util.c 	tag = sbitmap_queue_get(&se_sess->sess_tag_pool, &cpu);
cpu               180 drivers/target/iscsi/iscsi_target_util.c 		tag = iscsit_wait_for_tag(se_sess, state, &cpu);
cpu               189 drivers/target/iscsi/iscsi_target_util.c 	cmd->se_cmd.map_cpu = cpu;
cpu               915 drivers/target/sbp/sbp_target.c 	int tag, cpu;
cpu               917 drivers/target/sbp/sbp_target.c 	tag = sbitmap_queue_get(&se_sess->sess_tag_pool, &cpu);
cpu               924 drivers/target/sbp/sbp_target.c 	req->se_cmd.map_cpu = cpu;
cpu               431 drivers/target/tcm_fc/tfc_cmd.c 	int tag, cpu;
cpu               433 drivers/target/tcm_fc/tfc_cmd.c 	tag = sbitmap_queue_get(&se_sess->sess_tag_pool, &cpu);
cpu               441 drivers/target/tcm_fc/tfc_cmd.c 	cmd->se_cmd.map_cpu = cpu;
cpu               139 drivers/thermal/cpu_cooling.c 	int num_opps = 0, cpu = cpufreq_cdev->policy->cpu, i;
cpu               141 drivers/thermal/cpu_cooling.c 	dev = get_cpu_device(cpu);
cpu               143 drivers/thermal/cpu_cooling.c 		pr_warn("No cpu device for cpu %d\n", cpu);
cpu               229 drivers/thermal/cpu_cooling.c static u32 get_load(struct cpufreq_cooling_device *cpufreq_cdev, int cpu,
cpu               236 drivers/thermal/cpu_cooling.c 	now_idle = get_cpu_idle_time(cpu, &now, 0);
cpu               366 drivers/thermal/cpu_cooling.c 	int i = 0, cpu;
cpu               372 drivers/thermal/cpu_cooling.c 	freq = cpufreq_quick_get(policy->cpu);
cpu               380 drivers/thermal/cpu_cooling.c 	for_each_cpu(cpu, policy->related_cpus) {
cpu               383 drivers/thermal/cpu_cooling.c 		if (cpu_online(cpu))
cpu               384 drivers/thermal/cpu_cooling.c 			load = get_load(cpufreq_cdev, cpu, i);
cpu               540 drivers/thermal/cpu_cooling.c 	dev = get_cpu_device(policy->cpu);
cpu               542 drivers/thermal/cpu_cooling.c 		pr_warn("No cpu device for cpu %d\n", policy->cpu);
cpu               692 drivers/thermal/cpu_cooling.c 	struct device_node *np = of_get_cpu_node(policy->cpu, NULL);
cpu               698 drivers/thermal/cpu_cooling.c 		       policy->cpu);
cpu               709 drivers/thermal/cpu_cooling.c 			       policy->cpu, PTR_ERR(cdev));
cpu               667 drivers/thermal/imx_thermal.c 	np = of_get_cpu_node(data->policy->cpu, NULL);
cpu               230 drivers/thermal/intel/int340x_thermal/processor_thermal_device.c 	int cpu;
cpu               237 drivers/thermal/intel/int340x_thermal/processor_thermal_device.c 	for_each_online_cpu(cpu) {
cpu               238 drivers/thermal/intel/int340x_thermal/processor_thermal_device.c 		err = rdmsr_safe_on_cpu(cpu, MSR_IA32_THERM_STATUS, &eax,
cpu               465 drivers/thermal/intel/int340x_thermal/processor_thermal_device.c static int rapl_mmio_cpu_online(unsigned int cpu)
cpu               470 drivers/thermal/intel/int340x_thermal/processor_thermal_device.c 	if (topology_physical_package_id(cpu))
cpu               473 drivers/thermal/intel/int340x_thermal/processor_thermal_device.c 	rp = rapl_find_package_domain(cpu, &rapl_mmio_priv);
cpu               475 drivers/thermal/intel/int340x_thermal/processor_thermal_device.c 		rp = rapl_add_package(cpu, &rapl_mmio_priv);
cpu               479 drivers/thermal/intel/int340x_thermal/processor_thermal_device.c 	cpumask_set_cpu(cpu, &rp->cpumask);
cpu               483 drivers/thermal/intel/int340x_thermal/processor_thermal_device.c static int rapl_mmio_cpu_down_prep(unsigned int cpu)
cpu               488 drivers/thermal/intel/int340x_thermal/processor_thermal_device.c 	rp = rapl_find_package_domain(cpu, &rapl_mmio_priv);
cpu               492 drivers/thermal/intel/int340x_thermal/processor_thermal_device.c 	cpumask_clear_cpu(cpu, &rp->cpumask);
cpu               496 drivers/thermal/intel/int340x_thermal/processor_thermal_device.c 	else if (rp->lead_cpu == cpu)
cpu               501 drivers/thermal/intel/int340x_thermal/processor_thermal_device.c static int rapl_mmio_read_raw(int cpu, struct reg_action *ra)
cpu               511 drivers/thermal/intel/int340x_thermal/processor_thermal_device.c static int rapl_mmio_write_raw(int cpu, struct reg_action *ra)
cpu                80 drivers/thermal/intel/intel_powerclamp.c 	unsigned int cpu;
cpu               404 drivers/thermal/intel/intel_powerclamp.c 	if (clamping && w_data->clamping && cpu_online(w_data->cpu))
cpu               421 drivers/thermal/intel/intel_powerclamp.c 	if (w_data->cpu == control_cpu &&
cpu               436 drivers/thermal/intel/intel_powerclamp.c 	if (clamping && w_data->clamping && cpu_online(w_data->cpu))
cpu               477 drivers/thermal/intel/intel_powerclamp.c static void start_power_clamp_worker(unsigned long cpu)
cpu               479 drivers/thermal/intel/intel_powerclamp.c 	struct powerclamp_worker_data *w_data = per_cpu_ptr(worker_data, cpu);
cpu               482 drivers/thermal/intel/intel_powerclamp.c 	worker = kthread_create_worker_on_cpu(cpu, 0, "kidle_inj/%ld", cpu);
cpu               488 drivers/thermal/intel/intel_powerclamp.c 	w_data->cpu = cpu;
cpu               490 drivers/thermal/intel/intel_powerclamp.c 	set_bit(cpu, cpu_clamping_mask);
cpu               498 drivers/thermal/intel/intel_powerclamp.c static void stop_power_clamp_worker(unsigned long cpu)
cpu               500 drivers/thermal/intel/intel_powerclamp.c 	struct powerclamp_worker_data *w_data = per_cpu_ptr(worker_data, cpu);
cpu               522 drivers/thermal/intel/intel_powerclamp.c 	clear_bit(w_data->cpu, cpu_clamping_mask);
cpu               530 drivers/thermal/intel/intel_powerclamp.c 	unsigned long cpu;
cpu               545 drivers/thermal/intel/intel_powerclamp.c 	for_each_online_cpu(cpu) {
cpu               546 drivers/thermal/intel/intel_powerclamp.c 		start_power_clamp_worker(cpu);
cpu               571 drivers/thermal/intel/intel_powerclamp.c static int powerclamp_cpu_online(unsigned int cpu)
cpu               575 drivers/thermal/intel/intel_powerclamp.c 	start_power_clamp_worker(cpu);
cpu               577 drivers/thermal/intel/intel_powerclamp.c 	if (cpu == 0) {
cpu               584 drivers/thermal/intel/intel_powerclamp.c static int powerclamp_cpu_predown(unsigned int cpu)
cpu               589 drivers/thermal/intel/intel_powerclamp.c 	stop_power_clamp_worker(cpu);
cpu               590 drivers/thermal/intel/intel_powerclamp.c 	if (cpu != control_cpu)
cpu               594 drivers/thermal/intel/intel_powerclamp.c 	if (control_cpu == cpu)
cpu               595 drivers/thermal/intel/intel_powerclamp.c 		control_cpu = cpumask_next(cpu, cpu_online_mask);
cpu                47 drivers/thermal/intel/x86_pkg_temp_thermal.c 	int				cpu;
cpu                96 drivers/thermal/intel/x86_pkg_temp_thermal.c static struct zone_device *pkg_temp_thermal_get_dev(unsigned int cpu)
cpu                98 drivers/thermal/intel/x86_pkg_temp_thermal.c 	int id = topology_logical_die_id(cpu);
cpu               109 drivers/thermal/intel/x86_pkg_temp_thermal.c static int get_tj_max(int cpu, u32 *tj_max)
cpu               114 drivers/thermal/intel/x86_pkg_temp_thermal.c 	err = rdmsr_safe_on_cpu(cpu, MSR_IA32_TEMPERATURE_TARGET, &eax, &edx);
cpu               129 drivers/thermal/intel/x86_pkg_temp_thermal.c 	rdmsr_on_cpu(zonedev->cpu, MSR_IA32_PACKAGE_THERM_STATUS,
cpu               158 drivers/thermal/intel/x86_pkg_temp_thermal.c 	ret = rdmsr_on_cpu(zonedev->cpu, MSR_IA32_PACKAGE_THERM_INTERRUPT,
cpu               183 drivers/thermal/intel/x86_pkg_temp_thermal.c 	ret = rdmsr_on_cpu(zonedev->cpu, MSR_IA32_PACKAGE_THERM_INTERRUPT,
cpu               209 drivers/thermal/intel/x86_pkg_temp_thermal.c 	return wrmsr_on_cpu(zonedev->cpu, MSR_IA32_PACKAGE_THERM_INTERRUPT,
cpu               264 drivers/thermal/intel/x86_pkg_temp_thermal.c 	int cpu = smp_processor_id();
cpu               272 drivers/thermal/intel/x86_pkg_temp_thermal.c 	zonedev = pkg_temp_thermal_get_dev(cpu);
cpu               300 drivers/thermal/intel/x86_pkg_temp_thermal.c static void pkg_thermal_schedule_work(int cpu, struct delayed_work *work)
cpu               304 drivers/thermal/intel/x86_pkg_temp_thermal.c 	schedule_delayed_work_on(cpu, work, ms);
cpu               309 drivers/thermal/intel/x86_pkg_temp_thermal.c 	int cpu = smp_processor_id();
cpu               319 drivers/thermal/intel/x86_pkg_temp_thermal.c 	zonedev = pkg_temp_thermal_get_dev(cpu);
cpu               322 drivers/thermal/intel/x86_pkg_temp_thermal.c 		pkg_thermal_schedule_work(zonedev->cpu, &zonedev->work);
cpu               329 drivers/thermal/intel/x86_pkg_temp_thermal.c static int pkg_temp_thermal_device_add(unsigned int cpu)
cpu               331 drivers/thermal/intel/x86_pkg_temp_thermal.c 	int id = topology_logical_die_id(cpu);
cpu               346 drivers/thermal/intel/x86_pkg_temp_thermal.c 	err = get_tj_max(cpu, &tj_max);
cpu               355 drivers/thermal/intel/x86_pkg_temp_thermal.c 	zonedev->cpu = cpu;
cpu               370 drivers/thermal/intel/x86_pkg_temp_thermal.c 	cpumask_set_cpu(cpu, &zonedev->cpumask);
cpu               377 drivers/thermal/intel/x86_pkg_temp_thermal.c static int pkg_thermal_cpu_offline(unsigned int cpu)
cpu               379 drivers/thermal/intel/x86_pkg_temp_thermal.c 	struct zone_device *zonedev = pkg_temp_thermal_get_dev(cpu);
cpu               386 drivers/thermal/intel/x86_pkg_temp_thermal.c 	target = cpumask_any_but(&zonedev->cpumask, cpu);
cpu               387 drivers/thermal/intel/x86_pkg_temp_thermal.c 	cpumask_clear_cpu(cpu, &zonedev->cpumask);
cpu               417 drivers/thermal/intel/x86_pkg_temp_thermal.c 	was_target = zonedev->cpu == cpu;
cpu               418 drivers/thermal/intel/x86_pkg_temp_thermal.c 	zonedev->cpu = target;
cpu               427 drivers/thermal/intel/x86_pkg_temp_thermal.c 		zones[topology_logical_die_id(cpu)] = NULL;
cpu               463 drivers/thermal/intel/x86_pkg_temp_thermal.c static int pkg_thermal_cpu_online(unsigned int cpu)
cpu               465 drivers/thermal/intel/x86_pkg_temp_thermal.c 	struct zone_device *zonedev = pkg_temp_thermal_get_dev(cpu);
cpu               466 drivers/thermal/intel/x86_pkg_temp_thermal.c 	struct cpuinfo_x86 *c = &cpu_data(cpu);
cpu               474 drivers/thermal/intel/x86_pkg_temp_thermal.c 		cpumask_set_cpu(cpu, &zonedev->cpumask);
cpu               477 drivers/thermal/intel/x86_pkg_temp_thermal.c 	return pkg_temp_thermal_device_add(cpu);
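The x86_pkg_temp_thermal offline path above uses cpumask_any_but() to pick a surviving CPU before clearing the dying one from the zone mask, and hands the MSR-polling role over if the dying CPU was the target. A sketch of that hand-over under those assumptions; struct example_zone and its fields are illustrative stand-ins for the driver's zone_device:

    #include <linux/cpumask.h>

    struct example_zone {
            unsigned int cpu;               /* CPU currently used for MSR access */
            struct cpumask cpumask;         /* online CPUs in this package/die */
    };

    static int example_zone_cpu_offline(struct example_zone *zone, unsigned int cpu)
    {
            unsigned int target;

            target = cpumask_any_but(&zone->cpumask, cpu);
            cpumask_clear_cpu(cpu, &zone->cpumask);

            if (target >= nr_cpu_ids)
                    return 0;               /* last CPU: the zone would be torn down here */

            if (zone->cpu == cpu)
                    zone->cpu = target;     /* hand MSR polling to a surviving CPU */
            return 0;
    }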
cpu               144 drivers/tty/mips_ejtag_fdc.c 	unsigned int			 cpu;
cpu               308 drivers/tty/mips_ejtag_fdc.c 	unsigned int i, buf_len, cpu;
cpu               316 drivers/tty/mips_ejtag_fdc.c 	cpu = smp_processor_id();
cpu               317 drivers/tty/mips_ejtag_fdc.c 	regs = cons->regs[cpu];
cpu               321 drivers/tty/mips_ejtag_fdc.c 		cons->regs[cpu] = regs;
cpu               662 drivers/tty/mips_ejtag_fdc.c 	if (smp_processor_id() != priv->cpu)
cpu               895 drivers/tty/mips_ejtag_fdc.c 	priv->cpu = dev->cpu;
cpu               926 drivers/tty/mips_ejtag_fdc.c 	snprintf(priv->fdc_name, sizeof(priv->fdc_name), "ttyFDC%u", dev->cpu);
cpu               952 drivers/tty/mips_ejtag_fdc.c 	mips_ejtag_fdc_con.regs[dev->cpu] = priv->reg;
cpu               953 drivers/tty/mips_ejtag_fdc.c 	if (dev->cpu == 0)
cpu               968 drivers/tty/mips_ejtag_fdc.c 	kthread_bind(priv->thread, dev->cpu);
cpu              1009 drivers/tty/mips_ejtag_fdc.c 		add_timer_on(&priv->poll_timer, dev->cpu);
cpu              1038 drivers/tty/mips_ejtag_fdc.c 	if (dev->cpu == 0)
cpu              1093 drivers/tty/mips_ejtag_fdc.c 		add_timer_on(&priv->poll_timer, dev->cpu);
cpu              1104 drivers/tty/mips_ejtag_fdc.c 	kthread_bind(priv->thread, dev->cpu);
cpu              1163 drivers/tty/mips_ejtag_fdc.c 	unsigned int cpu;
cpu              1166 drivers/tty/mips_ejtag_fdc.c 	cpu = smp_processor_id();
cpu              1167 drivers/tty/mips_ejtag_fdc.c 	regs = mips_ejtag_fdc_con.regs[cpu];
cpu              1171 drivers/tty/mips_ejtag_fdc.c 		mips_ejtag_fdc_con.regs[cpu] = regs;
cpu               406 drivers/tty/serial/sunhv.c void sunhv_migrate_hvcons_irq(int cpu)
cpu               409 drivers/tty/serial/sunhv.c 	irq_force_affinity(sunhv_port->irq, cpumask_of(cpu));
cpu              1074 drivers/usb/gadget/function/f_tcm.c 	int tag, cpu;
cpu              1076 drivers/usb/gadget/function/f_tcm.c 	tag = sbitmap_queue_get(&se_sess->sess_tag_pool, &cpu);
cpu              1083 drivers/usb/gadget/function/f_tcm.c 	cmd->se_cmd.map_cpu = cpu;
cpu               578 drivers/vhost/scsi.c 	int tag, cpu;
cpu               587 drivers/vhost/scsi.c 	tag = sbitmap_queue_get(&se_sess->sess_tag_pool, &cpu);
cpu               602 drivers/vhost/scsi.c 	cmd->tvc_se_cmd.map_cpu = cpu;
cpu                71 drivers/video/fbdev/gbefb.c 	uint16_t *cpu;
cpu              1022 drivers/video/fbdev/gbefb.c 	tile = &gbe_tiles.cpu[offset >> TILE_SHIFT];
cpu              1160 drivers/video/fbdev/gbefb.c 	gbe_tiles.cpu = dmam_alloc_coherent(&p_dev->dev,
cpu              1163 drivers/video/fbdev/gbefb.c 	if (!gbe_tiles.cpu) {
cpu              1200 drivers/video/fbdev/gbefb.c 		gbe_tiles.cpu[i] = (gbe_mem_phys >> TILE_SHIFT) + i;
cpu               112 drivers/watchdog/octeon-wdt-main.c static int cpu2core(int cpu)
cpu               115 drivers/watchdog/octeon-wdt-main.c 	return cpu_logical_map(cpu) & 0x3f;
cpu               131 drivers/watchdog/octeon-wdt-main.c 	int cpu = raw_smp_processor_id();
cpu               132 drivers/watchdog/octeon-wdt-main.c 	unsigned int core = cpu2core(cpu);
cpu               133 drivers/watchdog/octeon-wdt-main.c 	int node = cpu_to_node(cpu);
cpu               136 drivers/watchdog/octeon-wdt-main.c 		if (per_cpu_countdown[cpu] > 0) {
cpu               139 drivers/watchdog/octeon-wdt-main.c 			per_cpu_countdown[cpu]--;
cpu               143 drivers/watchdog/octeon-wdt-main.c 			cpumask_clear_cpu(cpu, &irq_enabled_cpus);
cpu               299 drivers/watchdog/octeon-wdt-main.c static int octeon_wdt_cpu_to_irq(int cpu)
cpu               305 drivers/watchdog/octeon-wdt-main.c 	coreid = cpu2core(cpu);
cpu               306 drivers/watchdog/octeon-wdt-main.c 	node = cpu_to_node(cpu);
cpu               322 drivers/watchdog/octeon-wdt-main.c static int octeon_wdt_cpu_pre_down(unsigned int cpu)
cpu               328 drivers/watchdog/octeon-wdt-main.c 	core = cpu2core(cpu);
cpu               330 drivers/watchdog/octeon-wdt-main.c 	node = cpu_to_node(cpu);
cpu               339 drivers/watchdog/octeon-wdt-main.c 	free_irq(octeon_wdt_cpu_to_irq(cpu), octeon_wdt_poke_irq);
cpu               343 drivers/watchdog/octeon-wdt-main.c static int octeon_wdt_cpu_online(unsigned int cpu)
cpu               352 drivers/watchdog/octeon-wdt-main.c 	core = cpu2core(cpu);
cpu               353 drivers/watchdog/octeon-wdt-main.c 	node = cpu_to_node(cpu);
cpu               361 drivers/watchdog/octeon-wdt-main.c 	per_cpu_countdown[cpu] = countdown_reset;
cpu               384 drivers/watchdog/octeon-wdt-main.c 		cpumask_set_cpu(cpu, &mask);
cpu               388 drivers/watchdog/octeon-wdt-main.c 	cpumask_set_cpu(cpu, &irq_enabled_cpus);
cpu               404 drivers/watchdog/octeon-wdt-main.c 	int cpu;
cpu               411 drivers/watchdog/octeon-wdt-main.c 	for_each_online_cpu(cpu) {
cpu               412 drivers/watchdog/octeon-wdt-main.c 		coreid = cpu2core(cpu);
cpu               413 drivers/watchdog/octeon-wdt-main.c 		node = cpu_to_node(cpu);
cpu               415 drivers/watchdog/octeon-wdt-main.c 		per_cpu_countdown[cpu] = countdown_reset;
cpu               417 drivers/watchdog/octeon-wdt-main.c 		    !cpumask_test_cpu(cpu, &irq_enabled_cpus)) {
cpu               419 drivers/watchdog/octeon-wdt-main.c 			enable_irq(octeon_wdt_cpu_to_irq(cpu));
cpu               420 drivers/watchdog/octeon-wdt-main.c 			cpumask_set_cpu(cpu, &irq_enabled_cpus);
cpu               455 drivers/watchdog/octeon-wdt-main.c 	int cpu;
cpu               468 drivers/watchdog/octeon-wdt-main.c 	for_each_online_cpu(cpu) {
cpu               469 drivers/watchdog/octeon-wdt-main.c 		coreid = cpu2core(cpu);
cpu               470 drivers/watchdog/octeon-wdt-main.c 		node = cpu_to_node(cpu);
cpu                12 drivers/xen/cpu_hotplug.c static void enable_hotplug_cpu(int cpu)
cpu                14 drivers/xen/cpu_hotplug.c 	if (!cpu_present(cpu))
cpu                15 drivers/xen/cpu_hotplug.c 		xen_arch_register_cpu(cpu);
cpu                17 drivers/xen/cpu_hotplug.c 	set_cpu_present(cpu, true);
cpu                20 drivers/xen/cpu_hotplug.c static void disable_hotplug_cpu(int cpu)
cpu                22 drivers/xen/cpu_hotplug.c 	if (!cpu_is_hotpluggable(cpu))
cpu                25 drivers/xen/cpu_hotplug.c 	if (cpu_online(cpu))
cpu                26 drivers/xen/cpu_hotplug.c 		device_offline(get_cpu_device(cpu));
cpu                27 drivers/xen/cpu_hotplug.c 	if (!cpu_online(cpu) && cpu_present(cpu)) {
cpu                28 drivers/xen/cpu_hotplug.c 		xen_arch_unregister_cpu(cpu);
cpu                29 drivers/xen/cpu_hotplug.c 		set_cpu_present(cpu, false);
cpu                34 drivers/xen/cpu_hotplug.c static int vcpu_online(unsigned int cpu)
cpu                39 drivers/xen/cpu_hotplug.c 	sprintf(dir, "cpu/%u", cpu);
cpu                52 drivers/xen/cpu_hotplug.c 	pr_err("unknown state(%s) on CPU%d\n", state, cpu);
cpu                55 drivers/xen/cpu_hotplug.c static void vcpu_hotplug(unsigned int cpu)
cpu                57 drivers/xen/cpu_hotplug.c 	if (cpu >= nr_cpu_ids || !cpu_possible(cpu))
cpu                60 drivers/xen/cpu_hotplug.c 	switch (vcpu_online(cpu)) {
cpu                62 drivers/xen/cpu_hotplug.c 		enable_hotplug_cpu(cpu);
cpu                65 drivers/xen/cpu_hotplug.c 		disable_hotplug_cpu(cpu);
cpu                75 drivers/xen/cpu_hotplug.c 	unsigned int cpu;
cpu                80 drivers/xen/cpu_hotplug.c 		sscanf(cpustr, "cpu/%u", &cpu);
cpu                81 drivers/xen/cpu_hotplug.c 		vcpu_hotplug(cpu);
cpu                88 drivers/xen/cpu_hotplug.c 	int cpu;
cpu                95 drivers/xen/cpu_hotplug.c 	for_each_possible_cpu(cpu) {
cpu                96 drivers/xen/cpu_hotplug.c 		if (vcpu_online(cpu) == 0) {
cpu                97 drivers/xen/cpu_hotplug.c 			(void)cpu_down(cpu);
cpu                98 drivers/xen/cpu_hotplug.c 			set_cpu_present(cpu, false);
cpu                50 drivers/xen/events/events_2l.c static void evtchn_2l_bind_to_cpu(struct irq_info *info, unsigned cpu)
cpu                52 drivers/xen/events/events_2l.c 	clear_bit(info->evtchn, BM(per_cpu(cpu_evtchn_mask, info->cpu)));
cpu                53 drivers/xen/events/events_2l.c 	set_bit(info->evtchn, BM(per_cpu(cpu_evtchn_mask, cpu)));
cpu                89 drivers/xen/events/events_2l.c 	unsigned int cpu = get_cpu();
cpu                94 drivers/xen/events/events_2l.c 	if (unlikely((cpu != cpu_from_evtchn(port))))
cpu               145 drivers/xen/events/events_2l.c static inline xen_ulong_t active_evtchns(unsigned int cpu,
cpu               150 drivers/xen/events/events_2l.c 		per_cpu(cpu_evtchn_mask, cpu)[idx] &
cpu               162 drivers/xen/events/events_2l.c static void evtchn_2l_handle_events(unsigned cpu)
cpu               174 drivers/xen/events/events_2l.c 	irq = irq_from_virq(cpu, VIRQ_TIMER);
cpu               179 drivers/xen/events/events_2l.c 		if (active_evtchns(cpu, s, word_idx) & (1ULL << bit_idx))
cpu               210 drivers/xen/events/events_2l.c 		pending_bits = active_evtchns(cpu, s, word_idx);
cpu               268 drivers/xen/events/events_2l.c 	int cpu = smp_processor_id();
cpu               269 drivers/xen/events/events_2l.c 	xen_ulong_t *cpu_evtchn = per_cpu(cpu_evtchn_mask, cpu);
cpu               277 drivers/xen/events/events_2l.c 	printk("\nvcpu %d\n  ", cpu);
cpu               282 drivers/xen/events/events_2l.c 		pending = (get_irq_regs() && i == cpu)
cpu               290 drivers/xen/events/events_2l.c 	v = per_cpu(xen_vcpu, cpu);
cpu               312 drivers/xen/events/events_2l.c 	printk("\nlocal cpu%d mask:\n   ", cpu);
cpu               166 drivers/xen/events/events_base.c 				     unsigned short cpu)
cpu               175 drivers/xen/events/events_base.c 	info->cpu = cpu;
cpu               194 drivers/xen/events/events_base.c static int xen_irq_info_ipi_setup(unsigned cpu,
cpu               203 drivers/xen/events/events_base.c 	per_cpu(ipi_to_irq, cpu)[ipi] = irq;
cpu               208 drivers/xen/events/events_base.c static int xen_irq_info_virq_setup(unsigned cpu,
cpu               217 drivers/xen/events/events_base.c 	per_cpu(virq_to_irq, cpu)[virq] = irq;
cpu               262 drivers/xen/events/events_base.c int irq_from_virq(unsigned int cpu, unsigned int virq)
cpu               264 drivers/xen/events/events_base.c 	return per_cpu(virq_to_irq, cpu)[virq];
cpu               304 drivers/xen/events/events_base.c 	return info_for_irq(irq)->cpu;
cpu               333 drivers/xen/events/events_base.c static void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu)
cpu               340 drivers/xen/events/events_base.c 	cpumask_copy(irq_get_affinity_mask(irq), cpumask_of(cpu));
cpu               342 drivers/xen/events/events_base.c 	xen_evtchn_port_bind_to_cpu(info, cpu);
cpu               344 drivers/xen/events/events_base.c 	info->cpu = cpu;
cpu               614 drivers/xen/events/events_base.c 		unsigned int cpu = cpu_from_irq(irq);
cpu               620 drivers/xen/events/events_base.c 			per_cpu(virq_to_irq, cpu)[virq_from_irq(irq)] = -1;
cpu               623 drivers/xen/events/events_base.c 			per_cpu(ipi_to_irq, cpu)[ipi_from_irq(irq)] = -1;
cpu               870 drivers/xen/events/events_base.c static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu)
cpu               878 drivers/xen/events/events_base.c 	irq = per_cpu(ipi_to_irq, cpu)[ipi];
cpu               888 drivers/xen/events/events_base.c 		bind_ipi.vcpu = xen_vcpu_nr(cpu);
cpu               894 drivers/xen/events/events_base.c 		ret = xen_irq_info_ipi_setup(cpu, irq, evtchn, ipi);
cpu               900 drivers/xen/events/events_base.c 		bind_evtchn_to_cpu(evtchn, cpu);
cpu               927 drivers/xen/events/events_base.c static int find_virq(unsigned int virq, unsigned int cpu)
cpu               941 drivers/xen/events/events_base.c 		if (status.u.virq == virq && status.vcpu == xen_vcpu_nr(cpu)) {
cpu               962 drivers/xen/events/events_base.c int bind_virq_to_irq(unsigned int virq, unsigned int cpu, bool percpu)
cpu               969 drivers/xen/events/events_base.c 	irq = per_cpu(virq_to_irq, cpu)[virq];
cpu               984 drivers/xen/events/events_base.c 		bind_virq.vcpu = xen_vcpu_nr(cpu);
cpu               991 drivers/xen/events/events_base.c 				ret = find_virq(virq, cpu);
cpu               996 drivers/xen/events/events_base.c 		ret = xen_irq_info_virq_setup(cpu, irq, evtchn, virq);
cpu              1003 drivers/xen/events/events_base.c 		bind_evtchn_to_cpu(evtchn, cpu);
cpu              1065 drivers/xen/events/events_base.c int bind_virq_to_irqhandler(unsigned int virq, unsigned int cpu,
cpu              1071 drivers/xen/events/events_base.c 	irq = bind_virq_to_irq(virq, cpu, irqflags & IRQF_PERCPU);
cpu              1085 drivers/xen/events/events_base.c 			   unsigned int cpu,
cpu              1093 drivers/xen/events/events_base.c 	irq = bind_ipi_to_irq(ipi, cpu);
cpu              1198 drivers/xen/events/events_base.c void xen_send_IPI_one(unsigned int cpu, enum ipi_vector vector)
cpu              1204 drivers/xen/events/events_base.c 		int rc =  HYPERVISOR_vcpu_op(VCPUOP_send_nmi, xen_vcpu_nr(cpu),
cpu              1207 drivers/xen/events/events_base.c 			printk(KERN_WARNING "Sending nmi to CPU%d failed (rc:%d)\n", cpu, rc);
cpu              1211 drivers/xen/events/events_base.c 	irq = per_cpu(ipi_to_irq, cpu)[vector];
cpu              1221 drivers/xen/events/events_base.c 	int cpu = get_cpu();
cpu              1230 drivers/xen/events/events_base.c 		xen_evtchn_handle_events(cpu);
cpu              1288 drivers/xen/events/events_base.c         bind_evtchn_to_cpu(evtchn, info->cpu);
cpu              1290 drivers/xen/events/events_base.c 	irq_set_affinity(irq, cpumask_of(info->cpu));
cpu              1450 drivers/xen/events/events_base.c static void restore_cpu_virqs(unsigned int cpu)
cpu              1456 drivers/xen/events/events_base.c 		if ((irq = per_cpu(virq_to_irq, cpu)[virq]) == -1)
cpu              1463 drivers/xen/events/events_base.c 		bind_virq.vcpu = xen_vcpu_nr(cpu);
cpu              1470 drivers/xen/events/events_base.c 		(void)xen_irq_info_virq_setup(cpu, irq, evtchn, virq);
cpu              1471 drivers/xen/events/events_base.c 		bind_evtchn_to_cpu(evtchn, cpu);
cpu              1475 drivers/xen/events/events_base.c static void restore_cpu_ipis(unsigned int cpu)
cpu              1481 drivers/xen/events/events_base.c 		if ((irq = per_cpu(ipi_to_irq, cpu)[ipi]) == -1)
cpu              1487 drivers/xen/events/events_base.c 		bind_ipi.vcpu = xen_vcpu_nr(cpu);
cpu              1494 drivers/xen/events/events_base.c 		(void)xen_irq_info_ipi_setup(cpu, irq, evtchn, ipi);
cpu              1495 drivers/xen/events/events_base.c 		bind_evtchn_to_cpu(evtchn, cpu);
cpu              1571 drivers/xen/events/events_base.c 	unsigned int cpu;
cpu              1583 drivers/xen/events/events_base.c 	for_each_possible_cpu(cpu) {
cpu              1584 drivers/xen/events/events_base.c 		restore_cpu_virqs(cpu);
cpu              1585 drivers/xen/events/events_base.c 		restore_cpu_ipis(cpu);
cpu               102 drivers/xen/events/events_fifo.c static int init_control_block(int cpu,
cpu               105 drivers/xen/events/events_fifo.c 	struct evtchn_fifo_queue *q = &per_cpu(cpu_queue, cpu);
cpu               116 drivers/xen/events/events_fifo.c 	init_control.vcpu        = xen_vcpu_nr(cpu);
cpu               189 drivers/xen/events/events_fifo.c static void evtchn_fifo_bind_to_cpu(struct irq_info *info, unsigned cpu)
cpu               282 drivers/xen/events/events_fifo.c static void consume_one_event(unsigned cpu,
cpu               287 drivers/xen/events/events_fifo.c 	struct evtchn_fifo_queue *q = &per_cpu(cpu_queue, cpu);
cpu               327 drivers/xen/events/events_fifo.c static void __evtchn_fifo_handle_events(unsigned cpu, bool drop)
cpu               333 drivers/xen/events/events_fifo.c 	control_block = per_cpu(cpu_control_block, cpu);
cpu               339 drivers/xen/events/events_fifo.c 		consume_one_event(cpu, control_block, q, &ready, drop);
cpu               344 drivers/xen/events/events_fifo.c static void evtchn_fifo_handle_events(unsigned cpu)
cpu               346 drivers/xen/events/events_fifo.c 	__evtchn_fifo_handle_events(cpu, false);
cpu               351 drivers/xen/events/events_fifo.c 	unsigned cpu;
cpu               353 drivers/xen/events/events_fifo.c 	for_each_possible_cpu(cpu) {
cpu               354 drivers/xen/events/events_fifo.c 		void *control_block = per_cpu(cpu_control_block, cpu);
cpu               365 drivers/xen/events/events_fifo.c 		if (!cpu_online(cpu)) {
cpu               367 drivers/xen/events/events_fifo.c 			per_cpu(cpu_control_block, cpu) = NULL;
cpu               371 drivers/xen/events/events_fifo.c 		ret = init_control_block(cpu, control_block);
cpu               398 drivers/xen/events/events_fifo.c static int evtchn_fifo_alloc_control_block(unsigned cpu)
cpu               407 drivers/xen/events/events_fifo.c 	ret = init_control_block(cpu, control_block);
cpu               411 drivers/xen/events/events_fifo.c 	per_cpu(cpu_control_block, cpu) = control_block;
cpu               420 drivers/xen/events/events_fifo.c static int xen_evtchn_cpu_prepare(unsigned int cpu)
cpu               422 drivers/xen/events/events_fifo.c 	if (!per_cpu(cpu_control_block, cpu))
cpu               423 drivers/xen/events/events_fifo.c 		return evtchn_fifo_alloc_control_block(cpu);
cpu               427 drivers/xen/events/events_fifo.c static int xen_evtchn_cpu_dead(unsigned int cpu)
cpu               429 drivers/xen/events/events_fifo.c 	__evtchn_fifo_handle_events(cpu, true);
cpu               435 drivers/xen/events/events_fifo.c 	int cpu = smp_processor_id();
cpu               438 drivers/xen/events/events_fifo.c 	ret = evtchn_fifo_alloc_control_block(cpu);
cpu                37 drivers/xen/events/events_internal.h 	unsigned short cpu;	/* cpu bound */
cpu                61 drivers/xen/events/events_internal.h 	void (*bind_to_cpu)(struct irq_info *info, unsigned cpu);
cpu                70 drivers/xen/events/events_internal.h 	void (*handle_events)(unsigned cpu);
cpu               100 drivers/xen/events/events_internal.h 					       unsigned cpu)
cpu               102 drivers/xen/events/events_internal.h 	evtchn_ops->bind_to_cpu(info, cpu);
cpu               135 drivers/xen/events/events_internal.h static inline void xen_evtchn_handle_events(unsigned cpu)
cpu               137 drivers/xen/events/events_internal.h 	return evtchn_ops->handle_events(cpu);
cpu               249 drivers/xen/mcelog.c 	m.cpu = m.extcpu = g_physinfo[i].mc_cpunr;
cpu                99 drivers/xen/pcpu.c 	struct pcpu *cpu = container_of(dev, struct pcpu, dev);
cpu               101 drivers/xen/pcpu.c 	return sprintf(buf, "%u\n", !!(cpu->flags & XEN_PCPU_FLAGS_ONLINE));
cpu               269 drivers/xen/pcpu.c static int sync_pcpu(uint32_t cpu, uint32_t *max_cpu)
cpu               277 drivers/xen/pcpu.c 		.u.pcpu_info.xen_cpuid = cpu,
cpu               288 drivers/xen/pcpu.c 	pcpu = get_pcpu(cpu);
cpu               316 drivers/xen/pcpu.c 	uint32_t cpu = 0, max_cpu = 0;
cpu               322 drivers/xen/pcpu.c 	while (!err && (cpu <= max_cpu)) {
cpu               323 drivers/xen/pcpu.c 		err = sync_pcpu(cpu, &max_cpu);
cpu               324 drivers/xen/pcpu.c 		cpu++;
cpu                55 drivers/xen/time.c 			      struct vcpu_runstate_info *res, unsigned int cpu)
cpu                62 drivers/xen/time.c 	state = per_cpu_ptr(&xen_runstate, cpu);
cpu                74 drivers/xen/time.c 					  unsigned int cpu)
cpu                78 drivers/xen/time.c 	xen_get_runstate_snapshot_cpu_delta(res, cpu);
cpu                81 drivers/xen/time.c 		res->time[i] += per_cpu(old_runstate_time, cpu)[i];
cpu                88 drivers/xen/time.c 	int cpu, i;
cpu               105 drivers/xen/time.c 		for_each_possible_cpu(cpu) {
cpu               106 drivers/xen/time.c 			xen_get_runstate_snapshot_cpu_delta(&state, cpu);
cpu               107 drivers/xen/time.c 			memcpy(runstate_delta[cpu].time, state.time,
cpu               108 drivers/xen/time.c 					sizeof(runstate_delta[cpu].time));
cpu               120 drivers/xen/time.c 		for_each_possible_cpu(cpu) {
cpu               122 drivers/xen/time.c 				per_cpu(old_runstate_time, cpu)[i] +=
cpu               123 drivers/xen/time.c 					runstate_delta[cpu].time[i];
cpu               152 drivers/xen/time.c u64 xen_steal_clock(int cpu)
cpu               156 drivers/xen/time.c 	xen_get_runstate_snapshot_cpu(&state, cpu);
cpu               160 drivers/xen/time.c void xen_setup_runstate_info(int cpu)
cpu               164 drivers/xen/time.c 	area.addr.v = &per_cpu(xen_runstate, cpu);
cpu               167 drivers/xen/time.c 			       xen_vcpu_nr(cpu), &area))
cpu               657 drivers/xen/xen-scsiback.c 	int tag, cpu, i;
cpu               659 drivers/xen/xen-scsiback.c 	tag = sbitmap_queue_get(&se_sess->sess_tag_pool, &cpu);
cpu               668 drivers/xen/xen-scsiback.c 	req->se_cmd.map_cpu = cpu;
cpu               105 fs/aio.c       	struct __percpu kioctx_cpu *cpu;
cpu               590 fs/aio.c       	free_percpu(ctx->cpu);
cpu               757 fs/aio.c       	ctx->cpu = alloc_percpu(struct kioctx_cpu);
cpu               758 fs/aio.c       	if (!ctx->cpu)
cpu               804 fs/aio.c       	free_percpu(ctx->cpu);
cpu               909 fs/aio.c       	kcpu = this_cpu_ptr(ctx->cpu);
cpu               927 fs/aio.c       	kcpu = this_cpu_ptr(ctx->cpu);
cpu              1836 fs/btrfs/ctree.h static inline void btrfs_disk_key_to_cpu(struct btrfs_key *cpu,
cpu              1839 fs/btrfs/ctree.h 	cpu->offset = le64_to_cpu(disk->offset);
cpu              1840 fs/btrfs/ctree.h 	cpu->type = disk->type;
cpu              1841 fs/btrfs/ctree.h 	cpu->objectid = le64_to_cpu(disk->objectid);
cpu              1845 fs/btrfs/ctree.h 					 const struct btrfs_key *cpu)
cpu              1847 fs/btrfs/ctree.h 	disk->offset = cpu_to_le64(cpu->offset);
cpu              1848 fs/btrfs/ctree.h 	disk->type = cpu->type;
cpu              1849 fs/btrfs/ctree.h 	disk->objectid = cpu_to_le64(cpu->objectid);
cpu              2073 fs/btrfs/ctree.h btrfs_disk_balance_args_to_cpu(struct btrfs_balance_args *cpu,
cpu              2076 fs/btrfs/ctree.h 	memset(cpu, 0, sizeof(*cpu));
cpu              2078 fs/btrfs/ctree.h 	cpu->profiles = le64_to_cpu(disk->profiles);
cpu              2079 fs/btrfs/ctree.h 	cpu->usage = le64_to_cpu(disk->usage);
cpu              2080 fs/btrfs/ctree.h 	cpu->devid = le64_to_cpu(disk->devid);
cpu              2081 fs/btrfs/ctree.h 	cpu->pstart = le64_to_cpu(disk->pstart);
cpu              2082 fs/btrfs/ctree.h 	cpu->pend = le64_to_cpu(disk->pend);
cpu              2083 fs/btrfs/ctree.h 	cpu->vstart = le64_to_cpu(disk->vstart);
cpu              2084 fs/btrfs/ctree.h 	cpu->vend = le64_to_cpu(disk->vend);
cpu              2085 fs/btrfs/ctree.h 	cpu->target = le64_to_cpu(disk->target);
cpu              2086 fs/btrfs/ctree.h 	cpu->flags = le64_to_cpu(disk->flags);
cpu              2087 fs/btrfs/ctree.h 	cpu->limit = le64_to_cpu(disk->limit);
cpu              2088 fs/btrfs/ctree.h 	cpu->stripes_min = le32_to_cpu(disk->stripes_min);
cpu              2089 fs/btrfs/ctree.h 	cpu->stripes_max = le32_to_cpu(disk->stripes_max);
cpu              2094 fs/btrfs/ctree.h 			       const struct btrfs_balance_args *cpu)
cpu              2098 fs/btrfs/ctree.h 	disk->profiles = cpu_to_le64(cpu->profiles);
cpu              2099 fs/btrfs/ctree.h 	disk->usage = cpu_to_le64(cpu->usage);
cpu              2100 fs/btrfs/ctree.h 	disk->devid = cpu_to_le64(cpu->devid);
cpu              2101 fs/btrfs/ctree.h 	disk->pstart = cpu_to_le64(cpu->pstart);
cpu              2102 fs/btrfs/ctree.h 	disk->pend = cpu_to_le64(cpu->pend);
cpu              2103 fs/btrfs/ctree.h 	disk->vstart = cpu_to_le64(cpu->vstart);
cpu              2104 fs/btrfs/ctree.h 	disk->vend = cpu_to_le64(cpu->vend);
cpu              2105 fs/btrfs/ctree.h 	disk->target = cpu_to_le64(cpu->target);
cpu              2106 fs/btrfs/ctree.h 	disk->flags = cpu_to_le64(cpu->flags);
cpu              2107 fs/btrfs/ctree.h 	disk->limit = cpu_to_le64(cpu->limit);
cpu              2108 fs/btrfs/ctree.h 	disk->stripes_min = cpu_to_le32(cpu->stripes_min);
cpu              2109 fs/btrfs/ctree.h 	disk->stripes_max = cpu_to_le32(cpu->stripes_max);
cpu              1392 fs/buffer.c    static bool has_bh_in_lru(int cpu, void *dummy)
cpu              1394 fs/buffer.c    	struct bh_lru *b = per_cpu_ptr(&bh_lrus, cpu);
cpu              3379 fs/buffer.c    static int buffer_exit_cpu_dead(unsigned int cpu)
cpu              3382 fs/buffer.c    	struct bh_lru *b = &per_cpu(bh_lrus, cpu);
cpu              3388 fs/buffer.c    	this_cpu_add(bh_accounting.nr, per_cpu(bh_accounting, cpu).nr);
cpu              3389 fs/buffer.c    	per_cpu(bh_accounting, cpu).nr = 0;
cpu               103 fs/fscache/main.c 	unsigned int cpu;
cpu               126 fs/fscache/main.c 	for_each_possible_cpu(cpu)
cpu               127 fs/fscache/main.c 		init_waitqueue_head(&per_cpu(fscache_object_cong_wait, cpu));
cpu              1927 fs/gfs2/rgrp.c 	int cpu, nonzero = 0;
cpu              1930 fs/gfs2/rgrp.c 	for_each_present_cpu(cpu) {
cpu              1931 fs/gfs2/rgrp.c 		st = &per_cpu_ptr(sdp->sd_lkstats, cpu)->lkstats[LM_TYPE_RGRP];
cpu              3254 fs/io_uring.c  			int cpu = p->sq_thread_cpu;
cpu              3257 fs/io_uring.c  			if (cpu >= nr_cpu_ids)
cpu              3259 fs/io_uring.c  			if (!cpu_online(cpu))
cpu              3263 fs/io_uring.c  							ctx, cpu,
cpu               163 fs/namespace.c 	int cpu;
cpu               165 fs/namespace.c 	for_each_possible_cpu(cpu) {
cpu               166 fs/namespace.c 		count += per_cpu_ptr(mnt->mnt_pcp, cpu)->mnt_count;
cpu               274 fs/namespace.c 	int cpu;
cpu               276 fs/namespace.c 	for_each_possible_cpu(cpu) {
cpu               277 fs/namespace.c 		count += per_cpu_ptr(mnt->mnt_pcp, cpu)->mnt_writers;
cpu               824 fs/nfs/super.c 	int i, cpu;
cpu               875 fs/nfs/super.c 	for_each_possible_cpu(cpu) {
cpu               879 fs/nfs/super.c 		stats = per_cpu_ptr(nfss->io_stats, cpu);
cpu                18 fs/proc/stat.c #define arch_irq_stat_cpu(cpu) 0
cpu                26 fs/proc/stat.c static u64 get_idle_time(struct kernel_cpustat *kcs, int cpu)
cpu                31 fs/proc/stat.c 	if (cpu_online(cpu) && !nr_iowait_cpu(cpu))
cpu                32 fs/proc/stat.c 		idle += arch_idle_time(cpu);
cpu                36 fs/proc/stat.c static u64 get_iowait_time(struct kernel_cpustat *kcs, int cpu)
cpu                41 fs/proc/stat.c 	if (cpu_online(cpu) && nr_iowait_cpu(cpu))
cpu                42 fs/proc/stat.c 		iowait += arch_idle_time(cpu);
cpu                48 fs/proc/stat.c static u64 get_idle_time(struct kernel_cpustat *kcs, int cpu)
cpu                52 fs/proc/stat.c 	if (cpu_online(cpu))
cpu                53 fs/proc/stat.c 		idle_usecs = get_cpu_idle_time_us(cpu, NULL);
cpu                64 fs/proc/stat.c static u64 get_iowait_time(struct kernel_cpustat *kcs, int cpu)
cpu                68 fs/proc/stat.c 	if (cpu_online(cpu))
cpu                69 fs/proc/stat.c 		iowait_usecs = get_cpu_iowait_time_us(cpu, NULL);
cpu              1063 fs/seq_file.c  seq_hlist_start_percpu(struct hlist_head __percpu *head, int *cpu, loff_t pos)
cpu              1067 fs/seq_file.c  	for_each_possible_cpu(*cpu) {
cpu              1068 fs/seq_file.c  		hlist_for_each(node, per_cpu_ptr(head, *cpu)) {
cpu              1088 fs/seq_file.c  			int *cpu, loff_t *pos)
cpu              1097 fs/seq_file.c  	for (*cpu = cpumask_next(*cpu, cpu_possible_mask); *cpu < nr_cpu_ids;
cpu              1098 fs/seq_file.c  	     *cpu = cpumask_next(*cpu, cpu_possible_mask)) {
cpu              1099 fs/seq_file.c  		struct hlist_head *bucket = per_cpu_ptr(head, *cpu);
cpu                31 fs/squashfs/decompressor_multi_percpu.c 	int err, cpu;
cpu                37 fs/squashfs/decompressor_multi_percpu.c 	for_each_possible_cpu(cpu) {
cpu                38 fs/squashfs/decompressor_multi_percpu.c 		stream = per_cpu_ptr(percpu, cpu);
cpu                50 fs/squashfs/decompressor_multi_percpu.c 	for_each_possible_cpu(cpu) {
cpu                51 fs/squashfs/decompressor_multi_percpu.c 		stream = per_cpu_ptr(percpu, cpu);
cpu                64 fs/squashfs/decompressor_multi_percpu.c 	int cpu;
cpu                67 fs/squashfs/decompressor_multi_percpu.c 		for_each_possible_cpu(cpu) {
cpu                68 fs/squashfs/decompressor_multi_percpu.c 			stream = per_cpu_ptr(percpu, cpu);
cpu                12 fs/xfs/xfs_stats.c 	int val = 0, cpu;
cpu                14 fs/xfs/xfs_stats.c 	for_each_possible_cpu(cpu)
cpu                15 fs/xfs/xfs_stats.c 		val += *(((__u32 *)per_cpu_ptr(stats, cpu) + idx));
cpu               127 include/acpi/cppc_acpi.h 	int cpu;
cpu               137 include/acpi/cppc_acpi.h extern int cppc_get_perf_ctrs(int cpu, struct cppc_perf_fb_ctrs *perf_fb_ctrs);
cpu               138 include/acpi/cppc_acpi.h extern int cppc_set_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls);
cpu               139 include/acpi/cppc_acpi.h extern int cppc_get_perf_caps(int cpu, struct cppc_perf_caps *caps);
cpu               141 include/acpi/cppc_acpi.h extern unsigned int cppc_get_transition_latency(int cpu);
cpu               254 include/acpi/processor.h 					       *performance, unsigned int cpu);
cpu               255 include/acpi/processor.h extern void acpi_processor_unregister_performance(unsigned int cpu);
cpu               273 include/acpi/processor.h 					unsigned int cpu);
cpu               274 include/acpi/processor.h int acpi_processor_ffh_cstate_probe(unsigned int cpu,
cpu               281 include/acpi/processor.h 						      *flags, unsigned int cpu)
cpu               286 include/acpi/processor.h static inline int acpi_processor_ffh_cstate_probe(unsigned int cpu,
cpu               300 include/acpi/processor.h static inline int call_on_cpu(int cpu, long (*fn)(void *), void *arg,
cpu               303 include/acpi/processor.h 	if (direct || (is_percpu_thread() && cpu == smp_processor_id()))
cpu               305 include/acpi/processor.h 	return work_on_cpu(cpu, fn, arg);
cpu               316 include/acpi/processor.h extern int acpi_processor_get_bios_limit(int cpu, unsigned int *limit);
cpu               342 include/acpi/processor.h static inline int acpi_processor_get_bios_limit(int cpu, unsigned int *limit)
cpu               134 include/asm-generic/mshyperv.h 	int cpu, vcpu, vcpu_bank, vcpu_offset, nr_bank = 1;
cpu               151 include/asm-generic/mshyperv.h 	for_each_cpu(cpu, cpus) {
cpu               152 include/asm-generic/mshyperv.h 		vcpu = hv_cpu_number_to_vp_number(cpu);
cpu                31 include/asm-generic/preempt.h #define init_idle_preempt_count(p, cpu) do { \
cpu                35 include/asm-generic/topology.h #define cpu_to_node(cpu)	((void)(cpu),0)
cpu                41 include/asm-generic/topology.h #define set_cpu_numa_node(cpu, node)
cpu                44 include/asm-generic/topology.h #define cpu_to_mem(cpu)		((void)(cpu),0)
cpu                72 include/asm-generic/topology.h #define set_cpu_numa_mem(cpu, node)
cpu                97 include/asm-generic/vmlinux.lds.h #define CPU_KEEP(sec)    *(.cpu##sec)
cpu               101 include/asm-generic/vmlinux.lds.h #define CPU_DISCARD(sec) *(.cpu##sec)
cpu                26 include/clocksource/hyperv_timer.h extern void hv_stimer_init(unsigned int cpu);
cpu                27 include/clocksource/hyperv_timer.h extern void hv_stimer_cleanup(unsigned int cpu);
cpu               287 include/linux/acpi.h int acpi_unmap_cpu(int cpu);
cpu              1269 include/linux/acpi.h int acpi_pptt_cpu_is_thread(unsigned int cpu);
cpu              1270 include/linux/acpi.h int find_acpi_cpu_topology(unsigned int cpu, int level);
cpu              1271 include/linux/acpi.h int find_acpi_cpu_topology_package(unsigned int cpu);
cpu              1272 include/linux/acpi.h int find_acpi_cpu_topology_hetero_id(unsigned int cpu);
cpu              1273 include/linux/acpi.h int find_acpi_cpu_cache_topology(unsigned int cpu, int level);
cpu              1275 include/linux/acpi.h static inline int acpi_pptt_cpu_is_thread(unsigned int cpu)
cpu              1279 include/linux/acpi.h static inline int find_acpi_cpu_topology(unsigned int cpu, int level)
cpu              1283 include/linux/acpi.h static inline int find_acpi_cpu_topology_package(unsigned int cpu)
cpu              1287 include/linux/acpi.h static inline int find_acpi_cpu_topology_hetero_id(unsigned int cpu)
cpu              1291 include/linux/acpi.h static inline int find_acpi_cpu_cache_topology(unsigned int cpu, int level)
cpu               185 include/linux/amd-iommu.h amd_iommu_update_ga(int cpu, bool is_run, void *data);
cpu               199 include/linux/amd-iommu.h amd_iommu_update_ga(int cpu, bool is_run, void *data)
cpu                15 include/linux/arch_topology.h bool topology_parse_cpu_capacity(struct device_node *cpu_node, int cpu);
cpu                21 include/linux/arch_topology.h unsigned long topology_get_cpu_scale(int cpu)
cpu                23 include/linux/arch_topology.h 	return per_cpu(cpu_scale, cpu);
cpu                26 include/linux/arch_topology.h void topology_set_cpu_scale(unsigned int cpu, unsigned long capacity);
cpu                31 include/linux/arch_topology.h unsigned long topology_get_freq_scale(int cpu)
cpu                33 include/linux/arch_topology.h 	return per_cpu(freq_scale, cpu);
cpu                49 include/linux/arch_topology.h #define topology_physical_package_id(cpu)	(cpu_topology[cpu].package_id)
cpu                50 include/linux/arch_topology.h #define topology_core_id(cpu)		(cpu_topology[cpu].core_id)
cpu                51 include/linux/arch_topology.h #define topology_core_cpumask(cpu)	(&cpu_topology[cpu].core_sibling)
cpu                52 include/linux/arch_topology.h #define topology_sibling_cpumask(cpu)	(&cpu_topology[cpu].thread_sibling)
cpu                53 include/linux/arch_topology.h #define topology_llc_cpumask(cpu)	(&cpu_topology[cpu].llc_sibling)
cpu                56 include/linux/arch_topology.h const struct cpumask *cpu_coregroup_mask(int cpu);
cpu                57 include/linux/arch_topology.h void update_siblings_masks(unsigned int cpu);
cpu              1500 include/linux/blkdev.h int kblockd_schedule_work_on(int cpu, struct work_struct *work);
cpu              1501 include/linux/blkdev.h int kblockd_mod_delayed_work_on(int cpu, struct delayed_work *dwork, unsigned long delay);
cpu                88 include/linux/cacheinfo.h 	int cpu = smp_processor_id();				\
cpu                89 include/linux/cacheinfo.h 	*(int *)ret = __##func(cpu);				\
cpu                92 include/linux/cacheinfo.h int func(unsigned int cpu)					\
cpu                95 include/linux/cacheinfo.h 	smp_call_function_single(cpu, _##func, &ret, true);	\
cpu                99 include/linux/cacheinfo.h struct cpu_cacheinfo *get_cpu_cacheinfo(unsigned int cpu);
cpu               100 include/linux/cacheinfo.h int init_cache_level(unsigned int cpu);
cpu               101 include/linux/cacheinfo.h int populate_cache_leaves(unsigned int cpu);
cpu               102 include/linux/cacheinfo.h int cache_setup_acpi(unsigned int cpu);
cpu               112 include/linux/cacheinfo.h static inline int acpi_find_last_cache_level(unsigned int cpu)
cpu               117 include/linux/cacheinfo.h int acpi_find_last_cache_level(unsigned int cpu);
cpu               636 include/linux/cgroup-defs.h 	void (*css_rstat_flush)(struct cgroup_subsys_state *css, int cpu);
cpu               752 include/linux/cgroup.h void cgroup_rstat_updated(struct cgroup *cgrp, int cpu);
cpu                17 include/linux/cgroup_subsys.h SUBSYS(cpu)
cpu                33 include/linux/clk/tegra.h 	void (*wait_for_reset)(u32 cpu);
cpu                34 include/linux/clk/tegra.h 	void (*put_in_reset)(u32 cpu);
cpu                35 include/linux/clk/tegra.h 	void (*out_of_reset)(u32 cpu);
cpu                36 include/linux/clk/tegra.h 	void (*enable_clock)(u32 cpu);
cpu                37 include/linux/clk/tegra.h 	void (*disable_clock)(u32 cpu);
cpu                47 include/linux/clk/tegra.h static inline void tegra_wait_cpu_in_reset(u32 cpu)
cpu                52 include/linux/clk/tegra.h 	tegra_cpu_car_ops->wait_for_reset(cpu);
cpu                55 include/linux/clk/tegra.h static inline void tegra_put_cpu_in_reset(u32 cpu)
cpu                60 include/linux/clk/tegra.h 	tegra_cpu_car_ops->put_in_reset(cpu);
cpu                63 include/linux/clk/tegra.h static inline void tegra_cpu_out_of_reset(u32 cpu)
cpu                68 include/linux/clk/tegra.h 	tegra_cpu_car_ops->out_of_reset(cpu);
cpu                71 include/linux/clk/tegra.h static inline void tegra_enable_cpu_clock(u32 cpu)
cpu                76 include/linux/clk/tegra.h 	tegra_cpu_car_ops->enable_clock(cpu);
cpu                79 include/linux/clk/tegra.h static inline void tegra_disable_cpu_clock(u32 cpu)
cpu                84 include/linux/clk/tegra.h 	tegra_cpu_car_ops->disable_clock(cpu);
cpu               184 include/linux/clockchips.h extern int clockevents_unbind_device(struct clock_event_device *ced, int cpu);
cpu                12 include/linux/context_tracking.h extern void context_tracking_cpu_set(int cpu);
cpu                25 include/linux/coresight-pmu.h static inline int coresight_get_trace_id(int cpu)
cpu                33 include/linux/coresight-pmu.h 	return (CORESIGHT_ETM_PMU_SEED + (cpu * 2));
cpu                37 include/linux/cpu.h extern int register_cpu(struct cpu *cpu, int num);
cpu                38 include/linux/cpu.h extern struct device *get_cpu_device(unsigned cpu);
cpu                39 include/linux/cpu.h extern bool cpu_is_hotpluggable(unsigned cpu);
cpu                40 include/linux/cpu.h extern bool arch_match_cpu_phys_id(int cpu, u64 phys_id);
cpu                42 include/linux/cpu.h 					      int cpu, unsigned int *thread);
cpu                73 include/linux/cpu.h extern void unregister_cpu(struct cpu *cpu);
cpu                91 include/linux/cpu.h int cpu_up(unsigned int cpu);
cpu                92 include/linux/cpu.h void notify_cpu_starting(unsigned int cpu);
cpu               119 include/linux/cpu.h void clear_tasks_mm_cpumask(int cpu);
cpu               120 include/linux/cpu.h int cpu_down(unsigned int cpu);
cpu               156 include/linux/cpu.h 	int cpu = 0;
cpu               159 include/linux/cpu.h 		cpu = -1;
cpu               161 include/linux/cpu.h 	return freeze_secondary_cpus(cpu);
cpu               190 include/linux/cpu.h int cpu_report_state(int cpu);
cpu               191 include/linux/cpu.h int cpu_check_up_prepare(int cpu);
cpu               192 include/linux/cpu.h void cpu_set_state_online(int cpu);
cpu               196 include/linux/cpu.h bool cpu_wait_death(unsigned int cpu, int seconds);
cpu                42 include/linux/cpu_rmap.h static inline u16 cpu_rmap_lookup_index(struct cpu_rmap *rmap, unsigned int cpu)
cpu                44 include/linux/cpu_rmap.h 	return rmap->near[cpu].index;
cpu                47 include/linux/cpu_rmap.h static inline void *cpu_rmap_lookup_obj(struct cpu_rmap *rmap, unsigned int cpu)
cpu                49 include/linux/cpu_rmap.h 	return rmap->obj[rmap->near[cpu].index];
cpu                47 include/linux/cpufeature.h MODULE_DEVICE_TABLE(cpu, cpu_feature_match_ ## x);		\
cpu                59 include/linux/cpufreq.h 	unsigned int		cpu;    /* cpu managing this policy, must be online */
cpu               160 include/linux/cpufreq.h 	unsigned int			cpu;
cpu               179 include/linux/cpufreq.h struct cpufreq_policy *cpufreq_cpu_get_raw(unsigned int cpu);
cpu               180 include/linux/cpufreq.h struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu);
cpu               183 include/linux/cpufreq.h static inline struct cpufreq_policy *cpufreq_cpu_get_raw(unsigned int cpu)
cpu               187 include/linux/cpufreq.h static inline struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
cpu               208 include/linux/cpufreq.h unsigned int cpufreq_get(unsigned int cpu);
cpu               209 include/linux/cpufreq.h unsigned int cpufreq_quick_get(unsigned int cpu);
cpu               210 include/linux/cpufreq.h unsigned int cpufreq_quick_get_max(unsigned int cpu);
cpu               213 include/linux/cpufreq.h u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy);
cpu               215 include/linux/cpufreq.h struct cpufreq_policy *cpufreq_cpu_acquire(unsigned int cpu);
cpu               217 include/linux/cpufreq.h int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu);
cpu               219 include/linux/cpufreq.h void cpufreq_update_policy(unsigned int cpu);
cpu               220 include/linux/cpufreq.h void cpufreq_update_limits(unsigned int cpu);
cpu               226 include/linux/cpufreq.h static inline unsigned int cpufreq_get(unsigned int cpu)
cpu               230 include/linux/cpufreq.h static inline unsigned int cpufreq_quick_get(unsigned int cpu)
cpu               234 include/linux/cpufreq.h static inline unsigned int cpufreq_quick_get_max(unsigned int cpu)
cpu               346 include/linux/cpufreq.h 	unsigned int	(*get)(unsigned int cpu);
cpu               349 include/linux/cpufreq.h 	void		(*update_limits)(unsigned int cpu);
cpu               352 include/linux/cpufreq.h 	int		(*bios_limit)(int cpu, unsigned int *limit);
cpu               989 include/linux/cpufreq.h extern unsigned int arch_freq_get_on_cpu(int cpu);
cpu              1000 include/linux/cpufreq.h unsigned int cpufreq_generic_get(unsigned int cpu);
cpu               190 include/linux/cpuhotplug.h 			int (*startup)(unsigned int cpu),
cpu               191 include/linux/cpuhotplug.h 			int (*teardown)(unsigned int cpu), bool multi_instance);
cpu               195 include/linux/cpuhotplug.h 				   int (*startup)(unsigned int cpu),
cpu               196 include/linux/cpuhotplug.h 				   int (*teardown)(unsigned int cpu),
cpu               210 include/linux/cpuhotplug.h 				    int (*startup)(unsigned int cpu),
cpu               211 include/linux/cpuhotplug.h 				    int (*teardown)(unsigned int cpu))
cpu               218 include/linux/cpuhotplug.h 					       int (*startup)(unsigned int cpu),
cpu               219 include/linux/cpuhotplug.h 					       int (*teardown)(unsigned int cpu))
cpu               238 include/linux/cpuhotplug.h 					    int (*startup)(unsigned int cpu),
cpu               239 include/linux/cpuhotplug.h 					    int (*teardown)(unsigned int cpu))
cpu               247 include/linux/cpuhotplug.h 						     int (*startup)(unsigned int cpu),
cpu               248 include/linux/cpuhotplug.h 						     int (*teardown)(unsigned int cpu))
cpu               268 include/linux/cpuhotplug.h 					  int (*startup)(unsigned int cpu,
cpu               270 include/linux/cpuhotplug.h 					  int (*teardown)(unsigned int cpu,
cpu                85 include/linux/cpuidle.h 	unsigned int		cpu;
cpu               263 include/linux/cpuidle.h extern int cpuidle_governor_latency_req(unsigned int cpu);
cpu                 8 include/linux/cpuidle_haltpoll.h static inline void arch_haltpoll_enable(unsigned int cpu)
cpu                12 include/linux/cpuidle_haltpoll.h static inline void arch_haltpoll_disable(unsigned int cpu)
cpu               117 include/linux/cpumask.h #define cpu_online(cpu)		cpumask_test_cpu((cpu), cpu_online_mask)
cpu               118 include/linux/cpumask.h #define cpu_possible(cpu)	cpumask_test_cpu((cpu), cpu_possible_mask)
cpu               119 include/linux/cpumask.h #define cpu_present(cpu)	cpumask_test_cpu((cpu), cpu_present_mask)
cpu               120 include/linux/cpumask.h #define cpu_active(cpu)		cpumask_test_cpu((cpu), cpu_active_mask)
cpu               126 include/linux/cpumask.h #define cpu_online(cpu)		((cpu) == 0)
cpu               127 include/linux/cpumask.h #define cpu_possible(cpu)	((cpu) == 0)
cpu               128 include/linux/cpumask.h #define cpu_present(cpu)	((cpu) == 0)
cpu               129 include/linux/cpumask.h #define cpu_active(cpu)		((cpu) == 0)
cpu               134 include/linux/cpumask.h static inline void cpu_max_bits_warn(unsigned int cpu, unsigned int bits)
cpu               137 include/linux/cpumask.h 	WARN_ON_ONCE(cpu >= bits);
cpu               142 include/linux/cpumask.h static inline unsigned int cpumask_check(unsigned int cpu)
cpu               144 include/linux/cpumask.h 	cpu_max_bits_warn(cpu, nr_cpumask_bits);
cpu               145 include/linux/cpumask.h 	return cpu;
cpu               187 include/linux/cpumask.h 					   unsigned int cpu)
cpu               197 include/linux/cpumask.h #define for_each_cpu(cpu, mask)			\
cpu               198 include/linux/cpumask.h 	for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask)
cpu               199 include/linux/cpumask.h #define for_each_cpu_not(cpu, mask)		\
cpu               200 include/linux/cpumask.h 	for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask)
cpu               201 include/linux/cpumask.h #define for_each_cpu_wrap(cpu, mask, start)	\
cpu               202 include/linux/cpumask.h 	for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask, (void)(start))
cpu               203 include/linux/cpumask.h #define for_each_cpu_and(cpu, mask1, mask2)	\
cpu               204 include/linux/cpumask.h 	for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask1, (void)mask2)
cpu               246 include/linux/cpumask.h int cpumask_any_but(const struct cpumask *mask, unsigned int cpu);
cpu               256 include/linux/cpumask.h #define for_each_cpu(cpu, mask)				\
cpu               257 include/linux/cpumask.h 	for ((cpu) = -1;				\
cpu               258 include/linux/cpumask.h 		(cpu) = cpumask_next((cpu), (mask)),	\
cpu               259 include/linux/cpumask.h 		(cpu) < nr_cpu_ids;)
cpu               268 include/linux/cpumask.h #define for_each_cpu_not(cpu, mask)				\
cpu               269 include/linux/cpumask.h 	for ((cpu) = -1;					\
cpu               270 include/linux/cpumask.h 		(cpu) = cpumask_next_zero((cpu), (mask)),	\
cpu               271 include/linux/cpumask.h 		(cpu) < nr_cpu_ids;)
cpu               285 include/linux/cpumask.h #define for_each_cpu_wrap(cpu, mask, start)					\
cpu               286 include/linux/cpumask.h 	for ((cpu) = cpumask_next_wrap((start)-1, (mask), (start), false);	\
cpu               287 include/linux/cpumask.h 	     (cpu) < nr_cpumask_bits;						\
cpu               288 include/linux/cpumask.h 	     (cpu) = cpumask_next_wrap((cpu), (mask), (start), true))
cpu               304 include/linux/cpumask.h #define for_each_cpu_and(cpu, mask1, mask2)				\
cpu               305 include/linux/cpumask.h 	for ((cpu) = -1;						\
cpu               306 include/linux/cpumask.h 		(cpu) = cpumask_next_and((cpu), (mask1), (mask2)),	\
cpu               307 include/linux/cpumask.h 		(cpu) < nr_cpu_ids;)
cpu               325 include/linux/cpumask.h static inline void cpumask_set_cpu(unsigned int cpu, struct cpumask *dstp)
cpu               327 include/linux/cpumask.h 	set_bit(cpumask_check(cpu), cpumask_bits(dstp));
cpu               330 include/linux/cpumask.h static inline void __cpumask_set_cpu(unsigned int cpu, struct cpumask *dstp)
cpu               332 include/linux/cpumask.h 	__set_bit(cpumask_check(cpu), cpumask_bits(dstp));
cpu               341 include/linux/cpumask.h static inline void cpumask_clear_cpu(int cpu, struct cpumask *dstp)
cpu               343 include/linux/cpumask.h 	clear_bit(cpumask_check(cpu), cpumask_bits(dstp));
cpu               346 include/linux/cpumask.h static inline void __cpumask_clear_cpu(int cpu, struct cpumask *dstp)
cpu               348 include/linux/cpumask.h 	__clear_bit(cpumask_check(cpu), cpumask_bits(dstp));
cpu               358 include/linux/cpumask.h static inline int cpumask_test_cpu(int cpu, const struct cpumask *cpumask)
cpu               360 include/linux/cpumask.h 	return test_bit(cpumask_check(cpu), cpumask_bits((cpumask)));
cpu               372 include/linux/cpumask.h static inline int cpumask_test_and_set_cpu(int cpu, struct cpumask *cpumask)
cpu               374 include/linux/cpumask.h 	return test_and_set_bit(cpumask_check(cpu), cpumask_bits(cpumask));
cpu               386 include/linux/cpumask.h static inline int cpumask_test_and_clear_cpu(int cpu, struct cpumask *cpumask)
cpu               388 include/linux/cpumask.h 	return test_and_clear_bit(cpumask_check(cpu), cpumask_bits(cpumask));
cpu               626 include/linux/cpumask.h #define cpumask_of(cpu) (get_cpu_mask(cpu))
cpu               806 include/linux/cpumask.h #define for_each_possible_cpu(cpu) for_each_cpu((cpu), cpu_possible_mask)
cpu               807 include/linux/cpumask.h #define for_each_online_cpu(cpu)   for_each_cpu((cpu), cpu_online_mask)
cpu               808 include/linux/cpumask.h #define for_each_present_cpu(cpu)  for_each_cpu((cpu), cpu_present_mask)
cpu               821 include/linux/cpumask.h set_cpu_possible(unsigned int cpu, bool possible)
cpu               824 include/linux/cpumask.h 		cpumask_set_cpu(cpu, &__cpu_possible_mask);
cpu               826 include/linux/cpumask.h 		cpumask_clear_cpu(cpu, &__cpu_possible_mask);
cpu               830 include/linux/cpumask.h set_cpu_present(unsigned int cpu, bool present)
cpu               833 include/linux/cpumask.h 		cpumask_set_cpu(cpu, &__cpu_present_mask);
cpu               835 include/linux/cpumask.h 		cpumask_clear_cpu(cpu, &__cpu_present_mask);
cpu               838 include/linux/cpumask.h void set_cpu_online(unsigned int cpu, bool online);
cpu               841 include/linux/cpumask.h set_cpu_active(unsigned int cpu, bool active)
cpu               844 include/linux/cpumask.h 		cpumask_set_cpu(cpu, &__cpu_active_mask);
cpu               846 include/linux/cpumask.h 		cpumask_clear_cpu(cpu, &__cpu_active_mask);
cpu               879 include/linux/cpumask.h static inline const struct cpumask *get_cpu_mask(unsigned int cpu)
cpu               881 include/linux/cpumask.h 	const unsigned long *p = cpu_bit_bitmap[1 + cpu % BITS_PER_LONG];
cpu               882 include/linux/cpumask.h 	p -= cpu / BITS_PER_LONG;
cpu               886 include/linux/cpumask.h #define cpu_is_offline(cpu)	unlikely(!cpu_online(cpu))
cpu                36 include/linux/dca.h 				     int cpu);
cpu                55 include/linux/dca.h u8 dca_get_tag(int cpu);
cpu                56 include/linux/dca.h u8 dca3_get_tag(struct device *dev, int cpu);
cpu                43 include/linux/dw_apb_timer.h dw_apb_clockevent_init(int cpu, const char *name, unsigned rating,
cpu                62 include/linux/energy_model.h 	int (*active_power)(unsigned long *power, unsigned long *freq, int cpu);
cpu                66 include/linux/energy_model.h struct em_perf_domain *em_cpu_get(int cpu);
cpu                84 include/linux/energy_model.h 	int i, cpu;
cpu                91 include/linux/energy_model.h 	cpu = cpumask_first(to_cpumask(pd->cpus));
cpu                92 include/linux/energy_model.h 	scale_cpu = arch_scale_cpu_capacity(cpu);
cpu               172 include/linux/energy_model.h static inline struct em_perf_domain *em_cpu_get(int cpu)
cpu               812 include/linux/ftrace.h extern void ftrace_graph_init_idle_task(struct task_struct *t, int cpu);
cpu               829 include/linux/ftrace.h static inline void ftrace_graph_init_idle_task(struct task_struct *t, int cpu) { }
cpu               306 include/linux/genhd.h #define part_stat_get_cpu(part, field, cpu)					\
cpu               307 include/linux/genhd.h 	(per_cpu_ptr((part)->dkstats, (cpu))->field)
cpu               348 include/linux/genhd.h #define part_stat_get_cpu(part, field, cpu)	part_stat_get(part, field)
cpu               398 include/linux/genhd.h #define part_stat_local_read_cpu(gendiskp, field, cpu)			\
cpu               399 include/linux/genhd.h 	local_read(&(part_stat_get_cpu(gendiskp, field, cpu)))
cpu               215 include/linux/hrtimer.h 	unsigned int			cpu;
cpu               532 include/linux/hrtimer.h int hrtimers_prepare_cpu(unsigned int cpu);
cpu               534 include/linux/hrtimer.h int hrtimers_dead_cpu(unsigned int cpu);
cpu                67 include/linux/hw_breakpoint.h 				int cpu);
cpu               111 include/linux/hw_breakpoint.h 				int cpu)		{ return NULL; }
cpu                15 include/linux/hypervisor.h static inline void hypervisor_pin_vcpu(int cpu)
cpu                17 include/linux/hypervisor.h 	x86_platform.hyper.pin_vcpu(cpu);
cpu                24 include/linux/hypervisor.h static inline void hypervisor_pin_vcpu(int cpu)
cpu               121 include/linux/intel_rapl.h 	int (*read_raw)(int cpu, struct reg_action *ra);
cpu               122 include/linux/intel_rapl.h 	int (*write_raw)(int cpu, struct reg_action *ra);
cpu               148 include/linux/intel_rapl.h struct rapl_package *rapl_find_package_domain(int cpu, struct rapl_if_priv *priv);
cpu               149 include/linux/intel_rapl.h struct rapl_package *rapl_add_package(int cpu, struct rapl_if_priv *priv);
cpu               672 include/linux/interrupt.h extern void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu);
cpu               165 include/linux/iova.h void free_cpu_cached_iovas(unsigned int cpu, struct iova_domain *iovad);
cpu               269 include/linux/iova.h static inline void free_cpu_cached_iovas(unsigned int cpu,
cpu               510 include/linux/irq.h 	void		(*ipi_send_single)(struct irq_data *data, unsigned int cpu);
cpu               576 include/linux/irq.h extern int irq_affinity_online_cpu(unsigned int cpu);
cpu              1193 include/linux/irq.h void irq_matrix_free(struct irq_matrix *m, unsigned int cpu,
cpu              1203 include/linux/irq.h irq_hw_number_t ipi_get_hwirq(unsigned int irq, unsigned int cpu);
cpu              1204 include/linux/irq.h int __ipi_send_single(struct irq_desc *desc, unsigned int cpu);
cpu              1206 include/linux/irq.h int ipi_send_single(unsigned int virq, unsigned int cpu);
cpu                22 include/linux/irq_cpustat.h #define __IRQ_STAT(cpu, member)	(per_cpu(irq_stat.member, cpu))
cpu                26 include/linux/irq_cpustat.h #define nmi_count(cpu)		__IRQ_STAT((cpu), __nmi_count)	/* i386 */
cpu                40 include/linux/irq_work.h bool irq_work_queue_on(struct irq_work *work, int cpu);
cpu               158 include/linux/irqchip/arm-gic.h void gic_init(void __iomem *dist , void __iomem *cpu);
cpu               161 include/linux/irqchip/arm-gic.h int gic_get_cpu_id(unsigned int cpu);
cpu               180 include/linux/kdb.h 	unsigned int cpu = task_cpu(p);
cpu               181 include/linux/kdb.h 	if (cpu > num_possible_cpus())
cpu               182 include/linux/kdb.h 		cpu = 0;
cpu               183 include/linux/kdb.h 	return cpu;
cpu                49 include/linux/kernel_stat.h #define kstat_cpu(cpu) per_cpu(kstat, cpu)
cpu                50 include/linux/kernel_stat.h #define kcpustat_cpu(cpu) per_cpu(kernel_cpustat, cpu)
cpu                54 include/linux/kernel_stat.h extern unsigned int kstat_irqs_cpu(unsigned int irq, int cpu);
cpu                62 include/linux/kernel_stat.h static inline unsigned int kstat_softirqs_cpu(unsigned int irq, int cpu)
cpu                64 include/linux/kernel_stat.h        return kstat_cpu(cpu).softirqs[irq];
cpu                76 include/linux/kernel_stat.h static inline unsigned int kstat_cpu_irqs_sum(unsigned int cpu)
cpu                78 include/linux/kernel_stat.h 	return kstat_cpu(cpu).irqs_sum;
cpu               309 include/linux/kexec.h void crash_save_cpu(struct pt_regs *regs, int cpu);
cpu               318 include/linux/kgdb.h extern int kgdb_nmicallback(int cpu, void *regs);
cpu               319 include/linux/kgdb.h extern int kgdb_nmicallin(int cpu, int trapnr, void *regs, int err_code,
cpu                31 include/linux/kthread.h 					  unsigned int cpu,
cpu                53 include/linux/kthread.h void kthread_bind(struct task_struct *k, unsigned int cpu);
cpu               178 include/linux/kthread.h kthread_create_worker_on_cpu(int cpu, unsigned int flags,
cpu               268 include/linux/kvm_host.h 	int cpu;
cpu               859 include/linux/kvm_host.h void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu);
cpu               862 include/linux/kvm_host.h void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
cpu               162 include/linux/lockdep.h 	int				cpu;
cpu               612 include/linux/memcontrol.h 	int cpu;
cpu               614 include/linux/memcontrol.h 	for_each_possible_cpu(cpu)
cpu               615 include/linux/memcontrol.h 		x += per_cpu(memcg->vmstats_local->stat[idx], cpu);
cpu               690 include/linux/memcontrol.h 	int cpu;
cpu               696 include/linux/memcontrol.h 	for_each_possible_cpu(cpu)
cpu               697 include/linux/memcontrol.h 		x += per_cpu(pn->lruvec_stat_local->count[idx], cpu);
cpu              1119 include/linux/mm.h static inline int cpu_pid_to_cpupid(int cpu, int pid)
cpu              1121 include/linux/mm.h 	return ((cpu & LAST__CPU_MASK) << LAST__PID_SHIFT) | (pid & LAST__PID_MASK);
cpu                15 include/linux/mvebu-pmsu.h int mvebu_pmsu_dfs_request(int cpu);
cpu                17 include/linux/mvebu-pmsu.h static inline int mvebu_pmsu_dfs_request(int cpu) { return -ENODEV; }
cpu               677 include/linux/netdevice.h 	u16 cpu;
cpu              3015 include/linux/netdevice.h 	unsigned int		cpu;
cpu              3904 include/linux/netdevice.h static inline void __netif_tx_lock(struct netdev_queue *txq, int cpu)
cpu              3907 include/linux/netdevice.h 	txq->xmit_lock_owner = cpu;
cpu              3971 include/linux/netdevice.h 	int cpu;
cpu              3974 include/linux/netdevice.h 	cpu = smp_processor_id();
cpu              3984 include/linux/netdevice.h 		__netif_tx_lock(txq, cpu);
cpu              4019 include/linux/netdevice.h #define HARD_TX_LOCK(dev, txq, cpu) {			\
cpu              4021 include/linux/netdevice.h 		__netif_tx_lock(txq, cpu);		\
cpu              4043 include/linux/netdevice.h 	int cpu;
cpu              4046 include/linux/netdevice.h 	cpu = smp_processor_id();
cpu              4050 include/linux/netdevice.h 		__netif_tx_lock(txq, cpu);
cpu               441 include/linux/netfilter/x_tables.h xt_get_per_cpu_counter(struct xt_counters *cnt, unsigned int cpu)
cpu               444 include/linux/netfilter/x_tables.h 		return per_cpu_ptr((void __percpu *) (unsigned long) cnt->pcnt, cpu);
cpu                49 include/linux/nmi.h extern int lockup_detector_online_cpu(unsigned int cpu);
cpu                50 include/linux/nmi.h extern int lockup_detector_offline_cpu(unsigned int cpu);
cpu               122 include/linux/nmi.h int watchdog_nmi_enable(unsigned int cpu);
cpu               123 include/linux/nmi.h void watchdog_nmi_disable(unsigned int cpu);
cpu               162 include/linux/nmi.h static inline bool trigger_single_cpu_backtrace(int cpu)
cpu               164 include/linux/nmi.h 	arch_trigger_cpumask_backtrace(cpumask_of(cpu), false);
cpu               187 include/linux/nmi.h static inline bool trigger_single_cpu_backtrace(int cpu)
cpu               138 include/linux/node.h extern int register_cpu_under_node(unsigned int cpu, unsigned int nid);
cpu               139 include/linux/node.h extern int unregister_cpu_under_node(unsigned int cpu, unsigned int nid);
cpu               163 include/linux/node.h static inline int register_cpu_under_node(unsigned int cpu, unsigned int nid)
cpu               167 include/linux/node.h static inline int unregister_cpu_under_node(unsigned int cpu, unsigned int nid)
cpu               300 include/linux/of.h extern int of_find_last_cache_level(unsigned int cpu);
cpu               352 include/linux/of.h extern struct device_node *of_get_cpu_node(int cpu, unsigned int *thread);
cpu               757 include/linux/of.h static inline struct device_node *of_get_cpu_node(int cpu,
cpu              1251 include/linux/of.h #define for_each_of_cpu_node(cpu) \
cpu              1252 include/linux/of.h 	for (cpu = of_get_next_cpu_node(NULL); cpu != NULL; \
cpu              1253 include/linux/of.h 	     cpu = of_get_next_cpu_node(cpu))
cpu                49 include/linux/of_device.h static inline struct device_node *of_cpu_device_node_get(int cpu)
cpu                52 include/linux/of_device.h 	cpu_dev = get_cpu_device(cpu);
cpu                54 include/linux/of_device.h 		return of_get_cpu_node(cpu, NULL);
cpu               104 include/linux/of_device.h static inline struct device_node *of_cpu_device_node_get(int cpu)
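The of_get_cpu_node()/for_each_of_cpu_node() entries above are the usual way to walk the /cpus nodes of a device tree; for_each_of_cpu_node() drops the reference to the previous node on each step. A minimal sketch, with a hypothetical helper name:

#include <linux/of.h>

static int demo_count_dt_cpus(void)
{
	struct device_node *cpu;
	int n = 0;

	/* visits every cpu node under /cpus */
	for_each_of_cpu_node(cpu)
		n++;

	return n;
}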
cpu                12 include/linux/osq_lock.h 	int cpu; /* encoded CPU # + 1 value */
cpu                38 include/linux/padata.h 	int			cpu;
cpu               122 include/linux/padata.h 	int				cpu;
cpu               233 include/linux/percpu-defs.h #define per_cpu_ptr(ptr, cpu)						\
cpu               236 include/linux/percpu-defs.h 	SHIFT_PERCPU_PTR((ptr), per_cpu_offset((cpu)));			\
cpu               263 include/linux/percpu-defs.h #define per_cpu_ptr(ptr, cpu)	({ (void)(cpu); VERIFY_PERCPU_PTR(ptr); })
cpu               269 include/linux/percpu-defs.h #define per_cpu(var, cpu)	(*per_cpu_ptr(&(var), cpu))
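per_cpu_ptr()/per_cpu() above resolve one CPU's instance of a per-CPU variable; summing such a variable over all possible CPUs, as the memcontrol.h and vmstat.h entries in this listing do, looks roughly like the following sketch (the counter is hypothetical):

#include <linux/percpu.h>
#include <linux/cpumask.h>

static DEFINE_PER_CPU(unsigned long, demo_hits);	/* hypothetical counter */

static unsigned long demo_hits_sum(void)
{
	unsigned long sum = 0;
	int cpu;

	/* read each possible CPU's copy and accumulate */
	for_each_possible_cpu(cpu)
		sum += per_cpu(demo_hits, cpu);

	return sum;
}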
cpu                98 include/linux/percpu.h typedef void * (*pcpu_fc_alloc_fn_t)(unsigned int cpu, size_t size,
cpu               167 include/linux/perf/arm_pmu.h int armpmu_request_irq(int irq, int cpu);
cpu               168 include/linux/perf/arm_pmu.h void armpmu_free_irq(int irq, int cpu);
cpu               663 include/linux/perf_event.h 	int				cpu;
cpu               920 include/linux/perf_event.h 				int cpu,
cpu               959 include/linux/perf_event.h 		u32	cpu;
cpu              1476 include/linux/perf_event.h int perf_event_init_cpu(unsigned int cpu);
cpu              1477 include/linux/perf_event.h int perf_event_exit_cpu(unsigned int cpu);
cpu                16 include/linux/platform_data/arm-ux500-pm.h bool prcmu_is_cpu_in_wfi(int cpu);
cpu               181 include/linux/pm_domain.h 	int cpu;
cpu               216 include/linux/posix-timers.h 		struct cpu_timer	cpu;
cpu               294 include/linux/preempt.h 	void (*sched_in)(struct preempt_notifier *notifier, int cpu);
cpu                16 include/linux/psci.h bool psci_tos_resident_on(int cpu);
cpu                21 include/linux/psi.h void psi_memstall_tick(struct task_struct *task, int cpu);
cpu               229 include/linux/pstore.h pstore_ftrace_encode_cpu(struct pstore_ftrace_record *rec, unsigned int cpu)
cpu               231 include/linux/pstore.h 	rec->ip |= cpu;
cpu               253 include/linux/pstore.h pstore_ftrace_encode_cpu(struct pstore_ftrace_record *rec, unsigned int cpu)
cpu               256 include/linux/pstore.h 	rec->ts |= cpu;
cpu                78 include/linux/rcupdate.h void rcu_report_dead(unsigned int cpu);
cpu                79 include/linux/rcupdate.h void rcutree_migrate_callbacks(int cpu);
cpu                18 include/linux/rcutiny.h static inline bool rcu_eqs_special_set(int cpu) { return false; }
cpu                65 include/linux/rcutiny.h static inline void rcu_virt_note_context_switch(int cpu) { }
cpu                97 include/linux/rcutiny.h static inline void rcu_cpu_starting(unsigned int cpu) { }
cpu                30 include/linux/rcutree.h static inline void rcu_virt_note_context_switch(int cpu)
cpu                39 include/linux/rcutree.h bool rcu_eqs_special_set(int cpu);
cpu                61 include/linux/rcutree.h int rcutree_prepare_cpu(unsigned int cpu);
cpu                62 include/linux/rcutree.h int rcutree_online_cpu(unsigned int cpu);
cpu                63 include/linux/rcutree.h int rcutree_offline_cpu(unsigned int cpu);
cpu                64 include/linux/rcutree.h int rcutree_dead_cpu(unsigned int cpu);
cpu                65 include/linux/rcutree.h int rcutree_dying_cpu(unsigned int cpu);
cpu                66 include/linux/rcutree.h void rcu_cpu_starting(unsigned int cpu);
cpu                53 include/linux/relay.h 	unsigned int cpu;		/* this buf's cpu */
cpu               181 include/linux/relay.h 				   unsigned int cpu,
cpu               294 include/linux/relay.h int relay_prepare_cpu(unsigned int cpu);
cpu               100 include/linux/ring_buffer.h int ring_buffer_wait(struct ring_buffer *buffer, int cpu, int full);
cpu               101 include/linux/ring_buffer.h __poll_t ring_buffer_poll_wait(struct ring_buffer *buffer, int cpu,
cpu               109 include/linux/ring_buffer.h int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size, int cpu);
cpu               124 include/linux/ring_buffer.h ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts,
cpu               127 include/linux/ring_buffer.h ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts,
cpu               131 include/linux/ring_buffer.h ring_buffer_read_prepare(struct ring_buffer *buffer, int cpu, gfp_t flags);
cpu               143 include/linux/ring_buffer.h unsigned long ring_buffer_size(struct ring_buffer *buffer, int cpu);
cpu               145 include/linux/ring_buffer.h void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu);
cpu               150 include/linux/ring_buffer.h 			 struct ring_buffer *buffer_b, int cpu);
cpu               154 include/linux/ring_buffer.h 		     struct ring_buffer *buffer_b, int cpu)
cpu               161 include/linux/ring_buffer.h bool ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu);
cpu               169 include/linux/ring_buffer.h void ring_buffer_record_disable_cpu(struct ring_buffer *buffer, int cpu);
cpu               170 include/linux/ring_buffer.h void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu);
cpu               172 include/linux/ring_buffer.h u64 ring_buffer_oldest_event_ts(struct ring_buffer *buffer, int cpu);
cpu               173 include/linux/ring_buffer.h unsigned long ring_buffer_bytes_cpu(struct ring_buffer *buffer, int cpu);
cpu               176 include/linux/ring_buffer.h unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu);
cpu               177 include/linux/ring_buffer.h unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu);
cpu               178 include/linux/ring_buffer.h unsigned long ring_buffer_commit_overrun_cpu(struct ring_buffer *buffer, int cpu);
cpu               179 include/linux/ring_buffer.h unsigned long ring_buffer_dropped_events_cpu(struct ring_buffer *buffer, int cpu);
cpu               180 include/linux/ring_buffer.h unsigned long ring_buffer_read_events_cpu(struct ring_buffer *buffer, int cpu);
cpu               182 include/linux/ring_buffer.h u64 ring_buffer_time_stamp(struct ring_buffer *buffer, int cpu);
cpu               184 include/linux/ring_buffer.h 				      int cpu, u64 *ts);
cpu               190 include/linux/ring_buffer.h size_t ring_buffer_nr_pages(struct ring_buffer *buffer, int cpu);
cpu               191 include/linux/ring_buffer.h size_t ring_buffer_nr_dirty_pages(struct ring_buffer *buffer, int cpu);
cpu               193 include/linux/ring_buffer.h void *ring_buffer_alloc_read_page(struct ring_buffer *buffer, int cpu);
cpu               194 include/linux/ring_buffer.h void ring_buffer_free_read_page(struct ring_buffer *buffer, int cpu, void *data);
cpu               196 include/linux/ring_buffer.h 			  size_t len, int cpu, int full);
cpu               208 include/linux/ring_buffer.h int trace_rb_cpu_prepare(unsigned int cpu, struct hlist_node *node);
cpu               436 include/linux/sbitmap.h 				    unsigned int *cpu)
cpu               440 include/linux/sbitmap.h 	*cpu = get_cpu();
cpu               461 include/linux/sbitmap.h 					    unsigned int *cpu,
cpu               466 include/linux/sbitmap.h 	*cpu = get_cpu();
cpu               497 include/linux/sbitmap.h 			 unsigned int cpu);
cpu               652 include/linux/sched.h 	unsigned int			cpu;
cpu              1605 include/linux/sched.h extern int idle_cpu(int cpu);
cpu              1606 include/linux/sched.h extern int available_idle_cpu(int cpu);
cpu              1611 include/linux/sched.h extern struct task_struct *idle_task(int cpu);
cpu              1624 include/linux/sched.h extern struct task_struct *curr_task(int cpu);
cpu              1625 include/linux/sched.h extern void ia64_set_curr_task(int cpu, struct task_struct *p);
cpu              1817 include/linux/sched.h 	return READ_ONCE(p->cpu);
cpu              1819 include/linux/sched.h 	return READ_ONCE(task_thread_info(p)->cpu);
cpu              1823 include/linux/sched.h extern void set_task_cpu(struct task_struct *p, unsigned int cpu);
cpu              1832 include/linux/sched.h static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
cpu              1847 include/linux/sched.h static inline bool vcpu_is_preempted(int cpu)
cpu                21 include/linux/sched/clock.h extern u64 sched_clock_cpu(int cpu);
cpu                43 include/linux/sched/clock.h static inline u64 cpu_clock(int cpu)
cpu                77 include/linux/sched/clock.h static inline u64 cpu_clock(int cpu)
cpu                79 include/linux/sched/clock.h 	return sched_clock_cpu(cpu);
cpu                21 include/linux/sched/cpufreq.h void cpufreq_add_update_util_hook(int cpu, struct update_util_data *data,
cpu                24 include/linux/sched/cpufreq.h void cpufreq_remove_update_util_hook(int cpu);
cpu                12 include/linux/sched/debug.h extern void dump_cpu_task(int cpu);
cpu                 9 include/linux/sched/hotplug.h extern int sched_cpu_starting(unsigned int cpu);
cpu                10 include/linux/sched/hotplug.h extern int sched_cpu_activate(unsigned int cpu);
cpu                11 include/linux/sched/hotplug.h extern int sched_cpu_deactivate(unsigned int cpu);
cpu                14 include/linux/sched/hotplug.h extern int sched_cpu_dying(unsigned int cpu);
cpu                14 include/linux/sched/idle.h extern void wake_up_if_idle(int cpu);
cpu                24 include/linux/sched/isolation.h extern bool housekeeping_test_cpu(int cpu, enum hk_flags flags);
cpu                49 include/linux/sched/isolation.h static inline bool housekeeping_cpu(int cpu, enum hk_flags flags)
cpu                53 include/linux/sched/isolation.h 		return housekeeping_test_cpu(cpu, flags);
cpu                10 include/linux/sched/nohz.h extern void nohz_balance_enter_idle(int cpu);
cpu                13 include/linux/sched/nohz.h static inline void nohz_balance_enter_idle(int cpu) { }
cpu                27 include/linux/sched/nohz.h extern void wake_up_nohz_cpu(int cpu);
cpu                29 include/linux/sched/nohz.h static inline void wake_up_nohz_cpu(int cpu) { }
cpu                22 include/linux/sched/stat.h extern unsigned long nr_iowait_cpu(int cpu);
cpu                48 include/linux/sched/task.h extern void init_idle(struct task_struct *idle, int cpu);
cpu                51 include/linux/sched/topology.h extern int arch_asym_cpu_priority(int cpu);
cpu               222 include/linux/sched/topology.h unsigned long arch_scale_cpu_capacity(int cpu)
cpu               240 include/linux/seq_file.h extern struct hlist_node *seq_hlist_start_percpu(struct hlist_head __percpu *head, int *cpu, loff_t pos);
cpu               242 include/linux/seq_file.h extern struct hlist_node *seq_hlist_next_percpu(void *v, struct hlist_head __percpu *head, int *cpu, loff_t *pos);
cpu               708 include/linux/slab.h int slab_prepare_cpu(unsigned int cpu);
cpu               709 include/linux/slab.h int slab_dead_cpu(unsigned int cpu);
cpu                52 include/linux/smp.h void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info),
cpu                56 include/linux/smp.h void on_each_cpu_cond_mask(bool (*cond_func)(int cpu, void *info),
cpu                60 include/linux/smp.h int smp_call_function_single_async(int cpu, call_single_data_t *csd);
cpu                83 include/linux/smp.h extern void smp_send_reschedule(int cpu);
cpu               153 include/linux/smp.h static inline void smp_send_reschedule(int cpu) { }
cpu               236 include/linux/smp.h int smp_call_on_cpu(unsigned int cpu, int (*func)(void *), void *par,
cpu               240 include/linux/smp.h int smpcfd_prepare_cpu(unsigned int cpu);
cpu               241 include/linux/smp.h int smpcfd_dead_cpu(unsigned int cpu);
cpu               242 include/linux/smp.h int smpcfd_dying_cpu(unsigned int cpu);
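smp_call_on_cpu(), listed above, runs a callback in task context on a chosen CPU and returns its result; a sketch with a hypothetical callback name, assuming the phys argument is left false:

#include <linux/smp.h>

static int demo_on_cpu(void *arg)
{
	/* executes in task context on the selected CPU */
	return 0;
}

/* e.g. err = smp_call_on_cpu(cpu, demo_on_cpu, NULL, false); */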
cpu                34 include/linux/smpboot.h 	int				(*thread_should_run)(unsigned int cpu);
cpu                35 include/linux/smpboot.h 	void				(*thread_fn)(unsigned int cpu);
cpu                36 include/linux/smpboot.h 	void				(*create)(unsigned int cpu);
cpu                37 include/linux/smpboot.h 	void				(*setup)(unsigned int cpu);
cpu                38 include/linux/smpboot.h 	void				(*cleanup)(unsigned int cpu, bool online);
cpu                39 include/linux/smpboot.h 	void				(*park)(unsigned int cpu);
cpu                40 include/linux/smpboot.h 	void				(*unpark)(unsigned int cpu);
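The smp_hotplug_thread callbacks listed above describe one kthread per CPU managed by the smpboot infrastructure; a minimal sketch of wiring them up (all names here are hypothetical):

#include <linux/smpboot.h>
#include <linux/percpu.h>

static DEFINE_PER_CPU(struct task_struct *, demo_task);

static int demo_should_run(unsigned int cpu)
{
	return 0;			/* nothing pending in this sketch */
}

static void demo_thread_fn(unsigned int cpu)
{
	/* per-CPU work, invoked in the kthread bound to @cpu */
}

static struct smp_hotplug_thread demo_threads = {
	.store			= &demo_task,
	.thread_should_run	= demo_should_run,
	.thread_fn		= demo_thread_fn,
	.thread_comm		= "demo/%u",
};

/* smpboot_register_percpu_thread(&demo_threads) creates one thread per CPU
 * and parks/unparks them as CPUs go down and up. */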
cpu                 5 include/linux/soc/renesas/rcar-sysc.h int rcar_sysc_power_down_cpu(unsigned int cpu);
cpu                 6 include/linux/soc/renesas/rcar-sysc.h int rcar_sysc_power_up_cpu(unsigned int cpu);
cpu                41 include/linux/srcutree.h 	int cpu;
cpu                31 include/linux/stop_machine.h int stop_one_cpu(unsigned int cpu, cpu_stop_fn_t fn, void *arg);
cpu                33 include/linux/stop_machine.h bool stop_one_cpu_nowait(unsigned int cpu, cpu_stop_fn_t fn, void *arg,
cpu                37 include/linux/stop_machine.h void stop_machine_park(int cpu);
cpu                38 include/linux/stop_machine.h void stop_machine_unpark(int cpu);
cpu                51 include/linux/stop_machine.h static inline int stop_one_cpu(unsigned int cpu, cpu_stop_fn_t fn, void *arg)
cpu                55 include/linux/stop_machine.h 	if (cpu == smp_processor_id())
cpu                70 include/linux/stop_machine.h static inline bool stop_one_cpu_nowait(unsigned int cpu,
cpu                74 include/linux/stop_machine.h 	if (cpu == smp_processor_id()) {
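stop_one_cpu() above runs a cpu_stop_fn_t on the target CPU's stopper kthread; a sketch with a hypothetical callback:

#include <linux/stop_machine.h>

static int demo_stop_fn(void *arg)
{
	/* runs on the target CPU with preemption disabled */
	return 0;
}

/* e.g. stop_one_cpu(cpu, demo_stop_fn, NULL) waits for the callback
 * to finish on @cpu. */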
cpu               518 include/linux/sunrpc/svc.h struct svc_pool *  svc_pool_for_cpu(struct svc_serv *serv, int cpu);
cpu               339 include/linux/swap.h extern void lru_add_drain_cpu(int cpu);
cpu               732 include/linux/syscalls.h asmlinkage long sys_getcpu(unsigned __user *cpu, unsigned __user *node, struct getcpu_cache __user *cache);
cpu               904 include/linux/syscalls.h 		pid_t pid, int cpu, int group_fd, unsigned long flags);
cpu                22 include/linux/tick.h extern void tick_cleanup_dead_cpu(int cpu);
cpu                28 include/linux/tick.h static inline void tick_cleanup_dead_cpu(int cpu) { }
cpu                72 include/linux/tick.h extern void tick_offline_cpu(unsigned int cpu);
cpu                74 include/linux/tick.h static inline void tick_offline_cpu(unsigned int cpu) { }
cpu               123 include/linux/tick.h extern bool tick_nohz_tick_stopped_cpu(int cpu);
cpu               134 include/linux/tick.h extern unsigned long tick_nohz_get_idle_calls_cpu(int cpu);
cpu               135 include/linux/tick.h extern u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time);
cpu               136 include/linux/tick.h extern u64 get_cpu_iowait_time_us(int cpu, u64 *last_update_time);
cpu               148 include/linux/tick.h static inline int tick_nohz_tick_stopped_cpu(int cpu) { return 0; }
cpu               165 include/linux/tick.h static inline u64 get_cpu_idle_time_us(int cpu, u64 *unused) { return -1; }
cpu               166 include/linux/tick.h static inline u64 get_cpu_iowait_time_us(int cpu, u64 *unused) { return -1; }
cpu               183 include/linux/tick.h static inline bool tick_nohz_full_cpu(int cpu)
cpu               188 include/linux/tick.h 	return cpumask_test_cpu(cpu, tick_nohz_full_mask);
cpu               199 include/linux/tick.h extern void tick_nohz_dep_set_cpu(int cpu, enum tick_dep_bits bit);
cpu               200 include/linux/tick.h extern void tick_nohz_dep_clear_cpu(int cpu, enum tick_dep_bits bit);
cpu               226 include/linux/tick.h static inline void tick_dep_set_cpu(int cpu, enum tick_dep_bits bit)
cpu               228 include/linux/tick.h 	if (tick_nohz_full_cpu(cpu))
cpu               229 include/linux/tick.h 		tick_nohz_dep_set_cpu(cpu, bit);
cpu               232 include/linux/tick.h static inline void tick_dep_clear_cpu(int cpu, enum tick_dep_bits bit)
cpu               234 include/linux/tick.h 	if (tick_nohz_full_cpu(cpu))
cpu               235 include/linux/tick.h 		tick_nohz_dep_clear_cpu(cpu, bit);
cpu               263 include/linux/tick.h extern void tick_nohz_full_kick_cpu(int cpu);
cpu               268 include/linux/tick.h static inline bool tick_nohz_full_cpu(int cpu) { return false; }
cpu               273 include/linux/tick.h static inline void tick_dep_set_cpu(int cpu, enum tick_dep_bits bit) { }
cpu               274 include/linux/tick.h static inline void tick_dep_clear_cpu(int cpu, enum tick_dep_bits bit) { }
cpu               284 include/linux/tick.h static inline void tick_nohz_full_kick_cpu(int cpu) { }
cpu               170 include/linux/timer.h extern void add_timer_on(struct timer_list *timer, int cpu);
cpu               208 include/linux/timer.h unsigned long __round_jiffies(unsigned long j, int cpu);
cpu               209 include/linux/timer.h unsigned long __round_jiffies_relative(unsigned long j, int cpu);
cpu               213 include/linux/timer.h unsigned long __round_jiffies_up(unsigned long j, int cpu);
cpu               214 include/linux/timer.h unsigned long __round_jiffies_up_relative(unsigned long j, int cpu);
cpu               219 include/linux/timer.h int timers_prepare_cpu(unsigned int cpu);
cpu               220 include/linux/timer.h int timers_dead_cpu(unsigned int cpu);
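add_timer_on() and the __round_jiffies*() helpers above combine when pinning a coarse timer to a CPU; a small sketch with hypothetical names:

#include <linux/timer.h>
#include <linux/jiffies.h>

static void demo_timer_fn(struct timer_list *t)
{
	/* fires on the CPU the timer was added on */
}

static DEFINE_TIMER(demo_timer, demo_timer_fn);

/* arm it roughly one second out, rounded to a whole second with a
 * per-CPU skew to avoid synchronized wakeups:
 *	demo_timer.expires = __round_jiffies(jiffies + HZ, cpu);
 *	add_timer_on(&demo_timer, cpu);
 */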
cpu                93 include/linux/topology.h static inline int cpu_to_node(int cpu)
cpu                95 include/linux/topology.h 	return per_cpu(numa_node, cpu);
cpu               107 include/linux/topology.h static inline void set_cpu_numa_node(int cpu, int node)
cpu               109 include/linux/topology.h 	per_cpu(numa_node, cpu) = node;
cpu               159 include/linux/topology.h static inline int cpu_to_mem(int cpu)
cpu               161 include/linux/topology.h 	return per_cpu(_numa_mem_, cpu);
cpu               166 include/linux/topology.h static inline void set_cpu_numa_mem(int cpu, int node)
cpu               168 include/linux/topology.h 	per_cpu(_numa_mem_, cpu) = node;
cpu               169 include/linux/topology.h 	_node_numa_mem_[cpu_to_node(cpu)] = node;
cpu               191 include/linux/topology.h static inline int cpu_to_mem(int cpu)
cpu               193 include/linux/topology.h 	return cpu_to_node(cpu);
cpu               200 include/linux/topology.h #define topology_physical_package_id(cpu)	((void)(cpu), -1)
cpu               203 include/linux/topology.h #define topology_die_id(cpu)			((void)(cpu), -1)
cpu               206 include/linux/topology.h #define topology_core_id(cpu)			((void)(cpu), 0)
cpu               209 include/linux/topology.h #define topology_sibling_cpumask(cpu)		cpumask_of(cpu)
cpu               212 include/linux/topology.h #define topology_core_cpumask(cpu)		cpumask_of(cpu)
cpu               215 include/linux/topology.h #define topology_die_cpumask(cpu)		cpumask_of(cpu)
cpu               219 include/linux/topology.h static inline const struct cpumask *cpu_smt_mask(int cpu)
cpu               221 include/linux/topology.h 	return topology_sibling_cpumask(cpu);
cpu               225 include/linux/topology.h static inline const struct cpumask *cpu_cpu_mask(int cpu)
cpu               227 include/linux/topology.h 	return cpumask_of_node(cpu_to_node(cpu));
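cpu_to_node() above is commonly used to keep allocations local to the node of the CPU that will touch them; a minimal sketch with a hypothetical helper:

#include <linux/topology.h>
#include <linux/slab.h>

static void *demo_alloc_for_cpu(int cpu, size_t size)
{
	/* place the buffer on the memory node backing @cpu */
	return kzalloc_node(size, GFP_KERNEL, cpu_to_node(cpu));
}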
cpu                41 include/linux/torture.h bool torture_offline(int cpu, long *n_onl_attempts, long *n_onl_successes,
cpu                43 include/linux/torture.h bool torture_online(int cpu, long *n_onl_attempts, long *n_onl_successes,
cpu                98 include/linux/trace_events.h 	int			cpu;
cpu                77 include/linux/vmstat.h extern void vm_events_fold_cpu(int cpu);
cpu                97 include/linux/vmstat.h static inline void vm_events_fold_cpu(int cpu)
cpu               154 include/linux/vmstat.h 	int cpu;
cpu               156 include/linux/vmstat.h 	for_each_online_cpu(cpu)
cpu               157 include/linux/vmstat.h 		x += per_cpu_ptr(zone->pageset, cpu)->vm_numa_stat_diff[item];
cpu               220 include/linux/vmstat.h 	int cpu;
cpu               221 include/linux/vmstat.h 	for_each_online_cpu(cpu)
cpu               222 include/linux/vmstat.h 		x += per_cpu_ptr(zone->pageset, cpu)->vm_stat_diff[item];
cpu               267 include/linux/vmstat.h void cpu_vm_stats_fold(int cpu);
cpu               367 include/linux/vmstat.h static inline void cpu_vm_stats_fold(int cpu) { }
cpu                75 include/linux/vtime.h extern void vtime_init_idle(struct task_struct *tsk, int cpu);
cpu                81 include/linux/vtime.h static inline void vtime_init_idle(struct task_struct *tsk, int cpu) { }
cpu               121 include/linux/workqueue.h 	int cpu;
cpu               444 include/linux/workqueue.h extern bool queue_work_on(int cpu, struct workqueue_struct *wq,
cpu               448 include/linux/workqueue.h extern bool queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
cpu               450 include/linux/workqueue.h extern bool mod_delayed_work_on(int cpu, struct workqueue_struct *wq,
cpu               474 include/linux/workqueue.h extern bool workqueue_congested(int cpu, struct workqueue_struct *wq);
cpu               534 include/linux/workqueue.h static inline bool schedule_work_on(int cpu, struct work_struct *work)
cpu               536 include/linux/workqueue.h 	return queue_work_on(cpu, system_wq, work);
cpu               593 include/linux/workqueue.h static inline bool schedule_delayed_work_on(int cpu, struct delayed_work *dwork,
cpu               596 include/linux/workqueue.h 	return queue_delayed_work_on(cpu, system_wq, dwork, delay);
cpu               614 include/linux/workqueue.h static inline long work_on_cpu(int cpu, long (*fn)(void *), void *arg)
cpu               618 include/linux/workqueue.h static inline long work_on_cpu_safe(int cpu, long (*fn)(void *), void *arg)
cpu               623 include/linux/workqueue.h long work_on_cpu(int cpu, long (*fn)(void *), void *arg);
cpu               624 include/linux/workqueue.h long work_on_cpu_safe(int cpu, long (*fn)(void *), void *arg);
cpu               641 include/linux/workqueue.h void wq_watchdog_touch(int cpu);
cpu               643 include/linux/workqueue.h static inline void wq_watchdog_touch(int cpu) { }
cpu               647 include/linux/workqueue.h int workqueue_prepare_cpu(unsigned int cpu);
cpu               648 include/linux/workqueue.h int workqueue_online_cpu(unsigned int cpu);
cpu               649 include/linux/workqueue.h int workqueue_offline_cpu(unsigned int cpu);
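queue_work_on()/schedule_work_on() above queue work for a specific CPU; as the inline helper shows, schedule_work_on() is simply queue_work_on() on system_wq. A sketch with hypothetical names:

#include <linux/workqueue.h>

static void demo_work_fn(struct work_struct *work)
{
	/* runs in process context, normally on the CPU it was queued on */
}

static DECLARE_WORK(demo_work, demo_work_fn);

/* e.g. schedule_work_on(cpu, &demo_work); */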
cpu                66 include/media/drv-intf/saa7146.h 	__le32		*cpu;
cpu                41 include/net/gen_stats.h 			  struct gnet_stats_basic_cpu __percpu *cpu,
cpu                45 include/net/gen_stats.h 			     struct gnet_stats_basic_cpu __percpu *cpu,
cpu                49 include/net/gen_stats.h 			     struct gnet_stats_basic_cpu __percpu *cpu,
cpu               296 include/net/ip.h u64 snmp_get_cpu_field(void __percpu *mib, int cpu, int offct);
cpu               299 include/net/ip.h u64 snmp_get_cpu_field64(void __percpu *mib, int cpu, int offct,
cpu               303 include/net/ip.h static inline u64  snmp_get_cpu_field64(void __percpu *mib, int cpu, int offct,
cpu               306 include/net/ip.h 	return snmp_get_cpu_field(mib, cpu, offct);
cpu                83 include/net/netfilter/nf_conntrack.h 	u16		cpu;
cpu               960 include/net/sock.h 	int cpu = raw_smp_processor_id();
cpu               962 include/net/sock.h 	if (unlikely(READ_ONCE(sk->sk_incoming_cpu) != cpu))
cpu               963 include/net/sock.h 		WRITE_ONCE(sk->sk_incoming_cpu, cpu);
cpu                53 include/soc/fsl/dpaa2-io.h 	int cpu;
cpu                67 include/soc/fsl/dpaa2-io.h struct dpaa2_io *dpaa2_io_service_select(int cpu);
cpu                32 include/soc/imx/revision.h void imx_print_silicon_rev(const char *cpu, int srev);
cpu               117 include/soc/nps/common.h #define NPS_CPU_TO_CLUSTER_NUM(cpu) \
cpu               118 include/soc/nps/common.h 	({ struct global_id gid; gid.value = cpu; \
cpu               151 include/soc/nps/common.h static inline void *nps_host_reg(u32 cpu, u32 blkid, u32 reg)
cpu               154 include/soc/nps/common.h 	u32 cl = NPS_CPU_TO_CLUSTER_NUM(cpu);
cpu                69 include/sound/simple_card_utils.h 	int cpu;  /* turn for CPU / Codec */
cpu               915 include/sound/soc.h #define SND_SOC_DAILINK_REG2(cpu, codec) SND_SOC_DAILINK_REG3(cpu, codec, null_dailink_component)
cpu               916 include/sound/soc.h #define SND_SOC_DAILINK_REG3(cpu, codec, platform)	\
cpu               917 include/sound/soc.h 	.cpus		= cpu,				\
cpu               918 include/sound/soc.h 	.num_cpus	= ARRAY_SIZE(cpu),		\
cpu               934 include/sound/soc.h #define SND_SOC_DAILINK_DEFS(name, cpu, codec, platform...)	\
cpu               935 include/sound/soc.h 	SND_SOC_DAILINK_DEF(name##_cpus, cpu);			\
cpu                12 include/trace/events/cpuhp.h 	TP_PROTO(unsigned int cpu,
cpu                17 include/trace/events/cpuhp.h 	TP_ARGS(cpu, target, idx, fun),
cpu                20 include/trace/events/cpuhp.h 		__field( unsigned int,	cpu		)
cpu                27 include/trace/events/cpuhp.h 		__entry->cpu	= cpu;
cpu                34 include/trace/events/cpuhp.h 		  __entry->cpu, __entry->target, __entry->idx, __entry->fun)
cpu                39 include/trace/events/cpuhp.h 	TP_PROTO(unsigned int cpu,
cpu                45 include/trace/events/cpuhp.h 	TP_ARGS(cpu, target, idx, fun, node),
cpu                48 include/trace/events/cpuhp.h 		__field( unsigned int,	cpu		)
cpu                55 include/trace/events/cpuhp.h 		__entry->cpu	= cpu;
cpu                62 include/trace/events/cpuhp.h 		  __entry->cpu, __entry->target, __entry->idx, __entry->fun)
cpu                67 include/trace/events/cpuhp.h 	TP_PROTO(unsigned int cpu,
cpu                72 include/trace/events/cpuhp.h 	TP_ARGS(cpu, state, idx, ret),
cpu                75 include/trace/events/cpuhp.h 		__field( unsigned int,	cpu		)
cpu                82 include/trace/events/cpuhp.h 		__entry->cpu	= cpu;
cpu                89 include/trace/events/cpuhp.h 		  __entry->cpu, __entry->state, __entry->idx,  __entry->ret)
cpu                67 include/trace/events/irq_matrix.h 	TP_PROTO(int bit, unsigned int cpu, struct irq_matrix *matrix,
cpu                70 include/trace/events/irq_matrix.h 	TP_ARGS(bit, cpu, matrix, cmap),
cpu                74 include/trace/events/irq_matrix.h 		__field(	unsigned int,	cpu			)
cpu                87 include/trace/events/irq_matrix.h 		__entry->cpu			= cpu;
cpu                99 include/trace/events/irq_matrix.h 		  __entry->bit, __entry->cpu, __entry->online,
cpu               143 include/trace/events/irq_matrix.h 	TP_PROTO(int bit, unsigned int cpu,
cpu               146 include/trace/events/irq_matrix.h 	TP_ARGS(bit, cpu, matrix, cmap)
cpu               151 include/trace/events/irq_matrix.h 	TP_PROTO(int bit, unsigned int cpu,
cpu               154 include/trace/events/irq_matrix.h 	TP_ARGS(bit, cpu, matrix, cmap)
cpu               159 include/trace/events/irq_matrix.h 	TP_PROTO(int bit, unsigned int cpu,
cpu               162 include/trace/events/irq_matrix.h 	TP_ARGS(bit, cpu, matrix, cmap)
cpu               167 include/trace/events/irq_matrix.h 	TP_PROTO(int bit, unsigned int cpu,
cpu               170 include/trace/events/irq_matrix.h 	TP_ARGS(bit, cpu, matrix, cmap)
cpu               175 include/trace/events/irq_matrix.h 	TP_PROTO(int bit, unsigned int cpu,
cpu               178 include/trace/events/irq_matrix.h 	TP_ARGS(bit, cpu, matrix, cmap)
cpu               183 include/trace/events/irq_matrix.h 	TP_PROTO(int bit, unsigned int cpu,
cpu               186 include/trace/events/irq_matrix.h 	TP_ARGS(bit, cpu, matrix, cmap)
cpu               191 include/trace/events/irq_matrix.h 	TP_PROTO(int bit, unsigned int cpu,
cpu               194 include/trace/events/irq_matrix.h 	TP_ARGS(bit, cpu, matrix, cmap)
cpu                29 include/trace/events/mce.h 		__field(	u32,		cpu		)
cpu                49 include/trace/events/mce.h 		__entry->cpu		= m->extcpu;
cpu                59 include/trace/events/mce.h 		__entry->cpu,
cpu                16 include/trace/events/power.h DECLARE_EVENT_CLASS(cpu,
cpu                36 include/trace/events/power.h DEFINE_EVENT(cpu, cpu_idle,
cpu               145 include/trace/events/power.h DEFINE_EVENT(cpu, cpu_frequency,
cpu               167 include/trace/events/power.h 		__entry->cpu_id = policy->cpu;
cpu               278 include/trace/events/rcu.h 	TP_PROTO(const char *rcuname, int cpu, const char *reason),
cpu               280 include/trace/events/rcu.h 	TP_ARGS(rcuname, cpu, reason),
cpu               284 include/trace/events/rcu.h 		__field(int, cpu)
cpu               290 include/trace/events/rcu.h 		__entry->cpu = cpu;
cpu               294 include/trace/events/rcu.h 	TP_printk("%s %d %s", __entry->rcuname, __entry->cpu, __entry->reason)
cpu               405 include/trace/events/rcu.h 	TP_PROTO(const char *rcuname, unsigned long gp_seq, int cpu, const char *qsevent),
cpu               407 include/trace/events/rcu.h 	TP_ARGS(rcuname, gp_seq, cpu, qsevent),
cpu               412 include/trace/events/rcu.h 		__field(int, cpu)
cpu               419 include/trace/events/rcu.h 		__entry->cpu = cpu;
cpu               425 include/trace/events/rcu.h 		  __entry->cpu, __entry->qsevent)
cpu               730 include/trace/events/rcu.h 	TP_PROTO(const char *rcuname, const char *s, int cpu, int cnt, unsigned long done),
cpu               732 include/trace/events/rcu.h 	TP_ARGS(rcuname, s, cpu, cnt, done),
cpu               737 include/trace/events/rcu.h 		__field(int, cpu)
cpu               745 include/trace/events/rcu.h 		__entry->cpu = cpu;
cpu               751 include/trace/events/rcu.h 		  __entry->rcuname, __entry->s, __entry->cpu, __entry->cnt,
cpu               583 include/trace/events/sched.h 	TP_PROTO(int cpu),
cpu               585 include/trace/events/sched.h 	TP_ARGS(cpu),
cpu               588 include/trace/events/sched.h 		__field(	int,	cpu	)
cpu               592 include/trace/events/sched.h 		__entry->cpu	= cpu;
cpu               595 include/trace/events/sched.h 	TP_printk("cpu=%d", __entry->cpu)
cpu                52 include/trace/events/workqueue.h 		__field( unsigned int,	cpu	)
cpu                60 include/trace/events/workqueue.h 		__entry->cpu		= pwq->pool->cpu;
cpu                65 include/trace/events/workqueue.h 		  __entry->req_cpu, __entry->cpu)
cpu               200 include/trace/events/xdp.h 		__field(int, cpu)
cpu               209 include/trace/events/xdp.h 		__entry->cpu		= smp_processor_id();
cpu               219 include/trace/events/xdp.h 		  __entry->cpu, __entry->map_id,
cpu               235 include/trace/events/xdp.h 		__field(int, cpu)
cpu               244 include/trace/events/xdp.h 		__entry->cpu		= smp_processor_id();
cpu               254 include/trace/events/xdp.h 		  __entry->cpu, __entry->map_id,
cpu               110 include/uapi/linux/blktrace_api.h 	__u32 cpu;		/* on what cpu did it happen */
cpu                65 include/uapi/linux/cn_proc.h 	__u32 cpu;
cpu                 8 include/uapi/linux/netfilter/xt_cpu.h 	__u32	cpu;
cpu                22 include/xen/events.h int bind_virq_to_irq(unsigned int virq, unsigned int cpu, bool percpu);
cpu                23 include/xen/events.h int bind_virq_to_irqhandler(unsigned int virq, unsigned int cpu,
cpu                28 include/xen/events.h 			   unsigned int cpu,
cpu                61 include/xen/events.h void xen_send_IPI_one(unsigned int cpu, enum ipi_vector vector);
cpu                90 include/xen/events.h int irq_from_virq(unsigned int cpu, unsigned int virq);
cpu               298 include/xen/interface/platform.h 	uint32_t cpu;   /* Physical cpu. */
cpu               350 include/xen/interface/xen-mca.h 	__u8  cpu;	/* cpu number; obsolete; use extcpu now */
cpu                15 include/xen/xen-ops.h static inline uint32_t xen_vcpu_nr(int cpu)
cpu                17 include/xen/xen-ops.h 	return per_cpu(xen_vcpu_id, cpu);
cpu                35 include/xen/xen-ops.h void xen_setup_runstate_info(int cpu);
cpu                39 include/xen/xen-ops.h u64 xen_steal_clock(int cpu);
cpu               231 kernel/bpf/arraymap.c 	int cpu, off = 0;
cpu               244 kernel/bpf/arraymap.c 	for_each_possible_cpu(cpu) {
cpu               245 kernel/bpf/arraymap.c 		bpf_long_memcpy(value + off, per_cpu_ptr(pptr, cpu), size);
cpu               315 kernel/bpf/arraymap.c 	int cpu, off = 0;
cpu               339 kernel/bpf/arraymap.c 	for_each_possible_cpu(cpu) {
cpu               340 kernel/bpf/arraymap.c 		bpf_long_memcpy(per_cpu_ptr(pptr, cpu), value + off, size);
cpu               398 kernel/bpf/arraymap.c 	int cpu;
cpu               404 kernel/bpf/arraymap.c 	for_each_possible_cpu(cpu) {
cpu               405 kernel/bpf/arraymap.c 		seq_printf(m, "\tcpu%d: ", cpu);
cpu               407 kernel/bpf/arraymap.c 				  per_cpu_ptr(pptr, cpu), m);
cpu                22 kernel/bpf/bpf_lru_list.c static int get_next_cpu(int cpu)
cpu                24 kernel/bpf/bpf_lru_list.c 	cpu = cpumask_next(cpu, cpu_possible_mask);
cpu                25 kernel/bpf/bpf_lru_list.c 	if (cpu >= nr_cpu_ids)
cpu                26 kernel/bpf/bpf_lru_list.c 		cpu = cpumask_first(cpu_possible_mask);
cpu                27 kernel/bpf/bpf_lru_list.c 	return cpu;
cpu               349 kernel/bpf/bpf_lru_list.c 				     int cpu,
cpu               354 kernel/bpf/bpf_lru_list.c 	node->cpu = cpu;
cpu               406 kernel/bpf/bpf_lru_list.c 	int cpu = raw_smp_processor_id();
cpu               408 kernel/bpf/bpf_lru_list.c 	l = per_cpu_ptr(lru->percpu_lru, cpu);
cpu               439 kernel/bpf/bpf_lru_list.c 	int cpu = raw_smp_processor_id();
cpu               441 kernel/bpf/bpf_lru_list.c 	loc_l = per_cpu_ptr(clru->local_list, cpu);
cpu               452 kernel/bpf/bpf_lru_list.c 		__local_list_add_pending(lru, loc_l, cpu, node, hash);
cpu               487 kernel/bpf/bpf_lru_list.c 		__local_list_add_pending(lru, loc_l, cpu, node, hash);
cpu               514 kernel/bpf/bpf_lru_list.c 		loc_l = per_cpu_ptr(lru->common_lru.local_list, node->cpu);
cpu               541 kernel/bpf/bpf_lru_list.c 	l = per_cpu_ptr(lru->percpu_lru, node->cpu);
cpu               581 kernel/bpf/bpf_lru_list.c 	int cpu;
cpu               588 kernel/bpf/bpf_lru_list.c 	for_each_possible_cpu(cpu) {
cpu               591 kernel/bpf/bpf_lru_list.c 		l = per_cpu_ptr(lru->percpu_lru, cpu);
cpu               594 kernel/bpf/bpf_lru_list.c 		node->cpu = cpu;
cpu               618 kernel/bpf/bpf_lru_list.c static void bpf_lru_locallist_init(struct bpf_lru_locallist *loc_l, int cpu)
cpu               625 kernel/bpf/bpf_lru_list.c 	loc_l->next_steal = cpu;
cpu               648 kernel/bpf/bpf_lru_list.c 	int cpu;
cpu               655 kernel/bpf/bpf_lru_list.c 		for_each_possible_cpu(cpu) {
cpu               658 kernel/bpf/bpf_lru_list.c 			l = per_cpu_ptr(lru->percpu_lru, cpu);
cpu               669 kernel/bpf/bpf_lru_list.c 		for_each_possible_cpu(cpu) {
cpu               672 kernel/bpf/bpf_lru_list.c 			loc_l = per_cpu_ptr(clru->local_list, cpu);
cpu               673 kernel/bpf/bpf_lru_list.c 			bpf_lru_locallist_init(loc_l, cpu);
cpu                25 kernel/bpf/bpf_lru_list.h 	u16 cpu;
cpu               108 kernel/bpf/core.c 	int cpu;
cpu               121 kernel/bpf/core.c 	for_each_possible_cpu(cpu) {
cpu               124 kernel/bpf/core.c 		pstats = per_cpu_ptr(prog->aux->stats, cpu);
cpu                53 kernel/bpf/cpumap.c 	u32 cpu;    /* kthread CPU and map index */
cpu                84 kernel/bpf/cpumap.c 	int ret, cpu;
cpu               122 kernel/bpf/cpumap.c 	for_each_possible_cpu(cpu)
cpu               123 kernel/bpf/cpumap.c 		INIT_LIST_HEAD(per_cpu_ptr(cmap->flush_list, cpu));
cpu               334 kernel/bpf/cpumap.c static struct bpf_cpu_map_entry *__cpu_map_entry_alloc(u32 qsize, u32 cpu,
cpu               343 kernel/bpf/cpumap.c 	numa = cpu_to_node(cpu);
cpu               369 kernel/bpf/cpumap.c 	rcpu->cpu    = cpu;
cpu               375 kernel/bpf/cpumap.c 					       "cpumap/%d/map:%d", cpu, map_id);
cpu               383 kernel/bpf/cpumap.c 	kthread_bind(rcpu->kthread, cpu);
cpu               402 kernel/bpf/cpumap.c 	int cpu;
cpu               412 kernel/bpf/cpumap.c 	for_each_online_cpu(cpu) {
cpu               413 kernel/bpf/cpumap.c 		struct xdp_bulk_queue *bq = per_cpu_ptr(rcpu->bulkq, cpu);
cpu               510 kernel/bpf/cpumap.c 	int cpu;
cpu               530 kernel/bpf/cpumap.c 	for_each_online_cpu(cpu) {
cpu               531 kernel/bpf/cpumap.c 		struct list_head *flush_list = per_cpu_ptr(cmap->flush_list, cpu);
cpu               606 kernel/bpf/cpumap.c 	const int to_cpu = rcpu->cpu;
cpu               112 kernel/bpf/devmap.c 	int err, cpu;
cpu               150 kernel/bpf/devmap.c 	for_each_possible_cpu(cpu)
cpu               151 kernel/bpf/devmap.c 		INIT_LIST_HEAD(per_cpu_ptr(dtab->flush_list, cpu));
cpu               204 kernel/bpf/devmap.c 	int i, cpu;
cpu               229 kernel/bpf/devmap.c 	for_each_online_cpu(cpu) {
cpu               230 kernel/bpf/devmap.c 		struct list_head *flush_list = per_cpu_ptr(dtab->flush_list, cpu);
cpu               517 kernel/bpf/devmap.c 		int cpu;
cpu               520 kernel/bpf/devmap.c 		for_each_online_cpu(cpu) {
cpu               521 kernel/bpf/devmap.c 			bq = per_cpu_ptr(dev->bulkq, cpu);
cpu               592 kernel/bpf/devmap.c 	int cpu;
cpu               605 kernel/bpf/devmap.c 	for_each_possible_cpu(cpu) {
cpu               606 kernel/bpf/devmap.c 		bq = per_cpu_ptr(dev->bulkq, cpu);
cpu               206 kernel/bpf/hashtab.c 	int cpu;
cpu               213 kernel/bpf/hashtab.c 	for_each_possible_cpu(cpu) {
cpu               219 kernel/bpf/hashtab.c 		*per_cpu_ptr(pptr, cpu) = l_new;
cpu               705 kernel/bpf/hashtab.c 		int off = 0, cpu;
cpu               707 kernel/bpf/hashtab.c 		for_each_possible_cpu(cpu) {
cpu               708 kernel/bpf/hashtab.c 			bpf_long_memcpy(per_cpu_ptr(pptr, cpu),
cpu              1288 kernel/bpf/hashtab.c 	int cpu, off = 0;
cpu              1304 kernel/bpf/hashtab.c 	for_each_possible_cpu(cpu) {
cpu              1306 kernel/bpf/hashtab.c 				per_cpu_ptr(pptr, cpu), size);
cpu              1338 kernel/bpf/hashtab.c 	int cpu;
cpu              1351 kernel/bpf/hashtab.c 	for_each_possible_cpu(cpu) {
cpu              1352 kernel/bpf/hashtab.c 		seq_printf(m, "\tcpu%d: ", cpu);
cpu              1354 kernel/bpf/hashtab.c 				  per_cpu_ptr(pptr, cpu), m);
cpu               176 kernel/bpf/local_storage.c 	int cpu, off = 0;
cpu               191 kernel/bpf/local_storage.c 	for_each_possible_cpu(cpu) {
cpu               193 kernel/bpf/local_storage.c 				per_cpu_ptr(storage->percpu_buf, cpu), size);
cpu               206 kernel/bpf/local_storage.c 	int cpu, off = 0;
cpu               226 kernel/bpf/local_storage.c 	for_each_possible_cpu(cpu) {
cpu               227 kernel/bpf/local_storage.c 		bpf_long_memcpy(per_cpu_ptr(storage->percpu_buf, cpu),
cpu               382 kernel/bpf/local_storage.c 	int cpu;
cpu               400 kernel/bpf/local_storage.c 		for_each_possible_cpu(cpu) {
cpu               401 kernel/bpf/local_storage.c 			seq_printf(m, "\tcpu%d: ", cpu);
cpu               403 kernel/bpf/local_storage.c 					  per_cpu_ptr(storage->percpu_buf, cpu),
cpu                 8 kernel/bpf/percpu_freelist.c 	int cpu;
cpu                14 kernel/bpf/percpu_freelist.c 	for_each_possible_cpu(cpu) {
cpu                15 kernel/bpf/percpu_freelist.c 		struct pcpu_freelist_head *head = per_cpu_ptr(s->freelist, cpu);
cpu                60 kernel/bpf/percpu_freelist.c 	int i, cpu, pcpu_entries;
cpu                70 kernel/bpf/percpu_freelist.c 	for_each_possible_cpu(cpu) {
cpu                72 kernel/bpf/percpu_freelist.c 		head = per_cpu_ptr(s->freelist, cpu);
cpu                88 kernel/bpf/percpu_freelist.c 	int orig_cpu, cpu;
cpu                90 kernel/bpf/percpu_freelist.c 	orig_cpu = cpu = raw_smp_processor_id();
cpu                92 kernel/bpf/percpu_freelist.c 		head = per_cpu_ptr(s->freelist, cpu);
cpu               101 kernel/bpf/percpu_freelist.c 		cpu = cpumask_next(cpu, cpu_possible_mask);
cpu               102 kernel/bpf/percpu_freelist.c 		if (cpu >= nr_cpu_ids)
cpu               103 kernel/bpf/percpu_freelist.c 			cpu = 0;
cpu               104 kernel/bpf/percpu_freelist.c 		if (cpu == orig_cpu)
cpu               623 kernel/bpf/stackmap.c 	int cpu;
cpu               626 kernel/bpf/stackmap.c 	for_each_possible_cpu(cpu) {
cpu               627 kernel/bpf/stackmap.c 		work = per_cpu_ptr(&up_read_work, cpu);
cpu              1381 kernel/bpf/syscall.c 	int cpu;
cpu              1383 kernel/bpf/syscall.c 	for_each_possible_cpu(cpu) {
cpu              1388 kernel/bpf/syscall.c 		st = per_cpu_ptr(prog->aux->stats, cpu);
cpu                84 kernel/bpf/xskmap.c 	int cpu, err;
cpu               116 kernel/bpf/xskmap.c 	for_each_possible_cpu(cpu)
cpu               117 kernel/bpf/xskmap.c 		INIT_LIST_HEAD(per_cpu_ptr(m->flush_list, cpu));
cpu                 9 kernel/cgroup/rstat.c static void cgroup_base_stat_flush(struct cgroup *cgrp, int cpu);
cpu                11 kernel/cgroup/rstat.c static struct cgroup_rstat_cpu *cgroup_rstat_cpu(struct cgroup *cgrp, int cpu)
cpu                13 kernel/cgroup/rstat.c 	return per_cpu_ptr(cgrp->rstat_cpu, cpu);
cpu                25 kernel/cgroup/rstat.c void cgroup_rstat_updated(struct cgroup *cgrp, int cpu)
cpu                27 kernel/cgroup/rstat.c 	raw_spinlock_t *cpu_lock = per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu);
cpu                43 kernel/cgroup/rstat.c 	if (cgroup_rstat_cpu(cgrp, cpu)->updated_next)
cpu                51 kernel/cgroup/rstat.c 		struct cgroup_rstat_cpu *rstatc = cgroup_rstat_cpu(cgrp, cpu);
cpu                52 kernel/cgroup/rstat.c 		struct cgroup_rstat_cpu *prstatc = cgroup_rstat_cpu(parent, cpu);
cpu                85 kernel/cgroup/rstat.c 						   struct cgroup *root, int cpu)
cpu               103 kernel/cgroup/rstat.c 		rstatc = cgroup_rstat_cpu(pos, cpu);
cpu               117 kernel/cgroup/rstat.c 		struct cgroup_rstat_cpu *prstatc = cgroup_rstat_cpu(parent, cpu);
cpu               123 kernel/cgroup/rstat.c 			nrstatc = cgroup_rstat_cpu(*nextp, cpu);
cpu               145 kernel/cgroup/rstat.c 	int cpu;
cpu               149 kernel/cgroup/rstat.c 	for_each_possible_cpu(cpu) {
cpu               151 kernel/cgroup/rstat.c 						       cpu);
cpu               155 kernel/cgroup/rstat.c 		while ((pos = cgroup_rstat_cpu_pop_updated(pos, cgrp, cpu))) {
cpu               158 kernel/cgroup/rstat.c 			cgroup_base_stat_flush(pos, cpu);
cpu               163 kernel/cgroup/rstat.c 				css->ss->css_rstat_flush(css, cpu);
cpu               244 kernel/cgroup/rstat.c 	int cpu;
cpu               254 kernel/cgroup/rstat.c 	for_each_possible_cpu(cpu) {
cpu               255 kernel/cgroup/rstat.c 		struct cgroup_rstat_cpu *rstatc = cgroup_rstat_cpu(cgrp, cpu);
cpu               266 kernel/cgroup/rstat.c 	int cpu;
cpu               271 kernel/cgroup/rstat.c 	for_each_possible_cpu(cpu) {
cpu               272 kernel/cgroup/rstat.c 		struct cgroup_rstat_cpu *rstatc = cgroup_rstat_cpu(cgrp, cpu);
cpu               285 kernel/cgroup/rstat.c 	int cpu;
cpu               287 kernel/cgroup/rstat.c 	for_each_possible_cpu(cpu)
cpu               288 kernel/cgroup/rstat.c 		raw_spin_lock_init(per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu));
cpu               305 kernel/cgroup/rstat.c static void cgroup_base_stat_flush(struct cgroup *cgrp, int cpu)
cpu               308 kernel/cgroup/rstat.c 	struct cgroup_rstat_cpu *rstatc = cgroup_rstat_cpu(cgrp, cpu);
cpu               189 kernel/context_tracking.c void __init context_tracking_cpu_set(int cpu)
cpu               193 kernel/context_tracking.c 	if (!per_cpu(context_tracking.active, cpu)) {
cpu               194 kernel/context_tracking.c 		per_cpu(context_tracking.active, cpu) = true;
cpu               214 kernel/context_tracking.c 	int cpu;
cpu               216 kernel/context_tracking.c 	for_each_possible_cpu(cpu)
cpu               217 kernel/context_tracking.c 		context_tracking_cpu_set(cpu);
cpu               115 kernel/cpu.c   		int		(*single)(unsigned int cpu);
cpu               116 kernel/cpu.c   		int		(*multi)(unsigned int cpu,
cpu               120 kernel/cpu.c   		int		(*single)(unsigned int cpu);
cpu               121 kernel/cpu.c   		int		(*multi)(unsigned int cpu,
cpu               147 kernel/cpu.c   static int cpuhp_invoke_callback(unsigned int cpu, enum cpuhp_state state,
cpu               151 kernel/cpu.c   	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
cpu               153 kernel/cpu.c   	int (*cbm)(unsigned int cpu, struct hlist_node *node);
cpu               154 kernel/cpu.c   	int (*cb)(unsigned int cpu);
cpu               171 kernel/cpu.c   		trace_cpuhp_enter(cpu, st->target, state, cb);
cpu               172 kernel/cpu.c   		ret = cb(cpu);
cpu               173 kernel/cpu.c   		trace_cpuhp_exit(cpu, st->state, state, ret);
cpu               183 kernel/cpu.c   		trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node);
cpu               184 kernel/cpu.c   		ret = cbm(cpu, node);
cpu               185 kernel/cpu.c   		trace_cpuhp_exit(cpu, st->state, state, ret);
cpu               195 kernel/cpu.c   		trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node);
cpu               196 kernel/cpu.c   		ret = cbm(cpu, node);
cpu               197 kernel/cpu.c   		trace_cpuhp_exit(cpu, st->state, state, ret);
cpu               220 kernel/cpu.c   		trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node);
cpu               221 kernel/cpu.c   		ret = cbm(cpu, node);
cpu               222 kernel/cpu.c   		trace_cpuhp_exit(cpu, st->state, state, ret);
cpu               424 kernel/cpu.c   static inline bool cpu_smt_allowed(unsigned int cpu)
cpu               429 kernel/cpu.c   	if (topology_is_primary_thread(cpu))
cpu               438 kernel/cpu.c   	return !cpumask_test_cpu(cpu, &cpus_booted_once_mask);
cpu               449 kernel/cpu.c   static inline bool cpu_smt_allowed(unsigned int cpu) { return true; }
cpu               519 kernel/cpu.c   static int bringup_wait_for_ap(unsigned int cpu)
cpu               521 kernel/cpu.c   	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
cpu               525 kernel/cpu.c   	if (WARN_ON_ONCE((!cpu_online(cpu))))
cpu               538 kernel/cpu.c   	if (!cpu_smt_allowed(cpu))
cpu               547 kernel/cpu.c   static int bringup_cpu(unsigned int cpu)
cpu               549 kernel/cpu.c   	struct task_struct *idle = idle_thread_get(cpu);
cpu               560 kernel/cpu.c   	ret = __cpu_up(cpu, idle);
cpu               564 kernel/cpu.c   	return bringup_wait_for_ap(cpu);
cpu               571 kernel/cpu.c   static void undo_cpu_up(unsigned int cpu, struct cpuhp_cpu_state *st)
cpu               574 kernel/cpu.c   		cpuhp_invoke_callback(cpu, st->state, false, NULL, NULL);
cpu               591 kernel/cpu.c   static int cpuhp_up_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
cpu               599 kernel/cpu.c   		ret = cpuhp_invoke_callback(cpu, st->state, true, NULL, NULL);
cpu               603 kernel/cpu.c   				undo_cpu_up(cpu, st);
cpu               614 kernel/cpu.c   static void cpuhp_create(unsigned int cpu)
cpu               616 kernel/cpu.c   	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
cpu               622 kernel/cpu.c   static int cpuhp_should_run(unsigned int cpu)
cpu               643 kernel/cpu.c   static void cpuhp_thread_fun(unsigned int cpu)
cpu               687 kernel/cpu.c   		st->result = cpuhp_invoke_callback(cpu, state, bringup, st->node, &st->last);
cpu               695 kernel/cpu.c   		st->result = cpuhp_invoke_callback(cpu, state, bringup, st->node, &st->last);
cpu               717 kernel/cpu.c   cpuhp_invoke_ap_callback(int cpu, enum cpuhp_state state, bool bringup,
cpu               720 kernel/cpu.c   	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
cpu               723 kernel/cpu.c   	if (!cpu_online(cpu))
cpu               737 kernel/cpu.c   		return cpuhp_invoke_callback(cpu, state, bringup, node, NULL);
cpu               767 kernel/cpu.c   static int cpuhp_kick_ap_work(unsigned int cpu)
cpu               769 kernel/cpu.c   	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
cpu               779 kernel/cpu.c   	trace_cpuhp_enter(cpu, st->target, prev_state, cpuhp_kick_ap_work);
cpu               781 kernel/cpu.c   	trace_cpuhp_exit(cpu, st->state, prev_state, ret);
cpu               814 kernel/cpu.c   void clear_tasks_mm_cpumask(int cpu)
cpu               825 kernel/cpu.c   	WARN_ON(cpu_online(cpu));
cpu               837 kernel/cpu.c   		cpumask_clear_cpu(cpu, mm_cpumask(t->mm));
cpu               848 kernel/cpu.c   	int err, cpu = smp_processor_id();
cpu               864 kernel/cpu.c   		ret = cpuhp_invoke_callback(cpu, st->state, false, NULL, NULL);
cpu               874 kernel/cpu.c   	tick_offline_cpu(cpu);
cpu               876 kernel/cpu.c   	stop_machine_park(cpu);
cpu               880 kernel/cpu.c   static int takedown_cpu(unsigned int cpu)
cpu               882 kernel/cpu.c   	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
cpu               886 kernel/cpu.c   	kthread_park(per_cpu_ptr(&cpuhp_state, cpu)->thread);
cpu               897 kernel/cpu.c   	err = stop_machine_cpuslocked(take_cpu_down, NULL, cpumask_of(cpu));
cpu               902 kernel/cpu.c   		kthread_unpark(per_cpu_ptr(&cpuhp_state, cpu)->thread);
cpu               905 kernel/cpu.c   	BUG_ON(cpu_online(cpu));
cpu               920 kernel/cpu.c   	hotplug_cpu__broadcast_tick_pull(cpu);
cpu               922 kernel/cpu.c   	__cpu_die(cpu);
cpu               924 kernel/cpu.c   	tick_cleanup_dead_cpu(cpu);
cpu               925 kernel/cpu.c   	rcutree_migrate_callbacks(cpu);
cpu               951 kernel/cpu.c   static void undo_cpu_down(unsigned int cpu, struct cpuhp_cpu_state *st)
cpu               954 kernel/cpu.c   		cpuhp_invoke_callback(cpu, st->state, true, NULL, NULL);
cpu               957 kernel/cpu.c   static int cpuhp_down_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
cpu               964 kernel/cpu.c   		ret = cpuhp_invoke_callback(cpu, st->state, false, NULL, NULL);
cpu               968 kernel/cpu.c   				undo_cpu_down(cpu, st);
cpu               976 kernel/cpu.c   static int __ref _cpu_down(unsigned int cpu, int tasks_frozen,
cpu               979 kernel/cpu.c   	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
cpu               985 kernel/cpu.c   	if (!cpu_present(cpu))
cpu               999 kernel/cpu.c   		ret = cpuhp_kick_ap_work(cpu);
cpu              1020 kernel/cpu.c   	ret = cpuhp_down_callbacks(cpu, st, target);
cpu              1037 kernel/cpu.c   static int cpu_down_maps_locked(unsigned int cpu, enum cpuhp_state target)
cpu              1041 kernel/cpu.c   	return _cpu_down(cpu, 0, target);
cpu              1044 kernel/cpu.c   static int do_cpu_down(unsigned int cpu, enum cpuhp_state target)
cpu              1049 kernel/cpu.c   	err = cpu_down_maps_locked(cpu, target);
cpu              1054 kernel/cpu.c   int cpu_down(unsigned int cpu)
cpu              1056 kernel/cpu.c   	return do_cpu_down(cpu, CPUHP_OFFLINE);
cpu              1071 kernel/cpu.c   void notify_cpu_starting(unsigned int cpu)
cpu              1073 kernel/cpu.c   	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
cpu              1077 kernel/cpu.c   	rcu_cpu_starting(cpu);	/* Enables RCU usage on this CPU. */
cpu              1078 kernel/cpu.c   	cpumask_set_cpu(cpu, &cpus_booted_once_mask);
cpu              1081 kernel/cpu.c   		ret = cpuhp_invoke_callback(cpu, st->state, true, NULL, NULL);
cpu              1113 kernel/cpu.c   static int _cpu_up(unsigned int cpu, int tasks_frozen, enum cpuhp_state target)
cpu              1115 kernel/cpu.c   	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
cpu              1121 kernel/cpu.c   	if (!cpu_present(cpu)) {
cpu              1135 kernel/cpu.c   		idle = idle_thread_get(cpu);
cpu              1150 kernel/cpu.c   		ret = cpuhp_kick_ap_work(cpu);
cpu              1165 kernel/cpu.c   	ret = cpuhp_up_callbacks(cpu, st, target);
cpu              1172 kernel/cpu.c   static int do_cpu_up(unsigned int cpu, enum cpuhp_state target)
cpu              1176 kernel/cpu.c   	if (!cpu_possible(cpu)) {
cpu              1178 kernel/cpu.c   		       cpu);
cpu              1185 kernel/cpu.c   	err = try_online_node(cpu_to_node(cpu));
cpu              1195 kernel/cpu.c   	if (!cpu_smt_allowed(cpu)) {
cpu              1200 kernel/cpu.c   	err = _cpu_up(cpu, 0, target);
cpu              1206 kernel/cpu.c   int cpu_up(unsigned int cpu)
cpu              1208 kernel/cpu.c   	return do_cpu_up(cpu, CPUHP_ONLINE);
cpu              1217 kernel/cpu.c   	int cpu, error = 0;
cpu              1236 kernel/cpu.c   	for_each_online_cpu(cpu) {
cpu              1237 kernel/cpu.c   		if (cpu == primary)
cpu              1246 kernel/cpu.c   		trace_suspend_resume(TPS("CPU_OFF"), cpu, true);
cpu              1247 kernel/cpu.c   		error = _cpu_down(cpu, 1, CPUHP_OFFLINE);
cpu              1248 kernel/cpu.c   		trace_suspend_resume(TPS("CPU_OFF"), cpu, false);
cpu              1250 kernel/cpu.c   			cpumask_set_cpu(cpu, frozen_cpus);
cpu              1252 kernel/cpu.c   			pr_err("Error taking CPU%d down: %d\n", cpu, error);
cpu              1283 kernel/cpu.c   	int cpu, error;
cpu              1295 kernel/cpu.c   	for_each_cpu(cpu, frozen_cpus) {
cpu              1296 kernel/cpu.c   		trace_suspend_resume(TPS("CPU_ON"), cpu, true);
cpu              1297 kernel/cpu.c   		error = _cpu_up(cpu, 1, CPUHP_ONLINE);
cpu              1298 kernel/cpu.c   		trace_suspend_resume(TPS("CPU_ON"), cpu, false);
cpu              1300 kernel/cpu.c   			pr_info("CPU%d is up\n", cpu);
cpu              1303 kernel/cpu.c   		pr_warn("Error taking CPU%d up: %d\n", cpu, error);
cpu              1576 kernel/cpu.c   				 int (*startup)(unsigned int cpu),
cpu              1577 kernel/cpu.c   				 int (*teardown)(unsigned int cpu),
cpu              1621 kernel/cpu.c   static int cpuhp_issue_call(int cpu, enum cpuhp_state state, bool bringup,
cpu              1640 kernel/cpu.c   		ret = cpuhp_invoke_ap_callback(cpu, state, bringup, node);
cpu              1642 kernel/cpu.c   		ret = cpuhp_invoke_callback(cpu, state, bringup, node, NULL);
cpu              1644 kernel/cpu.c   	ret = cpuhp_invoke_callback(cpu, state, bringup, node, NULL);
cpu              1658 kernel/cpu.c   	int cpu;
cpu              1661 kernel/cpu.c   	for_each_present_cpu(cpu) {
cpu              1662 kernel/cpu.c   		struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
cpu              1665 kernel/cpu.c   		if (cpu >= failedcpu)
cpu              1670 kernel/cpu.c   			cpuhp_issue_call(cpu, state, false, node);
cpu              1679 kernel/cpu.c   	int cpu;
cpu              1697 kernel/cpu.c   	for_each_present_cpu(cpu) {
cpu              1698 kernel/cpu.c   		struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
cpu              1704 kernel/cpu.c   		ret = cpuhp_issue_call(cpu, state, true, node);
cpu              1707 kernel/cpu.c   				cpuhp_rollback_install(cpu, state, node);
cpu              1750 kernel/cpu.c   				   int (*startup)(unsigned int cpu),
cpu              1751 kernel/cpu.c   				   int (*teardown)(unsigned int cpu),
cpu              1754 kernel/cpu.c   	int cpu, ret = 0;
cpu              1780 kernel/cpu.c   	for_each_present_cpu(cpu) {
cpu              1781 kernel/cpu.c   		struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
cpu              1787 kernel/cpu.c   		ret = cpuhp_issue_call(cpu, state, true, NULL);
cpu              1790 kernel/cpu.c   				cpuhp_rollback_install(cpu, state, NULL);
cpu              1809 kernel/cpu.c   			int (*startup)(unsigned int cpu),
cpu              1810 kernel/cpu.c   			int (*teardown)(unsigned int cpu),
cpu              1827 kernel/cpu.c   	int cpu;
cpu              1844 kernel/cpu.c   	for_each_present_cpu(cpu) {
cpu              1845 kernel/cpu.c   		struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
cpu              1849 kernel/cpu.c   			cpuhp_issue_call(cpu, state, false, node);
cpu              1874 kernel/cpu.c   	int cpu;
cpu              1896 kernel/cpu.c   	for_each_present_cpu(cpu) {
cpu              1897 kernel/cpu.c   		struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
cpu              1901 kernel/cpu.c   			cpuhp_issue_call(cpu, state, false, NULL);
cpu              1918 kernel/cpu.c   static void cpuhp_offline_cpu_device(unsigned int cpu)
cpu              1920 kernel/cpu.c   	struct device *dev = get_cpu_device(cpu);
cpu              1927 kernel/cpu.c   static void cpuhp_online_cpu_device(unsigned int cpu)
cpu              1929 kernel/cpu.c   	struct device *dev = get_cpu_device(cpu);
cpu              1938 kernel/cpu.c   	int cpu, ret = 0;
cpu              1941 kernel/cpu.c   	for_each_online_cpu(cpu) {
cpu              1942 kernel/cpu.c   		if (topology_is_primary_thread(cpu))
cpu              1944 kernel/cpu.c   		ret = cpu_down_maps_locked(cpu, CPUHP_OFFLINE);
cpu              1960 kernel/cpu.c   		cpuhp_offline_cpu_device(cpu);
cpu              1970 kernel/cpu.c   	int cpu, ret = 0;
cpu              1974 kernel/cpu.c   	for_each_present_cpu(cpu) {
cpu              1976 kernel/cpu.c   		if (cpu_online(cpu) || !node_online(cpu_to_node(cpu)))
cpu              1978 kernel/cpu.c   		ret = _cpu_up(cpu, 0, CPUHP_ONLINE);
cpu              1982 kernel/cpu.c   		cpuhp_online_cpu_device(cpu);
cpu              2244 kernel/cpu.c   	int cpu, ret;
cpu              2255 kernel/cpu.c   	for_each_possible_cpu(cpu) {
cpu              2256 kernel/cpu.c   		struct device *dev = get_cpu_device(cpu);
cpu              2332 kernel/cpu.c   void set_cpu_online(unsigned int cpu, bool online)
cpu              2345 kernel/cpu.c   		if (!cpumask_test_and_set_cpu(cpu, &__cpu_online_mask))
cpu              2348 kernel/cpu.c   		if (cpumask_test_and_clear_cpu(cpu, &__cpu_online_mask))
cpu              2358 kernel/cpu.c   	int cpu = smp_processor_id();
cpu              2361 kernel/cpu.c   	set_cpu_online(cpu, true);
cpu              2362 kernel/cpu.c   	set_cpu_active(cpu, true);
cpu              2363 kernel/cpu.c   	set_cpu_present(cpu, true);
cpu              2364 kernel/cpu.c   	set_cpu_possible(cpu, true);
cpu              2367 kernel/cpu.c   	__boot_cpu_id = cpu;
cpu               249 kernel/debug/debug_core.c 	int cpu;
cpu               252 kernel/debug/debug_core.c 	for_each_online_cpu(cpu) {
cpu               254 kernel/debug/debug_core.c 		if (cpu == this_cpu)
cpu               257 kernel/debug/debug_core.c 		csd = &per_cpu(kgdb_roundup_csd, cpu);
cpu               267 kernel/debug/debug_core.c 		if (kgdb_info[cpu].rounding_up)
cpu               269 kernel/debug/debug_core.c 		kgdb_info[cpu].rounding_up = true;
cpu               272 kernel/debug/debug_core.c 		ret = smp_call_function_single_async(cpu, csd);
cpu               274 kernel/debug/debug_core.c 			kgdb_info[cpu].rounding_up = false;
cpu               531 kernel/debug/debug_core.c 	int cpu;
cpu               536 kernel/debug/debug_core.c 	kgdb_info[ks->cpu].enter_kgdb++;
cpu               537 kernel/debug/debug_core.c 	kgdb_info[ks->cpu].exception_state |= exception_state;
cpu               554 kernel/debug/debug_core.c 	cpu = ks->cpu;
cpu               555 kernel/debug/debug_core.c 	kgdb_info[cpu].debuggerinfo = regs;
cpu               556 kernel/debug/debug_core.c 	kgdb_info[cpu].task = current;
cpu               557 kernel/debug/debug_core.c 	kgdb_info[cpu].ret_state = 0;
cpu               558 kernel/debug/debug_core.c 	kgdb_info[cpu].irq_depth = hardirq_count() >> HARDIRQ_SHIFT;
cpu               565 kernel/debug/debug_core.c 			atomic_xchg(&kgdb_active, cpu);
cpu               575 kernel/debug/debug_core.c 		if (kgdb_info[cpu].exception_state & DCPU_NEXT_MASTER) {
cpu               576 kernel/debug/debug_core.c 			kgdb_info[cpu].exception_state &= ~DCPU_NEXT_MASTER;
cpu               578 kernel/debug/debug_core.c 		} else if (kgdb_info[cpu].exception_state & DCPU_WANT_MASTER) {
cpu               580 kernel/debug/debug_core.c 				atomic_xchg(&kgdb_active, cpu);
cpu               583 kernel/debug/debug_core.c 		} else if (kgdb_info[cpu].exception_state & DCPU_IS_SLAVE) {
cpu               595 kernel/debug/debug_core.c 			kgdb_info[cpu].debuggerinfo = NULL;
cpu               596 kernel/debug/debug_core.c 			kgdb_info[cpu].task = NULL;
cpu               597 kernel/debug/debug_core.c 			kgdb_info[cpu].exception_state &=
cpu               599 kernel/debug/debug_core.c 			kgdb_info[cpu].enter_kgdb--;
cpu               616 kernel/debug/debug_core.c 	    (kgdb_info[cpu].task &&
cpu               617 kernel/debug/debug_core.c 	     kgdb_info[cpu].task->pid != kgdb_sstep_pid) && --sstep_tries) {
cpu               627 kernel/debug/debug_core.c 		kgdb_info[cpu].ret_state = 1;
cpu               700 kernel/debug/debug_core.c 			kgdb_info[cpu].ret_state = error;
cpu               729 kernel/debug/debug_core.c 	kgdb_info[cpu].debuggerinfo = NULL;
cpu               730 kernel/debug/debug_core.c 	kgdb_info[cpu].task = NULL;
cpu               731 kernel/debug/debug_core.c 	kgdb_info[cpu].exception_state &=
cpu               733 kernel/debug/debug_core.c 	kgdb_info[cpu].enter_kgdb--;
cpu               742 kernel/debug/debug_core.c 	return kgdb_info[cpu].ret_state;
cpu               771 kernel/debug/debug_core.c 	ks->cpu			= raw_smp_processor_id();
cpu               779 kernel/debug/debug_core.c 	if (kgdb_info[ks->cpu].enter_kgdb != 0)
cpu               802 kernel/debug/debug_core.c int kgdb_nmicallback(int cpu, void *regs)
cpu               808 kernel/debug/debug_core.c 	kgdb_info[cpu].rounding_up = false;
cpu               811 kernel/debug/debug_core.c 	ks->cpu			= cpu;
cpu               814 kernel/debug/debug_core.c 	if (kgdb_info[ks->cpu].enter_kgdb == 0 &&
cpu               823 kernel/debug/debug_core.c int kgdb_nmicallin(int cpu, int trapnr, void *regs, int err_code,
cpu               830 kernel/debug/debug_core.c 	if (kgdb_info[cpu].enter_kgdb == 0) {
cpu               835 kernel/debug/debug_core.c 		ks->cpu			= cpu;
cpu                23 kernel/debug/debug_core.h 	int			cpu;
cpu               502 kernel/debug/gdbstub.c 		thread = kgdb_info[ks->cpu].task;
cpu               503 kernel/debug/gdbstub.c 		local_debuggerinfo = kgdb_info[ks->cpu].debuggerinfo;
cpu               705 kernel/debug/gdbstub.c 	int cpu;
cpu               719 kernel/debug/gdbstub.c 			for_each_online_cpu(cpu) {
cpu               721 kernel/debug/gdbstub.c 				int_to_threadref(thref, -cpu - 2);
cpu               951 kernel/debug/gdbstub.c 	kgdb_usethread = kgdb_info[ks->cpu].task;
cpu               952 kernel/debug/gdbstub.c 	ks->kgdb_usethreadid = shadow_pid(kgdb_info[ks->cpu].task->pid);
cpu               120 kernel/debug/kdb/kdb_bt.c 		unsigned long cpu;
cpu               126 kernel/debug/kdb/kdb_bt.c 		for_each_online_cpu(cpu) {
cpu               127 kernel/debug/kdb/kdb_bt.c 			p = kdb_curr_task(cpu);
cpu               164 kernel/debug/kdb/kdb_bt.c 		unsigned long cpu = ~0;
cpu               170 kernel/debug/kdb/kdb_bt.c 			diag = kdbgetularg((char *)argv[1], &cpu);
cpu               177 kernel/debug/kdb/kdb_bt.c 		if (cpu != ~0) {
cpu               178 kernel/debug/kdb/kdb_bt.c 			if (cpu >= num_possible_cpus() || !cpu_online(cpu)) {
cpu               179 kernel/debug/kdb/kdb_bt.c 				kdb_printf("no process for cpu %ld\n", cpu);
cpu               182 kernel/debug/kdb/kdb_bt.c 			sprintf(buf, "btt 0x%px\n", KDB_TSK(cpu));
cpu               188 kernel/debug/kdb/kdb_bt.c 		for_each_online_cpu(cpu) {
cpu               189 kernel/debug/kdb/kdb_bt.c 			void *kdb_tsk = KDB_TSK(cpu);
cpu               194 kernel/debug/kdb/kdb_bt.c 					   cpu);
cpu                40 kernel/debug/kdb/kdb_debugger.c 	kdb_current_task = kgdb_info[ks->cpu].task;
cpu                41 kernel/debug/kdb/kdb_debugger.c 	kdb_current_regs = kgdb_info[ks->cpu].debuggerinfo;
cpu               160 kernel/debug/kdb/kdb_debugger.c 	kgdb_info[ks->cpu].ret_state = gdbstub_state(ks, "e");
cpu               162 kernel/debug/kdb/kdb_debugger.c 		kgdb_info[ks->cpu].ret_state = 1;
cpu               173 kernel/debug/kdb/kdb_debugger.c 	return kgdb_info[ks->cpu].ret_state;
cpu               190 kernel/debug/kdb/kdb_main.c struct task_struct *kdb_curr_task(int cpu)
cpu               192 kernel/debug/kdb/kdb_main.c 	struct task_struct *p = curr_task(cpu);
cpu               194 kernel/debug/kdb/kdb_main.c 	if ((task_thread_info(p)->flags & _TIF_MCA_INIT) && KDB_TSK(cpu))
cpu              2291 kernel/debug/kdb/kdb_main.c 	unsigned long cpu;
cpu              2293 kernel/debug/kdb/kdb_main.c 	for_each_online_cpu(cpu) {
cpu              2294 kernel/debug/kdb/kdb_main.c 		p = kdb_curr_task(cpu);
cpu              2322 kernel/debug/kdb/kdb_main.c 	int cpu;
cpu              2328 kernel/debug/kdb/kdb_main.c 	cpu = kdb_process_cpu(p);
cpu              2337 kernel/debug/kdb/kdb_main.c 		if (!KDB_TSK(cpu)) {
cpu              2340 kernel/debug/kdb/kdb_main.c 			if (KDB_TSK(cpu) != p)
cpu              2342 kernel/debug/kdb/kdb_main.c 				   "process table (0x%px)\n", KDB_TSK(cpu));
cpu              2350 kernel/debug/kdb/kdb_main.c 	unsigned long mask, cpu;
cpu              2359 kernel/debug/kdb/kdb_main.c 	for_each_online_cpu(cpu) {
cpu              2362 kernel/debug/kdb/kdb_main.c 		p = kdb_curr_task(cpu);
cpu              2562 kernel/debug/kdb/kdb_main.c 	int cpu, diag, nextarg = 1;
cpu              2596 kernel/debug/kdb/kdb_main.c #define KDB_PCU(cpu) __per_cpu_offset(cpu)
cpu              2599 kernel/debug/kdb/kdb_main.c #define KDB_PCU(cpu) __per_cpu_offset[cpu]
cpu              2601 kernel/debug/kdb/kdb_main.c #define KDB_PCU(cpu) 0
cpu              2604 kernel/debug/kdb/kdb_main.c 	for_each_online_cpu(cpu) {
cpu              2608 kernel/debug/kdb/kdb_main.c 		if (whichcpu != ~0UL && whichcpu != cpu)
cpu              2610 kernel/debug/kdb/kdb_main.c 		addr = symaddr + KDB_PCU(cpu);
cpu              2614 kernel/debug/kdb/kdb_main.c 				   "read, diag=%d\n", cpu, addr, diag);
cpu              2617 kernel/debug/kdb/kdb_main.c 		kdb_printf("%5d ", cpu);
cpu               225 kernel/debug/kdb/kdb_private.h #define KDB_TSK(cpu) kgdb_info[cpu].task
cpu               226 kernel/debug/kdb/kdb_private.h #define KDB_TSKREGS(cpu) kgdb_info[cpu].debuggerinfo
cpu               623 kernel/debug/kdb/kdb_support.c 	int cpu;
cpu               630 kernel/debug/kdb/kdb_support.c 	cpu = kdb_process_cpu(p);
cpu               642 kernel/debug/kdb/kdb_support.c 		if (!kdb_task_has_cpu(p) || kgdb_info[cpu].irq_depth == 1) {
cpu               643 kernel/debug/kdb/kdb_support.c 			if (cpu != kdb_initial_cpu)
cpu                51 kernel/events/callchain.c 	int cpu;
cpu                55 kernel/events/callchain.c 	for_each_possible_cpu(cpu)
cpu                56 kernel/events/callchain.c 		kfree(entries->cpu_entries[cpu]);
cpu                72 kernel/events/callchain.c 	int cpu;
cpu                89 kernel/events/callchain.c 	for_each_possible_cpu(cpu) {
cpu                90 kernel/events/callchain.c 		entries->cpu_entries[cpu] = kmalloc_node(size, GFP_KERNEL,
cpu                91 kernel/events/callchain.c 							 cpu_to_node(cpu));
cpu                92 kernel/events/callchain.c 		if (!entries->cpu_entries[cpu])
cpu               101 kernel/events/callchain.c 	for_each_possible_cpu(cpu)
cpu               102 kernel/events/callchain.c 		kfree(entries->cpu_entries[cpu]);
cpu               154 kernel/events/callchain.c 	int cpu;
cpu               165 kernel/events/callchain.c 	cpu = smp_processor_id();
cpu               167 kernel/events/callchain.c 	return (((void *)entries->cpu_entries[cpu]) +
cpu               136 kernel/events/core.c static int cpu_function_call(int cpu, remote_function_f func, void *info)
cpu               145 kernel/events/core.c 	smp_call_function_single(cpu, remote_function, &data, 1);
cpu               273 kernel/events/core.c 		cpu_function_call(event->cpu, event_function, &efs);
cpu               714 kernel/events/core.c 	t = per_cpu_ptr(event->cgrp->info, event->cpu);
cpu               939 kernel/events/core.c 	t = per_cpu_ptr(event->cgrp->info, event->cpu);
cpu              1090 kernel/events/core.c static void __perf_mux_hrtimer_init(struct perf_cpu_context *cpuctx, int cpu)
cpu              1533 kernel/events/core.c 	if (left->cpu < right->cpu)
cpu              1535 kernel/events/core.c 	if (left->cpu > right->cpu)
cpu              1620 kernel/events/core.c perf_event_groups_first(struct perf_event_groups *groups, int cpu)
cpu              1628 kernel/events/core.c 		if (cpu < node_event->cpu) {
cpu              1630 kernel/events/core.c 		} else if (cpu > node_event->cpu) {
cpu              1650 kernel/events/core.c 	if (next && next->cpu == event->cpu)
cpu              2073 kernel/events/core.c 	return (event->cpu == -1 || event->cpu == smp_processor_id()) &&
cpu              2657 kernel/events/core.c 			int cpu)
cpu              2665 kernel/events/core.c 	if (event->cpu != -1)
cpu              2666 kernel/events/core.c 		event->cpu = cpu;
cpu              2675 kernel/events/core.c 		cpu_function_call(cpu, __perf_install_in_context, event);
cpu              3350 kernel/events/core.c static int visit_groups_merge(struct perf_event_groups *groups, int cpu,
cpu              3357 kernel/events/core.c 	evt2 = perf_event_groups_first(groups, cpu);
cpu              4055 kernel/events/core.c 	    event->cpu != smp_processor_id()) {
cpu              4233 kernel/events/core.c 	int cpu = event->cpu;
cpu              4240 kernel/events/core.c 		cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
cpu              4339 kernel/events/core.c 	struct pmu_event_list *pel = per_cpu_ptr(&pmu_sb_events, event->cpu);
cpu              4371 kernel/events/core.c static void unaccount_event_cpu(struct perf_event *event, int cpu)
cpu              4377 kernel/events/core.c 		atomic_dec(&per_cpu(perf_cgroup_events, cpu));
cpu              4439 kernel/events/core.c 	unaccount_event_cpu(event, event->cpu);
cpu              4512 kernel/events/core.c 	    (e1->cpu == e2->cpu ||
cpu              4513 kernel/events/core.c 	     e1->cpu == -1 ||
cpu              4514 kernel/events/core.c 	     e2->cpu == -1))
cpu              5721 kernel/events/core.c 	if (event->cpu == -1 && event->attr.inherit)
cpu              5874 kernel/events/core.c 			      event->cpu, flags);
cpu              5975 kernel/events/core.c 	int cpu = READ_ONCE(event->pending_disable);
cpu              5977 kernel/events/core.c 	if (cpu < 0)
cpu              5980 kernel/events/core.c 	if (cpu == smp_processor_id()) {
cpu              6006 kernel/events/core.c 	irq_work_queue_on(&event->pending, cpu);
cpu              6215 kernel/events/core.c 		data->cpu_entry.cpu	 = raw_smp_processor_id();
cpu              6985 kernel/events/core.c 	int err, cpu;
cpu              6996 kernel/events/core.c 		cpu = iter->cpu;
cpu              6997 kernel/events/core.c 		if (cpu == -1)
cpu              6998 kernel/events/core.c 			cpu = READ_ONCE(iter->oncpu);
cpu              7000 kernel/events/core.c 		if (cpu == -1)
cpu              7003 kernel/events/core.c 		err = cpu_function_call(cpu, __perf_pmu_output_stop, event);
cpu              8539 kernel/events/core.c static void swevent_hlist_put_cpu(int cpu)
cpu              8541 kernel/events/core.c 	struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
cpu              8553 kernel/events/core.c 	int cpu;
cpu              8555 kernel/events/core.c 	for_each_possible_cpu(cpu)
cpu              8556 kernel/events/core.c 		swevent_hlist_put_cpu(cpu);
cpu              8559 kernel/events/core.c static int swevent_hlist_get_cpu(int cpu)
cpu              8561 kernel/events/core.c 	struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
cpu              8566 kernel/events/core.c 	    cpumask_test_cpu(cpu, perf_online_mask)) {
cpu              8585 kernel/events/core.c 	int err, cpu, failed_cpu;
cpu              8588 kernel/events/core.c 	for_each_possible_cpu(cpu) {
cpu              8589 kernel/events/core.c 		err = swevent_hlist_get_cpu(cpu);
cpu              8591 kernel/events/core.c 			failed_cpu = cpu;
cpu              8598 kernel/events/core.c 	for_each_possible_cpu(cpu) {
cpu              8599 kernel/events/core.c 		if (cpu == failed_cpu)
cpu              8601 kernel/events/core.c 		swevent_hlist_put_cpu(cpu);
cpu              8760 kernel/events/core.c 			if (event->cpu != smp_processor_id())
cpu              9158 kernel/events/core.c 	int node = cpu_to_node(event->cpu == -1 ? 0 : event->cpu);
cpu              9957 kernel/events/core.c 	int timer, cpu, ret;
cpu              9975 kernel/events/core.c 	for_each_online_cpu(cpu) {
cpu              9977 kernel/events/core.c 		cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
cpu              9980 kernel/events/core.c 		cpu_function_call(cpu,
cpu              10058 kernel/events/core.c 	int cpu, ret;
cpu              10111 kernel/events/core.c 	for_each_possible_cpu(cpu) {
cpu              10114 kernel/events/core.c 		cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
cpu              10119 kernel/events/core.c 		cpuctx->online = cpumask_test_cpu(cpu, perf_online_mask);
cpu              10121 kernel/events/core.c 		__perf_mux_hrtimer_init(cpuctx, cpu);
cpu              10301 kernel/events/core.c 	struct pmu_event_list *pel = per_cpu_ptr(&pmu_sb_events, event->cpu);
cpu              10321 kernel/events/core.c static void account_event_cpu(struct perf_event *event, int cpu)
cpu              10327 kernel/events/core.c 		atomic_inc(&per_cpu(perf_cgroup_events, cpu));
cpu              10411 kernel/events/core.c 	account_event_cpu(event, event->cpu);
cpu              10420 kernel/events/core.c perf_event_alloc(struct perf_event_attr *attr, int cpu,
cpu              10432 kernel/events/core.c 	if ((unsigned)cpu >= nr_cpu_ids) {
cpu              10433 kernel/events/core.c 		if (!task || cpu != -1)
cpu              10469 kernel/events/core.c 	event->cpu		= cpu;
cpu              10762 kernel/events/core.c 	if (output_event->cpu != event->cpu)
cpu              10768 kernel/events/core.c 	if (output_event->cpu == -1 && output_event->ctx != event->ctx)
cpu              10901 kernel/events/core.c 		pid_t, pid, int, cpu, int, group_fd, unsigned long, flags)
cpu              10961 kernel/events/core.c 	if ((flags & PERF_FLAG_PID_CGROUP) && (pid == -1 || cpu == -1))
cpu              11017 kernel/events/core.c 	event = perf_event_alloc(&attr, cpu, task, group_leader, NULL,
cpu              11101 kernel/events/core.c 		if (group_leader->cpu != event->cpu)
cpu              11260 kernel/events/core.c 			perf_install_in_context(ctx, sibling, sibling->cpu);
cpu              11270 kernel/events/core.c 		perf_install_in_context(ctx, group_leader, group_leader->cpu);
cpu              11285 kernel/events/core.c 	perf_install_in_context(ctx, event, event->cpu);
cpu              11348 kernel/events/core.c perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu,
cpu              11364 kernel/events/core.c 	event = perf_event_alloc(attr, cpu, task, NULL, NULL,
cpu              11410 kernel/events/core.c 	perf_install_in_context(ctx, event, event->cpu);
cpu              11812 kernel/events/core.c 					   parent_event->cpu,
cpu              12110 kernel/events/core.c 	int cpu;
cpu              12114 kernel/events/core.c 	for_each_possible_cpu(cpu) {
cpu              12115 kernel/events/core.c 		swhash = &per_cpu(swevent_htable, cpu);
cpu              12117 kernel/events/core.c 		INIT_LIST_HEAD(&per_cpu(active_ctx_list, cpu));
cpu              12119 kernel/events/core.c 		INIT_LIST_HEAD(&per_cpu(pmu_sb_events.list, cpu));
cpu              12120 kernel/events/core.c 		raw_spin_lock_init(&per_cpu(pmu_sb_events.lock, cpu));
cpu              12123 kernel/events/core.c 		INIT_LIST_HEAD(&per_cpu(cgrp_cpuctx_list, cpu));
cpu              12125 kernel/events/core.c 		INIT_LIST_HEAD(&per_cpu(sched_cb_list, cpu));
cpu              12129 kernel/events/core.c static void perf_swevent_init_cpu(unsigned int cpu)
cpu              12131 kernel/events/core.c 	struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
cpu              12137 kernel/events/core.c 		hlist = kzalloc_node(sizeof(*hlist), GFP_KERNEL, cpu_to_node(cpu));
cpu              12158 kernel/events/core.c static void perf_event_exit_cpu_context(int cpu)
cpu              12166 kernel/events/core.c 		cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
cpu              12170 kernel/events/core.c 		smp_call_function_single(cpu, __perf_event_exit_context, ctx, 1);
cpu              12174 kernel/events/core.c 	cpumask_clear_cpu(cpu, perf_online_mask);
cpu              12179 kernel/events/core.c static void perf_event_exit_cpu_context(int cpu) { }
cpu              12183 kernel/events/core.c int perf_event_init_cpu(unsigned int cpu)
cpu              12189 kernel/events/core.c 	perf_swevent_init_cpu(cpu);
cpu              12192 kernel/events/core.c 	cpumask_set_cpu(cpu, perf_online_mask);
cpu              12194 kernel/events/core.c 		cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
cpu              12206 kernel/events/core.c int perf_event_exit_cpu(unsigned int cpu)
cpu              12208 kernel/events/core.c 	perf_event_exit_cpu_context(cpu);
cpu              12215 kernel/events/core.c 	int cpu;
cpu              12217 kernel/events/core.c 	for_each_online_cpu(cpu)
cpu              12218 kernel/events/core.c 		perf_event_exit_cpu(cpu);
cpu                52 kernel/events/hw_breakpoint.c static struct bp_cpuinfo *get_bp_info(int cpu, enum bp_type_idx type)
cpu                54 kernel/events/hw_breakpoint.c 	return per_cpu_ptr(bp_cpuinfo + type, cpu);
cpu                88 kernel/events/hw_breakpoint.c static unsigned int max_task_bp_pinned(int cpu, enum bp_type_idx type)
cpu                90 kernel/events/hw_breakpoint.c 	unsigned int *tsk_pinned = get_bp_info(cpu, type)->tsk_pinned;
cpu               105 kernel/events/hw_breakpoint.c static int task_bp_pinned(int cpu, struct perf_event *bp, enum bp_type_idx type)
cpu               114 kernel/events/hw_breakpoint.c 		    (iter->cpu < 0 || cpu == iter->cpu))
cpu               123 kernel/events/hw_breakpoint.c 	if (bp->cpu >= 0)
cpu               124 kernel/events/hw_breakpoint.c 		return cpumask_of(bp->cpu);
cpu               137 kernel/events/hw_breakpoint.c 	int cpu;
cpu               139 kernel/events/hw_breakpoint.c 	for_each_cpu(cpu, cpumask) {
cpu               140 kernel/events/hw_breakpoint.c 		struct bp_cpuinfo *info = get_bp_info(cpu, type);
cpu               145 kernel/events/hw_breakpoint.c 			nr += max_task_bp_pinned(cpu, type);
cpu               147 kernel/events/hw_breakpoint.c 			nr += task_bp_pinned(cpu, bp, type);
cpu               172 kernel/events/hw_breakpoint.c static void toggle_bp_task_slot(struct perf_event *bp, int cpu,
cpu               175 kernel/events/hw_breakpoint.c 	unsigned int *tsk_pinned = get_bp_info(cpu, type)->tsk_pinned;
cpu               178 kernel/events/hw_breakpoint.c 	old_idx = task_bp_pinned(cpu, bp, type) - 1;
cpu               195 kernel/events/hw_breakpoint.c 	int cpu;
cpu               202 kernel/events/hw_breakpoint.c 		get_bp_info(bp->cpu, type)->cpu_pinned += weight;
cpu               207 kernel/events/hw_breakpoint.c 	for_each_cpu(cpu, cpumask)
cpu               208 kernel/events/hw_breakpoint.c 		toggle_bp_task_slot(bp, cpu, type, weight);
cpu               547 kernel/events/hw_breakpoint.c 	int cpu;
cpu               554 kernel/events/hw_breakpoint.c 	for_each_online_cpu(cpu) {
cpu               555 kernel/events/hw_breakpoint.c 		bp = perf_event_create_kernel_counter(attr, cpu, NULL,
cpu               562 kernel/events/hw_breakpoint.c 		per_cpu(*cpu_events, cpu) = bp;
cpu               580 kernel/events/hw_breakpoint.c 	int cpu;
cpu               582 kernel/events/hw_breakpoint.c 	for_each_possible_cpu(cpu)
cpu               583 kernel/events/hw_breakpoint.c 		unregister_hw_breakpoint(per_cpu(*cpu_events, cpu));
cpu               663 kernel/events/hw_breakpoint.c 	int cpu, err_cpu;
cpu               669 kernel/events/hw_breakpoint.c 	for_each_possible_cpu(cpu) {
cpu               671 kernel/events/hw_breakpoint.c 			struct bp_cpuinfo *info = get_bp_info(cpu, i);
cpu               690 kernel/events/hw_breakpoint.c 		if (err_cpu == cpu)
cpu                79 kernel/events/internal.h rb_alloc(int nr_pages, long watermark, int cpu, int flags);
cpu               633 kernel/events/ring_buffer.c 	int node = (event->cpu == -1) ? -1 : cpu_to_node(event->cpu);
cpu               744 kernel/events/ring_buffer.c static void *perf_mmap_alloc_page(int cpu)
cpu               749 kernel/events/ring_buffer.c 	node = (cpu == -1) ? cpu : cpu_to_node(cpu);
cpu               757 kernel/events/ring_buffer.c struct ring_buffer *rb_alloc(int nr_pages, long watermark, int cpu, int flags)
cpu               773 kernel/events/ring_buffer.c 	rb->user_page = perf_mmap_alloc_page(cpu);
cpu               778 kernel/events/ring_buffer.c 		rb->data_pages[i] = perf_mmap_alloc_page(cpu);
cpu               866 kernel/events/ring_buffer.c struct ring_buffer *rb_alloc(int nr_pages, long watermark, int cpu, int flags)
cpu               151 kernel/fork.c  	int cpu;
cpu               154 kernel/fork.c  	for_each_possible_cpu(cpu)
cpu               155 kernel/fork.c  		total += per_cpu(process_counts, cpu);
cpu               194 kernel/fork.c  static int free_vm_stack_cache(unsigned int cpu)
cpu               196 kernel/fork.c  	struct vm_struct **cached_vm_stacks = per_cpu_ptr(cached_stacks, cpu);
cpu              2300 kernel/fork.c  struct task_struct *fork_idle(int cpu)
cpu              2307 kernel/fork.c  	task = copy_process(&init_struct_pid, 0, cpu_to_node(cpu), &args);
cpu              2310 kernel/fork.c  		init_idle(task, cpu);
cpu                16 kernel/irq/affinity.c 	int cpu, sibl;
cpu                19 kernel/irq/affinity.c 		cpu = cpumask_first(nmsk);
cpu                22 kernel/irq/affinity.c 		if (cpu >= nr_cpu_ids)
cpu                25 kernel/irq/affinity.c 		cpumask_clear_cpu(cpu, nmsk);
cpu                26 kernel/irq/affinity.c 		cpumask_set_cpu(cpu, irqmsk);
cpu                30 kernel/irq/affinity.c 		siblmsk = topology_sibling_cpumask(cpu);
cpu                77 kernel/irq/affinity.c 	int cpu;
cpu                79 kernel/irq/affinity.c 	for_each_possible_cpu(cpu)
cpu                80 kernel/irq/affinity.c 		cpumask_set_cpu(cpu, masks[cpu_to_node(cpu)]);
cpu               388 kernel/irq/chip.c void irq_percpu_enable(struct irq_desc *desc, unsigned int cpu)
cpu               394 kernel/irq/chip.c 	cpumask_set_cpu(cpu, desc->percpu_enabled);
cpu               397 kernel/irq/chip.c void irq_percpu_disable(struct irq_desc *desc, unsigned int cpu)
cpu               403 kernel/irq/chip.c 	cpumask_clear_cpu(cpu, desc->percpu_enabled);
cpu               944 kernel/irq/chip.c 		unsigned int cpu = smp_processor_id();
cpu               945 kernel/irq/chip.c 		bool enabled = cpumask_test_cpu(cpu, desc->percpu_enabled);
cpu               948 kernel/irq/chip.c 			irq_percpu_disable(desc, cpu);
cpu               951 kernel/irq/chip.c 			    enabled ? " and unmasked" : "", irq, cpu);
cpu                22 kernel/irq/cpuhotplug.c 	unsigned int cpu = smp_processor_id();
cpu                38 kernel/irq/cpuhotplug.c 	if (cpumask_any_but(m, cpu) < nr_cpu_ids &&
cpu                45 kernel/irq/cpuhotplug.c 			cpumask_pr_args(m), d->irq, cpu);
cpu                49 kernel/irq/cpuhotplug.c 	return cpumask_test_cpu(cpu, m);
cpu               174 kernel/irq/cpuhotplug.c static void irq_restore_affinity_of_irq(struct irq_desc *desc, unsigned int cpu)
cpu               180 kernel/irq/cpuhotplug.c 	    !irq_data_get_irq_chip(data) || !cpumask_test_cpu(cpu, affinity))
cpu               201 kernel/irq/cpuhotplug.c int irq_affinity_online_cpu(unsigned int cpu)
cpu               210 kernel/irq/cpuhotplug.c 		irq_restore_affinity_of_irq(desc, cpu);
cpu                88 kernel/irq/internals.h extern void irq_percpu_enable(struct irq_desc *desc, unsigned int cpu);
cpu                89 kernel/irq/internals.h extern void irq_percpu_disable(struct irq_desc *desc, unsigned int cpu);
cpu               162 kernel/irq/ipi.c irq_hw_number_t ipi_get_hwirq(unsigned int irq, unsigned int cpu)
cpu               167 kernel/irq/ipi.c 	if (!data || !ipimask || cpu >= nr_cpu_ids)
cpu               170 kernel/irq/ipi.c 	if (!cpumask_test_cpu(cpu, ipimask))
cpu               180 kernel/irq/ipi.c 		data = irq_get_irq_data(irq + cpu - data->common->ipi_offset);
cpu               187 kernel/irq/ipi.c 			   const struct cpumask *dest, unsigned int cpu)
cpu               197 kernel/irq/ipi.c 	if (cpu >= nr_cpu_ids)
cpu               204 kernel/irq/ipi.c 		if (!cpumask_test_cpu(cpu, ipimask))
cpu               221 kernel/irq/ipi.c int __ipi_send_single(struct irq_desc *desc, unsigned int cpu)
cpu               232 kernel/irq/ipi.c 	if (WARN_ON_ONCE(ipi_send_verify(chip, data, NULL, cpu)))
cpu               236 kernel/irq/ipi.c 		chip->ipi_send_mask(data, cpumask_of(cpu));
cpu               242 kernel/irq/ipi.c 	    cpu != data->common->ipi_offset) {
cpu               244 kernel/irq/ipi.c 		unsigned irq = data->irq + cpu - data->common->ipi_offset;
cpu               248 kernel/irq/ipi.c 	chip->ipi_send_single(data, cpu);
cpu               267 kernel/irq/ipi.c 	unsigned int cpu;
cpu               286 kernel/irq/ipi.c 		for_each_cpu(cpu, dest) {
cpu               287 kernel/irq/ipi.c 			unsigned irq = base + cpu - data->common->ipi_offset;
cpu               290 kernel/irq/ipi.c 			chip->ipi_send_single(data, cpu);
cpu               293 kernel/irq/ipi.c 		for_each_cpu(cpu, dest)
cpu               294 kernel/irq/ipi.c 			chip->ipi_send_single(data, cpu);
cpu               307 kernel/irq/ipi.c int ipi_send_single(unsigned int virq, unsigned int cpu)
cpu               313 kernel/irq/ipi.c 	if (WARN_ON_ONCE(ipi_send_verify(chip, data, NULL, cpu)))
cpu               316 kernel/irq/ipi.c 	return __ipi_send_single(desc, cpu);
cpu               106 kernel/irq/irqdesc.c 	int cpu;
cpu               125 kernel/irq/irqdesc.c 	for_each_possible_cpu(cpu)
cpu               126 kernel/irq/irqdesc.c 		*per_cpu_ptr(desc->kstat_irqs, cpu) = 0;
cpu               150 kernel/irq/irqdesc.c 	int cpu, irq = desc->irq_data.irq;
cpu               154 kernel/irq/irqdesc.c 	for_each_possible_cpu(cpu) {
cpu               155 kernel/irq/irqdesc.c 		unsigned int c = kstat_irqs_cpu(irq, cpu);
cpu               958 kernel/irq/irqdesc.c unsigned int kstat_irqs_cpu(unsigned int irq, int cpu)
cpu               963 kernel/irq/irqdesc.c 			*per_cpu_ptr(desc->kstat_irqs, cpu) : 0;
cpu               983 kernel/irq/irqdesc.c 	int cpu;
cpu               992 kernel/irq/irqdesc.c 	for_each_possible_cpu(cpu)
cpu               993 kernel/irq/irqdesc.c 		sum += *per_cpu_ptr(desc->kstat_irqs, cpu);
cpu              2191 kernel/irq/manage.c 	unsigned int cpu = smp_processor_id();
cpu              2217 kernel/irq/manage.c 	irq_percpu_enable(desc, cpu);
cpu              2237 kernel/irq/manage.c 	unsigned int cpu = smp_processor_id();
cpu              2246 kernel/irq/manage.c 	is_enabled = cpumask_test_cpu(cpu, desc->percpu_enabled);
cpu              2255 kernel/irq/manage.c 	unsigned int cpu = smp_processor_id();
cpu              2262 kernel/irq/manage.c 	irq_percpu_disable(desc, cpu);
cpu               132 kernel/irq/matrix.c 	unsigned int cpu, best_cpu, maxavl = 0;
cpu               137 kernel/irq/matrix.c 	for_each_cpu(cpu, msk) {
cpu               138 kernel/irq/matrix.c 		cm = per_cpu_ptr(m->maps, cpu);
cpu               143 kernel/irq/matrix.c 		best_cpu = cpu;
cpu               153 kernel/irq/matrix.c 	unsigned int cpu, best_cpu, allocated = UINT_MAX;
cpu               158 kernel/irq/matrix.c 	for_each_cpu(cpu, msk) {
cpu               159 kernel/irq/matrix.c 		cm = per_cpu_ptr(m->maps, cpu);
cpu               164 kernel/irq/matrix.c 		best_cpu = cpu;
cpu               212 kernel/irq/matrix.c 	unsigned int cpu, failed_cpu;
cpu               214 kernel/irq/matrix.c 	for_each_cpu(cpu, msk) {
cpu               215 kernel/irq/matrix.c 		struct cpumap *cm = per_cpu_ptr(m->maps, cpu);
cpu               226 kernel/irq/matrix.c 		trace_irq_matrix_reserve_managed(bit, cpu, m, cm);
cpu               230 kernel/irq/matrix.c 	failed_cpu = cpu;
cpu               231 kernel/irq/matrix.c 	for_each_cpu(cpu, msk) {
cpu               232 kernel/irq/matrix.c 		if (cpu == failed_cpu)
cpu               234 kernel/irq/matrix.c 		irq_matrix_remove_managed(m, cpumask_of(cpu));
cpu               253 kernel/irq/matrix.c 	unsigned int cpu;
cpu               255 kernel/irq/matrix.c 	for_each_cpu(cpu, msk) {
cpu               256 kernel/irq/matrix.c 		struct cpumap *cm = per_cpu_ptr(m->maps, cpu);
cpu               276 kernel/irq/matrix.c 		trace_irq_matrix_remove_managed(bit, cpu, m, cm);
cpu               288 kernel/irq/matrix.c 	unsigned int bit, cpu, end = m->alloc_end;
cpu               294 kernel/irq/matrix.c 	cpu = matrix_find_best_cpu_managed(m, msk);
cpu               295 kernel/irq/matrix.c 	if (cpu == UINT_MAX)
cpu               298 kernel/irq/matrix.c 	cm = per_cpu_ptr(m->maps, cpu);
cpu               309 kernel/irq/matrix.c 	*mapped_cpu = cpu;
cpu               310 kernel/irq/matrix.c 	trace_irq_matrix_alloc_managed(bit, cpu, m, cm);
cpu               380 kernel/irq/matrix.c 	unsigned int cpu, bit;
cpu               383 kernel/irq/matrix.c 	cpu = matrix_find_best_cpu(m, msk);
cpu               384 kernel/irq/matrix.c 	if (cpu == UINT_MAX)
cpu               387 kernel/irq/matrix.c 	cm = per_cpu_ptr(m->maps, cpu);
cpu               397 kernel/irq/matrix.c 	*mapped_cpu = cpu;
cpu               398 kernel/irq/matrix.c 	trace_irq_matrix_alloc(bit, cpu, m, cm);
cpu               411 kernel/irq/matrix.c void irq_matrix_free(struct irq_matrix *m, unsigned int cpu,
cpu               414 kernel/irq/matrix.c 	struct cpumap *cm = per_cpu_ptr(m->maps, cpu);
cpu               432 kernel/irq/matrix.c 	trace_irq_matrix_free(bit, cpu, m, cm);
cpu               484 kernel/irq/matrix.c 	int cpu;
cpu               494 kernel/irq/matrix.c 	for_each_online_cpu(cpu) {
cpu               495 kernel/irq/matrix.c 		struct cpumap *cm = per_cpu_ptr(m->maps, cpu);
cpu               498 kernel/irq/matrix.c 			   cpu, cm->available, cm->managed,
cpu                96 kernel/irq_work.c bool irq_work_queue_on(struct irq_work *work, int cpu)
cpu               103 kernel/irq_work.c 	WARN_ON_ONCE(cpu_is_offline(cpu));
cpu               110 kernel/irq_work.c 	if (cpu != smp_processor_id()) {
cpu               113 kernel/irq_work.c 		if (llist_add(&work->llnode, &per_cpu(raised_list, cpu)))
cpu               114 kernel/irq_work.c 			arch_send_call_function_single_ipi(cpu);
cpu              1055 kernel/kexec_core.c void crash_save_cpu(struct pt_regs *regs, int cpu)
cpu              1060 kernel/kexec_core.c 	if ((cpu < 0) || (cpu >= nr_cpu_ids))
cpu              1070 kernel/kexec_core.c 	buf = (u32 *)per_cpu_ptr(crash_notes, cpu);
cpu              1247 kernel/kexec_file.c 	unsigned int cpu, i;
cpu              1287 kernel/kexec_file.c 	for_each_present_cpu(cpu) {
cpu              1289 kernel/kexec_file.c 		notes_addr = per_cpu_ptr_to_phys(per_cpu_ptr(crash_notes, cpu));
cpu                48 kernel/kthread.c 	unsigned int cpu;
cpu               411 kernel/kthread.c static void __kthread_bind(struct task_struct *p, unsigned int cpu, long state)
cpu               413 kernel/kthread.c 	__kthread_bind_mask(p, cpumask_of(cpu), state);
cpu               430 kernel/kthread.c void kthread_bind(struct task_struct *p, unsigned int cpu)
cpu               432 kernel/kthread.c 	__kthread_bind(p, cpu, TASK_UNINTERRUPTIBLE);
cpu               448 kernel/kthread.c 					  void *data, unsigned int cpu,
cpu               453 kernel/kthread.c 	p = kthread_create_on_node(threadfn, data, cpu_to_node(cpu), namefmt,
cpu               454 kernel/kthread.c 				   cpu);
cpu               457 kernel/kthread.c 	kthread_bind(p, cpu);
cpu               460 kernel/kthread.c 	to_kthread(p)->cpu = cpu;
cpu               481 kernel/kthread.c 		__kthread_bind(k, kthread->cpu, TASK_PARKED);
cpu               682 kernel/kthread.c __kthread_create_worker(int cpu, unsigned int flags,
cpu               695 kernel/kthread.c 	if (cpu >= 0)
cpu               696 kernel/kthread.c 		node = cpu_to_node(cpu);
cpu               703 kernel/kthread.c 	if (cpu >= 0)
cpu               704 kernel/kthread.c 		kthread_bind(task, cpu);
cpu               757 kernel/kthread.c kthread_create_worker_on_cpu(int cpu, unsigned int flags,
cpu               764 kernel/kthread.c 	worker = __kthread_create_worker(cpu, flags, namefmt, args);
cpu                74 kernel/livepatch/transition.c 	unsigned int cpu;
cpu               116 kernel/livepatch/transition.c 	for_each_possible_cpu(cpu) {
cpu               117 kernel/livepatch/transition.c 		task = idle_task(cpu);
cpu               390 kernel/livepatch/transition.c 	unsigned int cpu;
cpu               416 kernel/livepatch/transition.c 	for_each_possible_cpu(cpu) {
cpu               417 kernel/livepatch/transition.c 		task = idle_task(cpu);
cpu               418 kernel/livepatch/transition.c 		if (cpu_online(cpu)) {
cpu               466 kernel/livepatch/transition.c 	unsigned int cpu;
cpu               490 kernel/livepatch/transition.c 	for_each_possible_cpu(cpu) {
cpu               491 kernel/livepatch/transition.c 		task = idle_task(cpu);
cpu               507 kernel/livepatch/transition.c 	unsigned int cpu;
cpu               539 kernel/livepatch/transition.c 	for_each_possible_cpu(cpu) {
cpu               540 kernel/livepatch/transition.c 		task = idle_task(cpu);
cpu               580 kernel/livepatch/transition.c 	unsigned int cpu;
cpu               602 kernel/livepatch/transition.c 	for_each_possible_cpu(cpu)
cpu               603 kernel/livepatch/transition.c 		clear_tsk_thread_flag(idle_task(cpu), TIF_PATCH_PENDING);
cpu               632 kernel/livepatch/transition.c 	unsigned int cpu;
cpu               641 kernel/livepatch/transition.c 	for_each_possible_cpu(cpu)
cpu               642 kernel/livepatch/transition.c 		klp_update_patch_state(idle_task(cpu));
cpu                65 kernel/locking/lock_events.c 	int cpu, id, len;
cpu                76 kernel/locking/lock_events.c 	for_each_possible_cpu(cpu)
cpu                77 kernel/locking/lock_events.c 		sum += per_cpu(lockevents[id], cpu);
cpu                91 kernel/locking/lock_events.c 	int cpu;
cpu                99 kernel/locking/lock_events.c 	for_each_possible_cpu(cpu) {
cpu               101 kernel/locking/lock_events.c 		unsigned long *ptr = per_cpu_ptr(lockevents, cpu);
cpu               232 kernel/locking/lockdep.c 	int cpu, i;
cpu               235 kernel/locking/lockdep.c 	for_each_possible_cpu(cpu) {
cpu               237 kernel/locking/lockdep.c 			&per_cpu(cpu_lock_stats, cpu)[class - lock_classes];
cpu               260 kernel/locking/lockdep.c 	int cpu;
cpu               262 kernel/locking/lockdep.c 	for_each_possible_cpu(cpu) {
cpu               264 kernel/locking/lockdep.c 			&per_cpu(cpu_lock_stats, cpu)[class - lock_classes];
cpu              3722 kernel/locking/lockdep.c 	lock->cpu = raw_smp_processor_id();
cpu              4656 kernel/locking/lockdep.c 	if (lock->cpu != smp_processor_id())
cpu              4668 kernel/locking/lockdep.c 	int i, cpu;
cpu              4687 kernel/locking/lockdep.c 	cpu = smp_processor_id();
cpu              4703 kernel/locking/lockdep.c 	if (lock->cpu != cpu)
cpu              4706 kernel/locking/lockdep.c 	lock->cpu = cpu;
cpu               229 kernel/locking/lockdep_internals.h 	int idx, cpu;
cpu               233 kernel/locking/lockdep_internals.h 	for_each_possible_cpu(cpu)
cpu               234 kernel/locking/lockdep_internals.h 		ops += per_cpu(lockdep_stats.lock_class_ops[idx], cpu);
cpu                27 kernel/locking/osq_lock.c 	return node->cpu - 1;
cpu                55 kernel/locking/osq_lock.c 	old = prev ? prev->cpu : OSQ_UNLOCKED_VAL;
cpu                99 kernel/locking/osq_lock.c 	node->cpu = curr;
cpu               115 kernel/locking/percpu-rwsem.c 	int cpu;							\
cpu               117 kernel/locking/percpu-rwsem.c 	for_each_possible_cpu(cpu)					\
cpu               118 kernel/locking/percpu-rwsem.c 		__sum += per_cpu(var, cpu);				\
cpu               114 kernel/locking/qspinlock.c static inline __pure u32 encode_tail(int cpu, int idx)
cpu               118 kernel/locking/qspinlock.c 	tail  = (cpu + 1) << _Q_TAIL_CPU_OFFSET;
cpu               126 kernel/locking/qspinlock.c 	int cpu = (tail >> _Q_TAIL_CPU_OFFSET) - 1;
cpu               129 kernel/locking/qspinlock.c 	return per_cpu_ptr(&qnodes[idx].mcs, cpu);
cpu                52 kernel/locking/qspinlock_paravirt.h 	int			cpu;
cpu               284 kernel/locking/qspinlock_paravirt.h 	pn->cpu = smp_processor_id();
cpu               533 kernel/locking/qspinlock_paravirt.h 	pv_kick(node->cpu);
cpu                40 kernel/locking/qspinlock_stat.h 	int cpu, id, len;
cpu                51 kernel/locking/qspinlock_stat.h 	for_each_possible_cpu(cpu) {
cpu                52 kernel/locking/qspinlock_stat.h 		sum += per_cpu(lockevents[id], cpu);
cpu                60 kernel/locking/qspinlock_stat.h 			kicks += per_cpu(EVENT_COUNT(pv_kick_unlock), cpu);
cpu                64 kernel/locking/qspinlock_stat.h 			kicks += per_cpu(EVENT_COUNT(pv_kick_wake), cpu);
cpu               108 kernel/locking/qspinlock_stat.h static inline void __pv_kick(int cpu)
cpu               112 kernel/locking/qspinlock_stat.h 	per_cpu(pv_kick_time, cpu) = start;
cpu               113 kernel/locking/qspinlock_stat.h 	pv_kick(cpu);
cpu               682 kernel/module.c 	int cpu;
cpu               684 kernel/module.c 	for_each_possible_cpu(cpu)
cpu               685 kernel/module.c 		memcpy(per_cpu_ptr(mod->percpu, cpu), from, size);
cpu               691 kernel/module.c 	unsigned int cpu;
cpu               700 kernel/module.c 		for_each_possible_cpu(cpu) {
cpu               701 kernel/module.c 			void *start = per_cpu_ptr(mod->percpu, cpu);
cpu                42 kernel/padata.c 	int cpu, target_cpu;
cpu                45 kernel/padata.c 	for (cpu = 0; cpu < cpu_index; cpu++)
cpu               107 kernel/padata.c 	int i, cpu, cpu_index, target_cpu, err;
cpu               126 kernel/padata.c 		cpu = cpumask_first(pd->cpumask.cbcpu);
cpu               128 kernel/padata.c 			cpu = cpumask_next(cpu, pd->cpumask.cbcpu);
cpu               130 kernel/padata.c 		*cb_cpu = cpu;
cpu               147 kernel/padata.c 	padata->cpu = target_cpu;
cpu               181 kernel/padata.c 	int cpu = pd->cpu;
cpu               183 kernel/padata.c 	next_queue = per_cpu_ptr(pd->pqueue, cpu);
cpu               207 kernel/padata.c 		pd->cpu = cpumask_next_wrap(cpu, pd->cpumask.pcpu, -1, false);
cpu               268 kernel/padata.c 	next_queue = per_cpu_ptr(pd->pqueue, pd->cpu);
cpu               330 kernel/padata.c 							   padata->cpu);
cpu               401 kernel/padata.c 	int cpu;
cpu               404 kernel/padata.c 	for_each_cpu(cpu, pd->cpumask.cbcpu) {
cpu               405 kernel/padata.c 		squeue = per_cpu_ptr(pd->squeue, cpu);
cpu               415 kernel/padata.c 	int cpu;
cpu               418 kernel/padata.c 	for_each_cpu(cpu, pd->cpumask.pcpu) {
cpu               419 kernel/padata.c 		pqueue = per_cpu_ptr(pd->pqueue, cpu);
cpu               461 kernel/padata.c 	pd->cpu = cpumask_first(pd->cpumask.pcpu);
cpu               709 kernel/padata.c static int __padata_add_cpu(struct padata_instance *pinst, int cpu)
cpu               713 kernel/padata.c 	if (cpumask_test_cpu(cpu, cpu_online_mask)) {
cpu               724 kernel/padata.c static int __padata_remove_cpu(struct padata_instance *pinst, int cpu)
cpu               728 kernel/padata.c 	if (!cpumask_test_cpu(cpu, cpu_online_mask)) {
cpu               750 kernel/padata.c int padata_remove_cpu(struct padata_instance *pinst, int cpu, int mask)
cpu               761 kernel/padata.c 		cpumask_clear_cpu(cpu, pinst->cpumask.cbcpu);
cpu               763 kernel/padata.c 		cpumask_clear_cpu(cpu, pinst->cpumask.pcpu);
cpu               765 kernel/padata.c 	err = __padata_remove_cpu(pinst, cpu);
cpu               774 kernel/padata.c static inline int pinst_has_cpu(struct padata_instance *pinst, int cpu)
cpu               776 kernel/padata.c 	return cpumask_test_cpu(cpu, pinst->cpumask.pcpu) ||
cpu               777 kernel/padata.c 		cpumask_test_cpu(cpu, pinst->cpumask.cbcpu);
cpu               780 kernel/padata.c static int padata_cpu_online(unsigned int cpu, struct hlist_node *node)
cpu               786 kernel/padata.c 	if (!pinst_has_cpu(pinst, cpu))
cpu               790 kernel/padata.c 	ret = __padata_add_cpu(pinst, cpu);
cpu               795 kernel/padata.c static int padata_cpu_dead(unsigned int cpu, struct hlist_node *node)
cpu               801 kernel/padata.c 	if (!pinst_has_cpu(pinst, cpu))
cpu               805 kernel/padata.c 	ret = __padata_remove_cpu(pinst, cpu);
cpu               126 kernel/panic.c 	int old_cpu, cpu;
cpu               128 kernel/panic.c 	cpu = raw_smp_processor_id();
cpu               129 kernel/panic.c 	old_cpu = atomic_cmpxchg(&panic_cpu, PANIC_CPU_INVALID, cpu);
cpu               133 kernel/panic.c 	else if (old_cpu != cpu)
cpu                52 kernel/power/energy_model.c static void em_debug_create_pd(struct em_perf_domain *pd, int cpu)
cpu                58 kernel/power/energy_model.c 	snprintf(name, sizeof(name), "pd%d", cpu);
cpu                79 kernel/power/energy_model.c static void em_debug_create_pd(struct em_perf_domain *pd, int cpu) {}
cpu                86 kernel/power/energy_model.c 	int i, ret, cpu = cpumask_first(span);
cpu               109 kernel/power/energy_model.c 		ret = cb->active_power(&power, &freq, cpu);
cpu               111 kernel/power/energy_model.c 			pr_err("pd%d: invalid cap. state: %d\n", cpu, ret);
cpu               120 kernel/power/energy_model.c 			pr_err("pd%d: non-increasing freq: %lu\n", cpu, freq);
cpu               129 kernel/power/energy_model.c 			pr_err("pd%d: invalid power: %lu\n", cpu, power);
cpu               145 kernel/power/energy_model.c 					cpu, i, i - 1);
cpu               160 kernel/power/energy_model.c 	em_debug_create_pd(pd, cpu);
cpu               179 kernel/power/energy_model.c struct em_perf_domain *em_cpu_get(int cpu)
cpu               181 kernel/power/energy_model.c 	return READ_ONCE(per_cpu(em_data, cpu));
cpu               204 kernel/power/energy_model.c 	int cpu, ret = 0;
cpu               215 kernel/power/energy_model.c 	for_each_cpu(cpu, span) {
cpu               217 kernel/power/energy_model.c 		if (READ_ONCE(per_cpu(em_data, cpu))) {
cpu               226 kernel/power/energy_model.c 		cap = arch_scale_cpu_capacity(cpu);
cpu               243 kernel/power/energy_model.c 	for_each_cpu(cpu, span) {
cpu               249 kernel/power/energy_model.c 		smp_store_release(per_cpu_ptr(&em_data, cpu), pd);
cpu              2295 kernel/printk/printk.c static int console_cpu_notify(unsigned int cpu)
cpu               245 kernel/printk/printk_safe.c 	int cpu;
cpu               247 kernel/printk/printk_safe.c 	for_each_possible_cpu(cpu) {
cpu               249 kernel/printk/printk_safe.c 		__printk_safe_flush(&per_cpu(nmi_print_seq, cpu).work);
cpu               251 kernel/printk/printk_safe.c 		__printk_safe_flush(&per_cpu(safe_print_seq, cpu).work);
cpu               390 kernel/printk/printk_safe.c 	int cpu;
cpu               392 kernel/printk/printk_safe.c 	for_each_possible_cpu(cpu) {
cpu               395 kernel/printk/printk_safe.c 		s = &per_cpu(safe_print_seq, cpu);
cpu               399 kernel/printk/printk_safe.c 		s = &per_cpu(nmi_print_seq, cpu);
cpu               242 kernel/profile.c 	int cpu = smp_processor_id();
cpu               244 kernel/profile.c 	per_cpu(cpu_profile_flip, cpu) = !per_cpu(cpu_profile_flip, cpu);
cpu               249 kernel/profile.c 	int i, j, cpu;
cpu               255 kernel/profile.c 	for_each_online_cpu(cpu) {
cpu               256 kernel/profile.c 		struct profile_hit *hits = per_cpu(cpu_profile_hits, cpu)[j];
cpu               272 kernel/profile.c 	int i, cpu;
cpu               278 kernel/profile.c 	for_each_online_cpu(cpu) {
cpu               279 kernel/profile.c 		struct profile_hit *hits = per_cpu(cpu_profile_hits, cpu)[i];
cpu               288 kernel/profile.c 	int i, j, cpu;
cpu               294 kernel/profile.c 	cpu = get_cpu();
cpu               295 kernel/profile.c 	hits = per_cpu(cpu_profile_hits, cpu)[per_cpu(cpu_profile_flip, cpu)];
cpu               334 kernel/profile.c static int profile_dead_cpu(unsigned int cpu)
cpu               340 kernel/profile.c 		cpumask_clear_cpu(cpu, prof_cpu_mask);
cpu               343 kernel/profile.c 		if (per_cpu(cpu_profile_hits, cpu)[i]) {
cpu               344 kernel/profile.c 			page = virt_to_page(per_cpu(cpu_profile_hits, cpu)[i]);
cpu               345 kernel/profile.c 			per_cpu(cpu_profile_hits, cpu)[i] = NULL;
cpu               352 kernel/profile.c static int profile_prepare_cpu(unsigned int cpu)
cpu               354 kernel/profile.c 	int i, node = cpu_to_mem(cpu);
cpu               357 kernel/profile.c 	per_cpu(cpu_profile_flip, cpu) = 0;
cpu               360 kernel/profile.c 		if (per_cpu(cpu_profile_hits, cpu)[i])
cpu               365 kernel/profile.c 			profile_dead_cpu(cpu);
cpu               368 kernel/profile.c 		per_cpu(cpu_profile_hits, cpu)[i] = page_address(page);
cpu               374 kernel/profile.c static int profile_online_cpu(unsigned int cpu)
cpu               377 kernel/profile.c 		cpumask_set_cpu(cpu, prof_cpu_mask);
cpu               282 kernel/rcu/rcu.h extern void resched_cpu(int cpu);
cpu               352 kernel/rcu/rcu.h #define for_each_leaf_node_possible_cpu(rnp, cpu) \
cpu               353 kernel/rcu/rcu.h 	for ((cpu) = cpumask_next((rnp)->grplo - 1, cpu_possible_mask); \
cpu               354 kernel/rcu/rcu.h 	     (cpu) <= rnp->grphi; \
cpu               355 kernel/rcu/rcu.h 	     (cpu) = cpumask_next((cpu), cpu_possible_mask))
cpu               360 kernel/rcu/rcu.h #define rcu_find_next_bit(rnp, cpu, mask) \
cpu               361 kernel/rcu/rcu.h 	((rnp)->grplo + find_next_bit(&(mask), BITS_PER_LONG, (cpu)))
cpu               362 kernel/rcu/rcu.h #define for_each_leaf_node_cpu_mask(rnp, cpu, mask) \
cpu               363 kernel/rcu/rcu.h 	for ((cpu) = rcu_find_next_bit((rnp), 0, (mask)); \
cpu               364 kernel/rcu/rcu.h 	     (cpu) <= rnp->grphi; \
cpu               365 kernel/rcu/rcu.h 	     (cpu) = rcu_find_next_bit((rnp), (cpu) + 1 - (rnp->grplo), (mask)))
cpu               530 kernel/rcu/rcu.h bool rcu_is_nocb_cpu(int cpu);
cpu               533 kernel/rcu/rcu.h static inline bool rcu_is_nocb_cpu(int cpu) { return false; }
cpu               692 kernel/rcu/rcutorture.c 	int cpu;
cpu               694 kernel/rcu/rcutorture.c 	for_each_online_cpu(cpu) {
cpu               695 kernel/rcu/rcutorture.c 		rcutorture_sched_setaffinity(current->pid, cpumask_of(cpu));
cpu               696 kernel/rcu/rcutorture.c 		WARN_ON_ONCE(raw_smp_processor_id() != cpu);
cpu              1401 kernel/rcu/rcutorture.c 	int cpu;
cpu              1409 kernel/rcu/rcutorture.c 	for_each_possible_cpu(cpu) {
cpu              1411 kernel/rcu/rcutorture.c 			pipesummary[i] += per_cpu(rcu_torture_count, cpu)[i];
cpu              1412 kernel/rcu/rcutorture.c 			batchsummary[i] += per_cpu(rcu_torture_batch, cpu)[i];
cpu              1540 kernel/rcu/rcutorture.c static int rcutorture_booster_cleanup(unsigned int cpu)
cpu              1544 kernel/rcu/rcutorture.c 	if (boost_tasks[cpu] == NULL)
cpu              1547 kernel/rcu/rcutorture.c 	t = boost_tasks[cpu];
cpu              1548 kernel/rcu/rcutorture.c 	boost_tasks[cpu] = NULL;
cpu              1557 kernel/rcu/rcutorture.c static int rcutorture_booster_init(unsigned int cpu)
cpu              1561 kernel/rcu/rcutorture.c 	if (boost_tasks[cpu] != NULL)
cpu              1568 kernel/rcu/rcutorture.c 	boost_tasks[cpu] = kthread_create_on_node(rcu_torture_boost, NULL,
cpu              1569 kernel/rcu/rcutorture.c 						  cpu_to_node(cpu),
cpu              1571 kernel/rcu/rcutorture.c 	if (IS_ERR(boost_tasks[cpu])) {
cpu              1572 kernel/rcu/rcutorture.c 		retval = PTR_ERR(boost_tasks[cpu]);
cpu              1575 kernel/rcu/rcutorture.c 		boost_tasks[cpu] = NULL;
cpu              1579 kernel/rcu/rcutorture.c 	kthread_bind(boost_tasks[cpu], cpu);
cpu              1580 kernel/rcu/rcutorture.c 	wake_up_process(boost_tasks[cpu]);
cpu              2327 kernel/rcu/rcutorture.c 	int cpu;
cpu              2396 kernel/rcu/rcutorture.c 	for_each_possible_cpu(cpu) {
cpu              2398 kernel/rcu/rcutorture.c 			per_cpu(rcu_torture_count, cpu)[i] = 0;
cpu              2399 kernel/rcu/rcutorture.c 			per_cpu(rcu_torture_batch, cpu)[i] = 0;
cpu                85 kernel/rcu/srcutree.c 	int cpu;
cpu               133 kernel/rcu/srcutree.c 	for_each_possible_cpu(cpu) {
cpu               134 kernel/rcu/srcutree.c 		sdp = per_cpu_ptr(ssp->sda, cpu);
cpu               140 kernel/rcu/srcutree.c 		sdp->mynode = &snp_first[cpu / levelspread[level]];
cpu               143 kernel/rcu/srcutree.c 				snp->grplo = cpu;
cpu               144 kernel/rcu/srcutree.c 			snp->grphi = cpu;
cpu               146 kernel/rcu/srcutree.c 		sdp->cpu = cpu;
cpu               150 kernel/rcu/srcutree.c 		sdp->grpmask = 1 << (cpu - sdp->mynode->grplo);
cpu               249 kernel/rcu/srcutree.c 	int cpu;
cpu               252 kernel/rcu/srcutree.c 	for_each_possible_cpu(cpu) {
cpu               253 kernel/rcu/srcutree.c 		struct srcu_data *cpuc = per_cpu_ptr(ssp->sda, cpu);
cpu               266 kernel/rcu/srcutree.c 	int cpu;
cpu               269 kernel/rcu/srcutree.c 	for_each_possible_cpu(cpu) {
cpu               270 kernel/rcu/srcutree.c 		struct srcu_data *cpuc = per_cpu_ptr(ssp->sda, cpu);
cpu               335 kernel/rcu/srcutree.c 	int cpu;
cpu               338 kernel/rcu/srcutree.c 	for_each_possible_cpu(cpu) {
cpu               339 kernel/rcu/srcutree.c 		struct srcu_data *cpuc = per_cpu_ptr(ssp->sda, cpu);
cpu               372 kernel/rcu/srcutree.c 	int cpu;
cpu               379 kernel/rcu/srcutree.c 	for_each_possible_cpu(cpu) {
cpu               380 kernel/rcu/srcutree.c 		struct srcu_data *sdp = per_cpu_ptr(ssp->sda, cpu);
cpu               462 kernel/rcu/srcutree.c 	queue_work_on(sdp->cpu, rcu_gp_wq, &sdp->work);
cpu               469 kernel/rcu/srcutree.c 		queue_work_on(sdp->cpu, rcu_gp_wq, &sdp->work);
cpu               494 kernel/rcu/srcutree.c 	int cpu;
cpu               496 kernel/rcu/srcutree.c 	for (cpu = snp->grplo; cpu <= snp->grphi; cpu++) {
cpu               497 kernel/rcu/srcutree.c 		if (!(mask & (1 << (cpu - snp->grplo))))
cpu               499 kernel/rcu/srcutree.c 		srcu_schedule_cbs_sdp(per_cpu_ptr(ssp->sda, cpu), delay);
cpu               517 kernel/rcu/srcutree.c 	int cpu;
cpu               562 kernel/rcu/srcutree.c 			for (cpu = snp->grplo; cpu <= snp->grphi; cpu++) {
cpu               563 kernel/rcu/srcutree.c 				sdp = per_cpu_ptr(ssp->sda, cpu);
cpu              1024 kernel/rcu/srcutree.c 	int cpu;
cpu              1049 kernel/rcu/srcutree.c 	for_each_possible_cpu(cpu) {
cpu              1050 kernel/rcu/srcutree.c 		sdp = per_cpu_ptr(ssp->sda, cpu);
cpu              1255 kernel/rcu/srcutree.c 	int cpu;
cpu              1262 kernel/rcu/srcutree.c 	for_each_possible_cpu(cpu) {
cpu              1268 kernel/rcu/srcutree.c 		sdp = per_cpu_ptr(ssp->sda, cpu);
cpu              1284 kernel/rcu/srcutree.c 			cpu, c0, c1,
cpu               152 kernel/rcu/tree.c static void sync_sched_exp_online_cleanup(int cpu);
cpu               210 kernel/rcu/tree.c static long rcu_get_n_cbs_cpu(int cpu)
cpu               212 kernel/rcu/tree.c 	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
cpu               341 kernel/rcu/tree.c bool rcu_eqs_special_set(int cpu)
cpu               345 kernel/rcu/tree.c 	struct rcu_data *rdp = &per_cpu(rcu_data, cpu);
cpu               917 kernel/rcu/tree.c 	int cpu;
cpu               920 kernel/rcu/tree.c 	cpu = task_cpu(t);
cpu               923 kernel/rcu/tree.c 	smp_store_release(per_cpu_ptr(&rcu_data.rcu_urgent_qs, cpu), true);
cpu               987 kernel/rcu/tree.c 		trace_rcu_fqs(rcu_state.name, rdp->gp_seq, rdp->cpu, TPS("dti"));
cpu              1016 kernel/rcu/tree.c 		trace_rcu_fqs(rcu_state.name, rdp->gp_seq, rdp->cpu, TPS("dti"));
cpu              1036 kernel/rcu/tree.c 			__func__, rdp->cpu, ".o"[onl],
cpu              1054 kernel/rcu/tree.c 	ruqp = per_cpu_ptr(&rcu_data.rcu_urgent_qs, rdp->cpu);
cpu              1055 kernel/rcu/tree.c 	rnhqp = &per_cpu(rcu_data.rcu_need_heavy_qs, rdp->cpu);
cpu              1074 kernel/rcu/tree.c 	if (tick_nohz_full_cpu(rdp->cpu) &&
cpu              1077 kernel/rcu/tree.c 		resched_cpu(rdp->cpu);
cpu              1091 kernel/rcu/tree.c 			resched_cpu(rdp->cpu);
cpu              1100 kernel/rcu/tree.c 			irq_work_queue_on(&rdp->rcu_iw, rdp->cpu);
cpu              1947 kernel/rcu/tree.c rcu_report_qs_rdp(int cpu, struct rcu_data *rdp)
cpu              2020 kernel/rcu/tree.c 	rcu_report_qs_rdp(rdp->cpu, rdp);
cpu              2027 kernel/rcu/tree.c int rcutree_dying_cpu(unsigned int cpu)
cpu              2093 kernel/rcu/tree.c int rcutree_dead_cpu(unsigned int cpu)
cpu              2095 kernel/rcu/tree.c 	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
cpu              2104 kernel/rcu/tree.c 	do_nocb_deferred_wakeup(per_cpu_ptr(&rcu_data, cpu));
cpu              2260 kernel/rcu/tree.c 	int cpu;
cpu              2284 kernel/rcu/tree.c 		for_each_leaf_node_possible_cpu(rnp, cpu) {
cpu              2285 kernel/rcu/tree.c 			unsigned long bit = leaf_node_cpu_bit(rnp, cpu);
cpu              2287 kernel/rcu/tree.c 				if (f(per_cpu_ptr(&rcu_data, cpu)))
cpu              2426 kernel/rcu/tree.c static void rcu_cpu_kthread_park(unsigned int cpu)
cpu              2428 kernel/rcu/tree.c 	per_cpu(rcu_data.rcu_cpu_kthread_status, cpu) = RCU_KTHREAD_OFFCPU;
cpu              2431 kernel/rcu/tree.c static int rcu_cpu_kthread_should_run(unsigned int cpu)
cpu              2441 kernel/rcu/tree.c static void rcu_cpu_kthread(unsigned int cpu)
cpu              2485 kernel/rcu/tree.c 	int cpu;
cpu              2487 kernel/rcu/tree.c 	for_each_possible_cpu(cpu)
cpu              2488 kernel/rcu/tree.c 		per_cpu(rcu_data.rcu_cpu_has_work, cpu) = 0;
cpu              2835 kernel/rcu/tree.c static void rcu_barrier_trace(const char *s, int cpu, unsigned long done)
cpu              2837 kernel/rcu/tree.c 	trace_rcu_barrier(rcu_state.name, s, cpu,
cpu              2888 kernel/rcu/tree.c 	int cpu;
cpu              2925 kernel/rcu/tree.c 	for_each_possible_cpu(cpu) {
cpu              2926 kernel/rcu/tree.c 		rdp = per_cpu_ptr(&rcu_data, cpu);
cpu              2927 kernel/rcu/tree.c 		if (!cpu_online(cpu) &&
cpu              2931 kernel/rcu/tree.c 			rcu_barrier_trace(TPS("OnlineQ"), cpu,
cpu              2933 kernel/rcu/tree.c 			smp_call_function_single(cpu, rcu_barrier_func, NULL, 1);
cpu              2935 kernel/rcu/tree.c 			rcu_barrier_trace(TPS("OnlineNQ"), cpu,
cpu              2992 kernel/rcu/tree.c rcu_boot_init_percpu_data(int cpu)
cpu              2994 kernel/rcu/tree.c 	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
cpu              2997 kernel/rcu/tree.c 	rdp->grpmask = leaf_node_cpu_bit(rdp->mynode, cpu);
cpu              3004 kernel/rcu/tree.c 	rdp->cpu = cpu;
cpu              3018 kernel/rcu/tree.c int rcutree_prepare_cpu(unsigned int cpu)
cpu              3021 kernel/rcu/tree.c 	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
cpu              3052 kernel/rcu/tree.c 	rcu_prepare_kthreads(cpu);
cpu              3053 kernel/rcu/tree.c 	rcu_spawn_cpu_nocb_kthread(cpu);
cpu              3061 kernel/rcu/tree.c static void rcutree_affinity_setting(unsigned int cpu, int outgoing)
cpu              3063 kernel/rcu/tree.c 	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
cpu              3072 kernel/rcu/tree.c int rcutree_online_cpu(unsigned int cpu)
cpu              3078 kernel/rcu/tree.c 	rdp = per_cpu_ptr(&rcu_data, cpu);
cpu              3085 kernel/rcu/tree.c 	sync_sched_exp_online_cleanup(cpu);
cpu              3086 kernel/rcu/tree.c 	rcutree_affinity_setting(cpu, -1);
cpu              3094 kernel/rcu/tree.c int rcutree_offline_cpu(unsigned int cpu)
cpu              3100 kernel/rcu/tree.c 	rdp = per_cpu_ptr(&rcu_data, cpu);
cpu              3106 kernel/rcu/tree.c 	rcutree_affinity_setting(cpu, cpu);
cpu              3123 kernel/rcu/tree.c void rcu_cpu_starting(unsigned int cpu)
cpu              3132 kernel/rcu/tree.c 	if (per_cpu(rcu_cpu_started, cpu))
cpu              3135 kernel/rcu/tree.c 	per_cpu(rcu_cpu_started, cpu) = 1;
cpu              3137 kernel/rcu/tree.c 	rdp = per_cpu_ptr(&rcu_data, cpu);
cpu              3169 kernel/rcu/tree.c void rcu_report_dead(unsigned int cpu)
cpu              3173 kernel/rcu/tree.c 	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
cpu              3197 kernel/rcu/tree.c 	per_cpu(rcu_cpu_started, cpu) = 0;
cpu              3205 kernel/rcu/tree.c void rcutree_migrate_callbacks(int cpu)
cpu              3210 kernel/rcu/tree.c 	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
cpu              3244 kernel/rcu/tree.c 		  cpu, rcu_segcblist_n_cbs(&rdp->cblist),
cpu              3524 kernel/rcu/tree.c 	int cpu;
cpu              3542 kernel/rcu/tree.c 	for_each_online_cpu(cpu) {
cpu              3543 kernel/rcu/tree.c 		rcutree_prepare_cpu(cpu);
cpu              3544 kernel/rcu/tree.c 		rcu_cpu_starting(cpu);
cpu              3545 kernel/rcu/tree.c 		rcutree_online_cpu(cpu);
cpu               134 kernel/rcu/tree.h #define leaf_node_cpu_bit(rnp, cpu) (BIT((cpu) - (rnp)->grplo))
cpu               249 kernel/rcu/tree.h 	int cpu;
cpu               422 kernel/rcu/tree.h static void rcu_cpu_kthread_setup(unsigned int cpu);
cpu               424 kernel/rcu/tree.h static void rcu_prepare_kthreads(int cpu);
cpu               443 kernel/rcu/tree.h static void rcu_spawn_cpu_nocb_kthread(int cpu);
cpu               334 kernel/rcu/tree_exp.h 	int cpu;
cpu               347 kernel/rcu/tree_exp.h 	for_each_leaf_node_cpu_mask(rnp, cpu, rnp->expmask) {
cpu               348 kernel/rcu/tree_exp.h 		unsigned long mask = leaf_node_cpu_bit(rnp, cpu);
cpu               349 kernel/rcu/tree_exp.h 		struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
cpu               352 kernel/rcu/tree_exp.h 		if (raw_smp_processor_id() == cpu ||
cpu               375 kernel/rcu/tree_exp.h 	for_each_leaf_node_cpu_mask(rnp, cpu, mask_ofl_ipi) {
cpu               376 kernel/rcu/tree_exp.h 		unsigned long mask = leaf_node_cpu_bit(rnp, cpu);
cpu               377 kernel/rcu/tree_exp.h 		struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
cpu               384 kernel/rcu/tree_exp.h 		if (get_cpu() == cpu) {
cpu               388 kernel/rcu/tree_exp.h 		ret = smp_call_function_single(cpu, rcu_exp_handler, NULL, 0);
cpu               421 kernel/rcu/tree_exp.h 	int cpu;
cpu               441 kernel/rcu/tree_exp.h 		cpu = find_next_bit(&rnp->ffmask, BITS_PER_LONG, -1);
cpu               443 kernel/rcu/tree_exp.h 		if (unlikely(cpu > rnp->grphi - rnp->grplo))
cpu               444 kernel/rcu/tree_exp.h 			cpu = WORK_CPU_UNBOUND;
cpu               446 kernel/rcu/tree_exp.h 			cpu += rnp->grplo;
cpu               447 kernel/rcu/tree_exp.h 		queue_work_on(cpu, rcu_par_gp_wq, &rnp->rew.rew_work);
cpu               459 kernel/rcu/tree_exp.h 	int cpu;
cpu               488 kernel/rcu/tree_exp.h 			for_each_leaf_node_possible_cpu(rnp, cpu) {
cpu               491 kernel/rcu/tree_exp.h 				mask = leaf_node_cpu_bit(rnp, cpu);
cpu               495 kernel/rcu/tree_exp.h 				rdp = per_cpu_ptr(&rcu_data, cpu);
cpu               496 kernel/rcu/tree_exp.h 				pr_cont(" %d-%c%c%c", cpu,
cpu               497 kernel/rcu/tree_exp.h 					"O."[!!cpu_online(cpu)],
cpu               521 kernel/rcu/tree_exp.h 			for_each_leaf_node_possible_cpu(rnp, cpu) {
cpu               522 kernel/rcu/tree_exp.h 				mask = leaf_node_cpu_bit(rnp, cpu);
cpu               525 kernel/rcu/tree_exp.h 				dump_cpu_task(cpu);
cpu               672 kernel/rcu/tree_exp.h static void sync_sched_exp_online_cleanup(int cpu)
cpu               728 kernel/rcu/tree_exp.h static void sync_sched_exp_online_cleanup(int cpu)
cpu               736 kernel/rcu/tree_exp.h 	rdp = per_cpu_ptr(&rcu_data, cpu);
cpu               746 kernel/rcu/tree_exp.h 	if (my_cpu == cpu) {
cpu               754 kernel/rcu/tree_exp.h 	ret = smp_call_function_single(cpu, rcu_exp_handler, NULL, 0);
cpu               616 kernel/rcu/tree_plugin.h 		      tick_nohz_full_cpu(rdp->cpu);
cpu               636 kernel/rcu/tree_plugin.h 				irq_work_queue_on(&rdp->defer_qs_iw, rdp->cpu);
cpu               745 kernel/rcu/tree_plugin.h 	int cpu;
cpu               770 kernel/rcu/tree_plugin.h 	for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++) {
cpu               771 kernel/rcu/tree_plugin.h 		rdp = per_cpu_ptr(&rcu_data, cpu);
cpu               774 kernel/rcu/tree_plugin.h 			cpu, ".o"[onl],
cpu               944 kernel/rcu/tree_plugin.h static void rcu_cpu_kthread_setup(unsigned int cpu)
cpu              1159 kernel/rcu/tree_plugin.h 	int cpu;
cpu              1165 kernel/rcu/tree_plugin.h 	for_each_leaf_node_possible_cpu(rnp, cpu)
cpu              1166 kernel/rcu/tree_plugin.h 		if ((mask & leaf_node_cpu_bit(rnp, cpu)) &&
cpu              1167 kernel/rcu/tree_plugin.h 		    cpu != outgoingcpu)
cpu              1168 kernel/rcu/tree_plugin.h 			cpumask_set_cpu(cpu, cm);
cpu              1186 kernel/rcu/tree_plugin.h static void rcu_prepare_kthreads(int cpu)
cpu              1188 kernel/rcu/tree_plugin.h 	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
cpu              1221 kernel/rcu/tree_plugin.h static void rcu_prepare_kthreads(int cpu)
cpu              1519 kernel/rcu/tree_plugin.h 	WARN_ON_ONCE(smp_processor_id() != rdp->cpu);
cpu              1538 kernel/rcu/tree_plugin.h 	WARN_ON_ONCE(smp_processor_id() != rdp->cpu);
cpu              1606 kernel/rcu/tree_plugin.h 	    cpu_online(rdp->cpu))
cpu              1631 kernel/rcu/tree_plugin.h bool rcu_is_nocb_cpu(int cpu)
cpu              1634 kernel/rcu/tree_plugin.h 		return cpumask_test_cpu(cpu, rcu_nocb_mask);
cpu              1651 kernel/rcu/tree_plugin.h 		trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
cpu              1662 kernel/rcu/tree_plugin.h 		trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("DoWake"));
cpu              1680 kernel/rcu/tree_plugin.h 	trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, reason);
cpu              1806 kernel/rcu/tree_plugin.h 			trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
cpu              1821 kernel/rcu/tree_plugin.h 				trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
cpu              1844 kernel/rcu/tree_plugin.h 		trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("FirstBQ"));
cpu              1854 kernel/rcu/tree_plugin.h 			trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
cpu              1858 kernel/rcu/tree_plugin.h 			trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
cpu              1884 kernel/rcu/tree_plugin.h 		trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
cpu              1896 kernel/rcu/tree_plugin.h 			trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
cpu              1921 kernel/rcu/tree_plugin.h 		trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("WakeNot"));
cpu              1933 kernel/rcu/tree_plugin.h 	trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("Timer"));
cpu              1947 kernel/rcu/tree_plugin.h 	int __maybe_unused cpu = my_rdp->cpu;
cpu              1965 kernel/rcu/tree_plugin.h 		trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("Check"));
cpu              1979 kernel/rcu/tree_plugin.h 			trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
cpu              2007 kernel/rcu/tree_plugin.h 			trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
cpu              2039 kernel/rcu/tree_plugin.h 			trace_rcu_nocb_wake(rcu_state.name, cpu, TPS("Poll"));
cpu              2043 kernel/rcu/tree_plugin.h 		trace_rcu_nocb_wake(rcu_state.name, cpu, TPS("Sleep"));
cpu              2046 kernel/rcu/tree_plugin.h 		trace_rcu_nocb_wake(rcu_state.name, cpu, TPS("EndSleep"));
cpu              2119 kernel/rcu/tree_plugin.h 	trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("CBSleep"));
cpu              2131 kernel/rcu/tree_plugin.h 	trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("WokeEmpty"));
cpu              2171 kernel/rcu/tree_plugin.h 	trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("DeferredWake"));
cpu              2195 kernel/rcu/tree_plugin.h 	int cpu;
cpu              2231 kernel/rcu/tree_plugin.h 	for_each_cpu(cpu, rcu_nocb_mask) {
cpu              2232 kernel/rcu/tree_plugin.h 		rdp = per_cpu_ptr(&rcu_data, cpu);
cpu              2258 kernel/rcu/tree_plugin.h static void rcu_spawn_one_nocb_kthread(int cpu)
cpu              2260 kernel/rcu/tree_plugin.h 	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
cpu              2268 kernel/rcu/tree_plugin.h 	if (!rcu_is_nocb_cpu(cpu) || rdp->nocb_cb_kthread)
cpu              2275 kernel/rcu/tree_plugin.h 				"rcuog/%d", rdp_gp->cpu);
cpu              2283 kernel/rcu/tree_plugin.h 			"rcuo%c/%d", rcu_state.abbr, cpu);
cpu              2294 kernel/rcu/tree_plugin.h static void rcu_spawn_cpu_nocb_kthread(int cpu)
cpu              2297 kernel/rcu/tree_plugin.h 		rcu_spawn_one_nocb_kthread(cpu);
cpu              2308 kernel/rcu/tree_plugin.h 	int cpu;
cpu              2310 kernel/rcu/tree_plugin.h 	for_each_online_cpu(cpu)
cpu              2311 kernel/rcu/tree_plugin.h 		rcu_spawn_cpu_nocb_kthread(cpu);
cpu              2323 kernel/rcu/tree_plugin.h 	int cpu;
cpu              2345 kernel/rcu/tree_plugin.h 	for_each_cpu(cpu, rcu_nocb_mask) {
cpu              2346 kernel/rcu/tree_plugin.h 		rdp = per_cpu_ptr(&rcu_data, cpu);
cpu              2347 kernel/rcu/tree_plugin.h 		if (rdp->cpu >= nl) {
cpu              2350 kernel/rcu/tree_plugin.h 			nl = DIV_ROUND_UP(rdp->cpu + 1, ls) * ls;
cpu              2360 kernel/rcu/tree_plugin.h 					 __func__, cpu);
cpu              2368 kernel/rcu/tree_plugin.h 				pr_cont(" %d", cpu);
cpu              2396 kernel/rcu/tree_plugin.h 		rdp->cpu,
cpu              2424 kernel/rcu/tree_plugin.h 		rdp->cpu, rdp->nocb_gp_rdp->cpu,
cpu              2529 kernel/rcu/tree_plugin.h static void rcu_spawn_cpu_nocb_kthread(int cpu)
cpu               246 kernel/rcu/tree_stall.h 	int cpu;
cpu               252 kernel/rcu/tree_stall.h 		for_each_leaf_node_possible_cpu(rnp, cpu)
cpu               253 kernel/rcu/tree_stall.h 			if (rnp->qsmask & leaf_node_cpu_bit(rnp, cpu))
cpu               254 kernel/rcu/tree_stall.h 				if (!trigger_single_cpu_backtrace(cpu))
cpu               255 kernel/rcu/tree_stall.h 					dump_cpu_task(cpu);
cpu               262 kernel/rcu/tree_stall.h static void print_cpu_stall_fast_no_hz(char *cp, int cpu)
cpu               264 kernel/rcu/tree_stall.h 	struct rcu_data *rdp = &per_cpu(rcu_data, cpu);
cpu               275 kernel/rcu/tree_stall.h static void print_cpu_stall_fast_no_hz(char *cp, int cpu)
cpu               293 kernel/rcu/tree_stall.h static void print_cpu_stall_info(int cpu)
cpu               297 kernel/rcu/tree_stall.h 	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
cpu               314 kernel/rcu/tree_stall.h 	print_cpu_stall_fast_no_hz(fast_no_hz, cpu);
cpu               317 kernel/rcu/tree_stall.h 	       cpu,
cpu               318 kernel/rcu/tree_stall.h 	       "O."[!!cpu_online(cpu)],
cpu               327 kernel/rcu/tree_stall.h 	       rdp->softirq_snap, kstat_softirqs_cpu(RCU_SOFTIRQ, cpu),
cpu               356 kernel/rcu/tree_stall.h 	int cpu;
cpu               379 kernel/rcu/tree_stall.h 			for_each_leaf_node_possible_cpu(rnp, cpu)
cpu               380 kernel/rcu/tree_stall.h 				if (rnp->qsmask & leaf_node_cpu_bit(rnp, cpu)) {
cpu               381 kernel/rcu/tree_stall.h 					print_cpu_stall_info(cpu);
cpu               388 kernel/rcu/tree_stall.h 	for_each_possible_cpu(cpu)
cpu               389 kernel/rcu/tree_stall.h 		totqlen += rcu_get_n_cbs_cpu(cpu);
cpu               427 kernel/rcu/tree_stall.h 	int cpu;
cpu               447 kernel/rcu/tree_stall.h 	for_each_possible_cpu(cpu)
cpu               448 kernel/rcu/tree_stall.h 		totqlen += rcu_get_n_cbs_cpu(cpu);
cpu               554 kernel/rcu/tree_stall.h 	int cpu;
cpu               582 kernel/rcu/tree_stall.h 		for_each_leaf_node_possible_cpu(rnp, cpu) {
cpu               583 kernel/rcu/tree_stall.h 			rdp = per_cpu_ptr(&rcu_data, cpu);
cpu               589 kernel/rcu/tree_stall.h 				cpu, (long)rdp->gp_seq_needed);
cpu               592 kernel/rcu/tree_stall.h 	for_each_possible_cpu(cpu) {
cpu               593 kernel/rcu/tree_stall.h 		rdp = per_cpu_ptr(&rcu_data, cpu);
cpu               663 kernel/rcu/tree_stall.h 	int cpu;
cpu               680 kernel/rcu/tree_stall.h 	for_each_possible_cpu(cpu) {
cpu               681 kernel/rcu/tree_stall.h 		cbs = rcu_get_n_cbs_cpu(cpu);
cpu               686 kernel/rcu/tree_stall.h 		pr_cont(" %d: %lu", cpu, cbs);
cpu               690 kernel/rcu/tree_stall.h 		max_cpu = cpu;
cpu               601 kernel/rcu/update.c 	int cpu;
cpu               620 kernel/rcu/update.c 	cpu = task_cpu(t);
cpu               623 kernel/rcu/update.c 		 "N."[cpu < 0 || !tick_nohz_full_cpu(cpu)],
cpu               625 kernel/rcu/update.c 		 t->rcu_tasks_idle_cpu, cpu);
cpu               221 kernel/reboot.c 	int cpu = reboot_cpu;
cpu               226 kernel/reboot.c 	if (!cpu_online(cpu))
cpu               227 kernel/reboot.c 		cpu = cpumask_first(cpu_online_mask);
cpu               233 kernel/reboot.c 	set_cpus_allowed_ptr(current, cpumask_of(cpu));
cpu               218 kernel/relay.c 	*per_cpu_ptr(chan->buf, buf->cpu) = NULL;
cpu               417 kernel/relay.c 					    unsigned int cpu)
cpu               425 kernel/relay.c 	snprintf(tmpname, NAME_MAX, "%s%d", chan->base_filename, cpu);
cpu               444 kernel/relay.c static struct rchan_buf *relay_open_buf(struct rchan *chan, unsigned int cpu)
cpu               457 kernel/relay.c 		dentry = relay_create_buf_file(chan, buf, cpu);
cpu               470 kernel/relay.c  	buf->cpu = cpu;
cpu               475 kernel/relay.c  		buf->cpu = 0;
cpu               522 kernel/relay.c int relay_prepare_cpu(unsigned int cpu)
cpu               529 kernel/relay.c 		if ((buf = *per_cpu_ptr(chan->buf, cpu)))
cpu               531 kernel/relay.c 		buf = relay_open_buf(chan, cpu);
cpu               533 kernel/relay.c 			pr_err("relay: cpu %d buffer creation failed\n", cpu);
cpu               537 kernel/relay.c 		*per_cpu_ptr(chan->buf, cpu) = buf;
cpu               814 kernel/relay.c 			    unsigned int cpu,
cpu               819 kernel/relay.c 	if (!chan || cpu >= NR_CPUS)
cpu               822 kernel/relay.c 	buf = *per_cpu_ptr(chan->buf, cpu);
cpu               978 kernel/relay.c 		relay_subbufs_consumed(buf->chan, buf->cpu, 1);
cpu               991 kernel/relay.c 		relay_subbufs_consumed(buf->chan, buf->cpu, 1);
cpu              1170 kernel/relay.c 		relay_subbufs_consumed(rbuf->chan, rbuf->cpu, 1);
cpu               103 kernel/sched/clock.c static inline struct sched_clock_data *cpu_sdc(int cpu)
cpu               105 kernel/sched/clock.c 	return &per_cpu(sched_clock_data, cpu);
cpu               157 kernel/sched/clock.c 	int cpu;
cpu               167 kernel/sched/clock.c 	for_each_possible_cpu(cpu)
cpu               168 kernel/sched/clock.c 		per_cpu(sched_clock_data, cpu) = *scd;
cpu               365 kernel/sched/clock.c u64 sched_clock_cpu(int cpu)
cpu               377 kernel/sched/clock.c 	scd = cpu_sdc(cpu);
cpu               379 kernel/sched/clock.c 	if (cpu != smp_processor_id())
cpu               461 kernel/sched/clock.c u64 sched_clock_cpu(int cpu)
cpu               510 kernel/sched/core.c 	int cpu;
cpu               517 kernel/sched/core.c 	cpu = cpu_of(rq);
cpu               519 kernel/sched/core.c 	if (cpu == smp_processor_id()) {
cpu               526 kernel/sched/core.c 		smp_send_reschedule(cpu);
cpu               528 kernel/sched/core.c 		trace_sched_wake_idle_without_ipi(cpu);
cpu               531 kernel/sched/core.c void resched_cpu(int cpu)
cpu               533 kernel/sched/core.c 	struct rq *rq = cpu_rq(cpu);
cpu               537 kernel/sched/core.c 	if (cpu_online(cpu) || cpu == smp_processor_id())
cpu               554 kernel/sched/core.c 	int i, cpu = smp_processor_id();
cpu               557 kernel/sched/core.c 	if (!idle_cpu(cpu) && housekeeping_cpu(cpu, HK_FLAG_TIMER))
cpu               558 kernel/sched/core.c 		return cpu;
cpu               561 kernel/sched/core.c 	for_each_domain(cpu, sd) {
cpu               563 kernel/sched/core.c 			if (cpu == i)
cpu               567 kernel/sched/core.c 				cpu = i;
cpu               573 kernel/sched/core.c 	if (!housekeeping_cpu(cpu, HK_FLAG_TIMER))
cpu               574 kernel/sched/core.c 		cpu = housekeeping_any_cpu(HK_FLAG_TIMER);
cpu               577 kernel/sched/core.c 	return cpu;
cpu               590 kernel/sched/core.c static void wake_up_idle_cpu(int cpu)
cpu               592 kernel/sched/core.c 	struct rq *rq = cpu_rq(cpu);
cpu               594 kernel/sched/core.c 	if (cpu == smp_processor_id())
cpu               598 kernel/sched/core.c 		smp_send_reschedule(cpu);
cpu               600 kernel/sched/core.c 		trace_sched_wake_idle_without_ipi(cpu);
cpu               603 kernel/sched/core.c static bool wake_up_full_nohz_cpu(int cpu)
cpu               611 kernel/sched/core.c 	if (cpu_is_offline(cpu))
cpu               613 kernel/sched/core.c 	if (tick_nohz_full_cpu(cpu)) {
cpu               614 kernel/sched/core.c 		if (cpu != smp_processor_id() ||
cpu               616 kernel/sched/core.c 			tick_nohz_full_kick_cpu(cpu);
cpu               628 kernel/sched/core.c void wake_up_nohz_cpu(int cpu)
cpu               630 kernel/sched/core.c 	if (!wake_up_full_nohz_cpu(cpu))
cpu               631 kernel/sched/core.c 		wake_up_idle_cpu(cpu);
cpu               636 kernel/sched/core.c 	int cpu = smp_processor_id();
cpu               638 kernel/sched/core.c 	if (!(atomic_read(nohz_flags(cpu)) & NOHZ_KICK_MASK))
cpu               641 kernel/sched/core.c 	if (idle_cpu(cpu) && !need_resched())
cpu               648 kernel/sched/core.c 	atomic_andnot(NOHZ_KICK_MASK, nohz_flags(cpu));
cpu              1245 kernel/sched/core.c 	int cpu;
cpu              1249 kernel/sched/core.c 	for_each_possible_cpu(cpu) {
cpu              1250 kernel/sched/core.c 		memset(&cpu_rq(cpu)->uclamp, 0,
cpu              1252 kernel/sched/core.c 		cpu_rq(cpu)->uclamp_flags = 0;
cpu              1454 kernel/sched/core.c static inline bool is_cpu_allowed(struct task_struct *p, int cpu)
cpu              1456 kernel/sched/core.c 	if (!cpumask_test_cpu(cpu, p->cpus_ptr))
cpu              1460 kernel/sched/core.c 		return cpu_online(cpu);
cpu              1462 kernel/sched/core.c 	return cpu_active(cpu);
cpu              1755 kernel/sched/core.c static void __migrate_swap_task(struct task_struct *p, int cpu)
cpu              1762 kernel/sched/core.c 		dst_rq = cpu_rq(cpu);
cpu              1768 kernel/sched/core.c 		set_task_cpu(p, cpu);
cpu              1781 kernel/sched/core.c 		p->wake_cpu = cpu;
cpu              1994 kernel/sched/core.c 	int cpu;
cpu              1997 kernel/sched/core.c 	cpu = task_cpu(p);
cpu              1998 kernel/sched/core.c 	if ((cpu != smp_processor_id()) && task_curr(p))
cpu              1999 kernel/sched/core.c 		smp_send_reschedule(cpu);
cpu              2026 kernel/sched/core.c static int select_fallback_rq(int cpu, struct task_struct *p)
cpu              2028 kernel/sched/core.c 	int nid = cpu_to_node(cpu);
cpu              2088 kernel/sched/core.c 					task_pid_nr(p), p->comm, cpu);
cpu              2099 kernel/sched/core.c int select_task_rq(struct task_struct *p, int cpu, int sd_flags, int wake_flags)
cpu              2104 kernel/sched/core.c 		cpu = p->sched_class->select_task_rq(p, cpu, sd_flags, wake_flags);
cpu              2106 kernel/sched/core.c 		cpu = cpumask_any(p->cpus_ptr);
cpu              2118 kernel/sched/core.c 	if (unlikely(!is_cpu_allowed(p, cpu)))
cpu              2119 kernel/sched/core.c 		cpu = select_fallback_rq(task_cpu(p), p);
cpu              2121 kernel/sched/core.c 	return cpu;
cpu              2130 kernel/sched/core.c void sched_set_stop_task(int cpu, struct task_struct *stop)
cpu              2133 kernel/sched/core.c 	struct task_struct *old_stop = cpu_rq(cpu)->stop;
cpu              2149 kernel/sched/core.c 	cpu_rq(cpu)->stop = stop;
cpu              2171 kernel/sched/core.c ttwu_stat(struct task_struct *p, int cpu, int wake_flags)
cpu              2181 kernel/sched/core.c 	if (cpu == rq->cpu) {
cpu              2189 kernel/sched/core.c 		for_each_domain(rq->cpu, sd) {
cpu              2190 kernel/sched/core.c 			if (cpumask_test_cpu(cpu, sched_domain_span(sd))) {
cpu              2346 kernel/sched/core.c static void ttwu_queue_remote(struct task_struct *p, int cpu, int wake_flags)
cpu              2348 kernel/sched/core.c 	struct rq *rq = cpu_rq(cpu);
cpu              2352 kernel/sched/core.c 	if (llist_add(&p->wake_entry, &cpu_rq(cpu)->wake_list)) {
cpu              2354 kernel/sched/core.c 			smp_send_reschedule(cpu);
cpu              2356 kernel/sched/core.c 			trace_sched_wake_idle_without_ipi(cpu);
cpu              2360 kernel/sched/core.c void wake_up_if_idle(int cpu)
cpu              2362 kernel/sched/core.c 	struct rq *rq = cpu_rq(cpu);
cpu              2371 kernel/sched/core.c 		trace_sched_wake_idle_without_ipi(cpu);
cpu              2375 kernel/sched/core.c 			smp_send_reschedule(cpu);
cpu              2390 kernel/sched/core.c static void ttwu_queue(struct task_struct *p, int cpu, int wake_flags)
cpu              2392 kernel/sched/core.c 	struct rq *rq = cpu_rq(cpu);
cpu              2396 kernel/sched/core.c 	if (sched_feat(TTWU_QUEUE) && !cpus_share_cache(smp_processor_id(), cpu)) {
cpu              2397 kernel/sched/core.c 		sched_clock_cpu(cpu); /* Sync clocks across CPUs */
cpu              2398 kernel/sched/core.c 		ttwu_queue_remote(p, cpu, wake_flags);
cpu              2515 kernel/sched/core.c 	int cpu, success = 0;
cpu              2534 kernel/sched/core.c 		cpu = task_cpu(p);
cpu              2556 kernel/sched/core.c 	cpu = task_cpu(p);
cpu              2623 kernel/sched/core.c 	cpu = select_task_rq(p, p->wake_cpu, SD_BALANCE_WAKE, wake_flags);
cpu              2624 kernel/sched/core.c 	if (task_cpu(p) != cpu) {
cpu              2627 kernel/sched/core.c 		set_task_cpu(p, cpu);
cpu              2639 kernel/sched/core.c 	ttwu_queue(p, cpu, wake_flags);
cpu              2644 kernel/sched/core.c 		ttwu_stat(p, cpu, wake_flags);
cpu              3439 kernel/sched/core.c unsigned long nr_iowait_cpu(int cpu)
cpu              3441 kernel/sched/core.c 	return atomic_read(&cpu_rq(cpu)->nr_iowait);
cpu              3587 kernel/sched/core.c 	int cpu = smp_processor_id();
cpu              3588 kernel/sched/core.c 	struct rq *rq = cpu_rq(cpu);
cpu              3606 kernel/sched/core.c 	rq->idle_balance = idle_cpu(cpu);
cpu              3614 kernel/sched/core.c 	int			cpu;
cpu              3652 kernel/sched/core.c 	int cpu = twork->cpu;
cpu              3653 kernel/sched/core.c 	struct rq *rq = cpu_rq(cpu);
cpu              3666 kernel/sched/core.c 	if (!tick_nohz_tick_stopped_cpu(cpu))
cpu              3671 kernel/sched/core.c 	if (cpu_is_offline(cpu))
cpu              3703 kernel/sched/core.c static void sched_tick_start(int cpu)
cpu              3708 kernel/sched/core.c 	if (housekeeping_cpu(cpu, HK_FLAG_TICK))
cpu              3713 kernel/sched/core.c 	twork = per_cpu_ptr(tick_work_cpu, cpu);
cpu              3717 kernel/sched/core.c 		twork->cpu = cpu;
cpu              3724 kernel/sched/core.c static void sched_tick_stop(int cpu)
cpu              3729 kernel/sched/core.c 	if (housekeeping_cpu(cpu, HK_FLAG_TICK))
cpu              3734 kernel/sched/core.c 	twork = per_cpu_ptr(tick_work_cpu, cpu);
cpu              3750 kernel/sched/core.c static inline void sched_tick_start(int cpu) { }
cpu              3751 kernel/sched/core.c static inline void sched_tick_stop(int cpu) { }
cpu              4003 kernel/sched/core.c 	int cpu;
cpu              4005 kernel/sched/core.c 	cpu = smp_processor_id();
cpu              4006 kernel/sched/core.c 	rq = cpu_rq(cpu);
cpu              4616 kernel/sched/core.c int idle_cpu(int cpu)
cpu              4618 kernel/sched/core.c 	struct rq *rq = cpu_rq(cpu);
cpu              4640 kernel/sched/core.c int available_idle_cpu(int cpu)
cpu              4642 kernel/sched/core.c 	if (!idle_cpu(cpu))
cpu              4645 kernel/sched/core.c 	if (vcpu_is_preempted(cpu))
cpu              4657 kernel/sched/core.c struct task_struct *idle_task(int cpu)
cpu              4659 kernel/sched/core.c 	return cpu_rq(cpu)->idle;
cpu              6016 kernel/sched/core.c void init_idle(struct task_struct *idle, int cpu)
cpu              6018 kernel/sched/core.c 	struct rq *rq = cpu_rq(cpu);
cpu              6039 kernel/sched/core.c 	set_cpus_allowed_common(idle, cpumask_of(cpu));
cpu              6052 kernel/sched/core.c 	__set_task_cpu(idle, cpu);
cpu              6065 kernel/sched/core.c 	init_idle_preempt_count(idle, cpu);
cpu              6071 kernel/sched/core.c 	ftrace_graph_init_idle_task(idle, cpu);
cpu              6072 kernel/sched/core.c 	vtime_init_idle(idle, cpu);
cpu              6074 kernel/sched/core.c 	sprintf(idle->comm, "%s/%d", INIT_TASK_COMM, cpu);
cpu              6289 kernel/sched/core.c 		dest_cpu = select_fallback_rq(dead_rq->cpu, next);
cpu              6309 kernel/sched/core.c 		cpumask_set_cpu(rq->cpu, rq->rd->online);
cpu              6329 kernel/sched/core.c 		cpumask_clear_cpu(rq->cpu, rq->rd->online);
cpu              6369 kernel/sched/core.c static int cpuset_cpu_inactive(unsigned int cpu)
cpu              6372 kernel/sched/core.c 		if (dl_cpu_busy(cpu))
cpu              6382 kernel/sched/core.c int sched_cpu_activate(unsigned int cpu)
cpu              6384 kernel/sched/core.c 	struct rq *rq = cpu_rq(cpu);
cpu              6391 kernel/sched/core.c 	if (cpumask_weight(cpu_smt_mask(cpu)) == 2)
cpu              6394 kernel/sched/core.c 	set_cpu_active(cpu, true);
cpu              6397 kernel/sched/core.c 		sched_domains_numa_masks_set(cpu);
cpu              6412 kernel/sched/core.c 		BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
cpu              6420 kernel/sched/core.c int sched_cpu_deactivate(unsigned int cpu)
cpu              6424 kernel/sched/core.c 	set_cpu_active(cpu, false);
cpu              6438 kernel/sched/core.c 	if (cpumask_weight(cpu_smt_mask(cpu)) == 2)
cpu              6445 kernel/sched/core.c 	ret = cpuset_cpu_inactive(cpu);
cpu              6447 kernel/sched/core.c 		set_cpu_active(cpu, true);
cpu              6450 kernel/sched/core.c 	sched_domains_numa_masks_clear(cpu);
cpu              6454 kernel/sched/core.c static void sched_rq_cpu_starting(unsigned int cpu)
cpu              6456 kernel/sched/core.c 	struct rq *rq = cpu_rq(cpu);
cpu              6462 kernel/sched/core.c int sched_cpu_starting(unsigned int cpu)
cpu              6464 kernel/sched/core.c 	sched_rq_cpu_starting(cpu);
cpu              6465 kernel/sched/core.c 	sched_tick_start(cpu);
cpu              6470 kernel/sched/core.c int sched_cpu_dying(unsigned int cpu)
cpu              6472 kernel/sched/core.c 	struct rq *rq = cpu_rq(cpu);
cpu              6477 kernel/sched/core.c 	sched_tick_stop(cpu);
cpu              6481 kernel/sched/core.c 		BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
cpu              6669 kernel/sched/core.c 		rq->cpu = i;
cpu              6880 kernel/sched/core.c struct task_struct *curr_task(int cpu)
cpu              6882 kernel/sched/core.c 	return cpu_curr(cpu);
cpu              6903 kernel/sched/core.c void ia64_set_curr_task(int cpu, struct task_struct *p)
cpu              6905 kernel/sched/core.c 	cpu_curr(cpu) = p;
cpu              7914 kernel/sched/core.c void dump_cpu_task(int cpu)
cpu              7916 kernel/sched/core.c 	pr_info("Task dump for CPU %d:\n", cpu);
cpu              7917 kernel/sched/core.c 	sched_show_task(cpu_curr(cpu));
cpu                98 kernel/sched/cpuacct.c static u64 cpuacct_cpuusage_read(struct cpuacct *ca, int cpu,
cpu               101 kernel/sched/cpuacct.c 	struct cpuacct_usage *cpuusage = per_cpu_ptr(ca->cpuusage, cpu);
cpu               114 kernel/sched/cpuacct.c 	raw_spin_lock_irq(&cpu_rq(cpu)->lock);
cpu               128 kernel/sched/cpuacct.c 	raw_spin_unlock_irq(&cpu_rq(cpu)->lock);
cpu               134 kernel/sched/cpuacct.c static void cpuacct_cpuusage_write(struct cpuacct *ca, int cpu, u64 val)
cpu               136 kernel/sched/cpuacct.c 	struct cpuacct_usage *cpuusage = per_cpu_ptr(ca->cpuusage, cpu);
cpu               143 kernel/sched/cpuacct.c 	raw_spin_lock_irq(&cpu_rq(cpu)->lock);
cpu               150 kernel/sched/cpuacct.c 	raw_spin_unlock_irq(&cpu_rq(cpu)->lock);
cpu               189 kernel/sched/cpuacct.c 	int cpu;
cpu               197 kernel/sched/cpuacct.c 	for_each_possible_cpu(cpu)
cpu               198 kernel/sched/cpuacct.c 		cpuacct_cpuusage_write(ca, cpu, 0);
cpu               237 kernel/sched/cpuacct.c 	int cpu;
cpu               244 kernel/sched/cpuacct.c 	for_each_possible_cpu(cpu) {
cpu               245 kernel/sched/cpuacct.c 		struct cpuacct_usage *cpuusage = per_cpu_ptr(ca->cpuusage, cpu);
cpu               247 kernel/sched/cpuacct.c 		seq_printf(m, "%d", cpu);
cpu               255 kernel/sched/cpuacct.c 			raw_spin_lock_irq(&cpu_rq(cpu)->lock);
cpu               261 kernel/sched/cpuacct.c 			raw_spin_unlock_irq(&cpu_rq(cpu)->lock);
cpu               273 kernel/sched/cpuacct.c 	int cpu;
cpu               277 kernel/sched/cpuacct.c 	for_each_possible_cpu(cpu) {
cpu               278 kernel/sched/cpuacct.c 		u64 *cpustat = per_cpu_ptr(ca->cpustat, cpu)->cpustat;
cpu                30 kernel/sched/cpudeadline.c 	int orig_cpu = cp->elements[idx].cpu;
cpu                58 kernel/sched/cpudeadline.c 		cp->elements[idx].cpu = cp->elements[largest].cpu;
cpu                60 kernel/sched/cpudeadline.c 		cp->elements[cp->elements[idx].cpu].idx = idx;
cpu                64 kernel/sched/cpudeadline.c 	cp->elements[idx].cpu = orig_cpu;
cpu                66 kernel/sched/cpudeadline.c 	cp->elements[cp->elements[idx].cpu].idx = idx;
cpu                73 kernel/sched/cpudeadline.c 	int orig_cpu = cp->elements[idx].cpu;
cpu                84 kernel/sched/cpudeadline.c 		cp->elements[idx].cpu = cp->elements[p].cpu;
cpu                86 kernel/sched/cpudeadline.c 		cp->elements[cp->elements[idx].cpu].idx = idx;
cpu                90 kernel/sched/cpudeadline.c 	cp->elements[idx].cpu = orig_cpu;
cpu                92 kernel/sched/cpudeadline.c 	cp->elements[cp->elements[idx].cpu].idx = idx;
cpu               106 kernel/sched/cpudeadline.c 	return cp->elements[0].cpu;
cpu               150 kernel/sched/cpudeadline.c void cpudl_clear(struct cpudl *cp, int cpu)
cpu               155 kernel/sched/cpudeadline.c 	WARN_ON(!cpu_present(cpu));
cpu               159 kernel/sched/cpudeadline.c 	old_idx = cp->elements[cpu].idx;
cpu               167 kernel/sched/cpudeadline.c 		new_cpu = cp->elements[cp->size - 1].cpu;
cpu               169 kernel/sched/cpudeadline.c 		cp->elements[old_idx].cpu = new_cpu;
cpu               172 kernel/sched/cpudeadline.c 		cp->elements[cpu].idx = IDX_INVALID;
cpu               175 kernel/sched/cpudeadline.c 		cpumask_set_cpu(cpu, cp->free_cpus);
cpu               190 kernel/sched/cpudeadline.c void cpudl_set(struct cpudl *cp, int cpu, u64 dl)
cpu               195 kernel/sched/cpudeadline.c 	WARN_ON(!cpu_present(cpu));
cpu               199 kernel/sched/cpudeadline.c 	old_idx = cp->elements[cpu].idx;
cpu               204 kernel/sched/cpudeadline.c 		cp->elements[new_idx].cpu = cpu;
cpu               205 kernel/sched/cpudeadline.c 		cp->elements[cpu].idx = new_idx;
cpu               207 kernel/sched/cpudeadline.c 		cpumask_clear_cpu(cpu, cp->free_cpus);
cpu               221 kernel/sched/cpudeadline.c void cpudl_set_freecpu(struct cpudl *cp, int cpu)
cpu               223 kernel/sched/cpudeadline.c 	cpumask_set_cpu(cpu, cp->free_cpus);
cpu               231 kernel/sched/cpudeadline.c void cpudl_clear_freecpu(struct cpudl *cp, int cpu)
cpu               233 kernel/sched/cpudeadline.c 	cpumask_clear_cpu(cpu, cp->free_cpus);
cpu                 7 kernel/sched/cpudeadline.h 	int			cpu;
cpu                20 kernel/sched/cpudeadline.h void cpudl_set(struct cpudl *cp, int cpu, u64 dl);
cpu                21 kernel/sched/cpudeadline.h void cpudl_clear(struct cpudl *cp, int cpu);
cpu                23 kernel/sched/cpudeadline.h void cpudl_set_freecpu(struct cpudl *cp, int cpu);
cpu                24 kernel/sched/cpudeadline.h void cpudl_clear_freecpu(struct cpudl *cp, int cpu);
cpu                32 kernel/sched/cpufreq.c void cpufreq_add_update_util_hook(int cpu, struct update_util_data *data,
cpu                39 kernel/sched/cpufreq.c 	if (WARN_ON(per_cpu(cpufreq_update_util_data, cpu)))
cpu                43 kernel/sched/cpufreq.c 	rcu_assign_pointer(per_cpu(cpufreq_update_util_data, cpu), data);
cpu                57 kernel/sched/cpufreq.c void cpufreq_remove_update_util_hook(int cpu)
cpu                59 kernel/sched/cpufreq.c 	rcu_assign_pointer(per_cpu(cpufreq_update_util_data, cpu), NULL);
cpu                50 kernel/sched/cpufreq_schedutil.c 	unsigned int		cpu;
cpu               118 kernel/sched/cpufreq_schedutil.c 	int cpu;
cpu               130 kernel/sched/cpufreq_schedutil.c 		for_each_cpu(cpu, policy->cpus)
cpu               131 kernel/sched/cpufreq_schedutil.c 			trace_cpu_frequency(next_freq, cpu);
cpu               206 kernel/sched/cpufreq_schedutil.c unsigned long schedutil_cpu_util(int cpu, unsigned long util_cfs,
cpu               211 kernel/sched/cpufreq_schedutil.c 	struct rq *rq = cpu_rq(cpu);
cpu               294 kernel/sched/cpufreq_schedutil.c 	struct rq *rq = cpu_rq(sg_cpu->cpu);
cpu               296 kernel/sched/cpufreq_schedutil.c 	unsigned long max = arch_scale_cpu_capacity(sg_cpu->cpu);
cpu               301 kernel/sched/cpufreq_schedutil.c 	return schedutil_cpu_util(sg_cpu->cpu, util, max, FREQUENCY_UTIL, NULL);
cpu               430 kernel/sched/cpufreq_schedutil.c 	unsigned long idle_calls = tick_nohz_get_idle_calls_cpu(sg_cpu->cpu);
cpu               446 kernel/sched/cpufreq_schedutil.c 	if (cpu_bw_dl(cpu_rq(sg_cpu->cpu)) > sg_cpu->bw_dl)
cpu               836 kernel/sched/cpufreq_schedutil.c 	unsigned int cpu;
cpu               846 kernel/sched/cpufreq_schedutil.c 	for_each_cpu(cpu, policy->cpus) {
cpu               847 kernel/sched/cpufreq_schedutil.c 		struct sugov_cpu *sg_cpu = &per_cpu(sugov_cpu, cpu);
cpu               850 kernel/sched/cpufreq_schedutil.c 		sg_cpu->cpu			= cpu;
cpu               854 kernel/sched/cpufreq_schedutil.c 	for_each_cpu(cpu, policy->cpus) {
cpu               855 kernel/sched/cpufreq_schedutil.c 		struct sugov_cpu *sg_cpu = &per_cpu(sugov_cpu, cpu);
cpu               857 kernel/sched/cpufreq_schedutil.c 		cpufreq_add_update_util_hook(cpu, &sg_cpu->update_util,
cpu               868 kernel/sched/cpufreq_schedutil.c 	unsigned int cpu;
cpu               870 kernel/sched/cpufreq_schedutil.c 	for_each_cpu(cpu, policy->cpus)
cpu               871 kernel/sched/cpufreq_schedutil.c 		cpufreq_remove_update_util_hook(cpu);
cpu               131 kernel/sched/cpupri.c void cpupri_set(struct cpupri *cp, int cpu, int newpri)
cpu               133 kernel/sched/cpupri.c 	int *currpri = &cp->cpu_to_pri[cpu];
cpu               153 kernel/sched/cpupri.c 		cpumask_set_cpu(cpu, vec->mask);
cpu               187 kernel/sched/cpupri.c 		cpumask_clear_cpu(cpu, vec->mask);
cpu                22 kernel/sched/cpupri.h void cpupri_set(struct cpupri *cp, int cpu, int pri);
cpu                54 kernel/sched/cputime.c 	int cpu;
cpu                59 kernel/sched/cputime.c 	cpu = smp_processor_id();
cpu                60 kernel/sched/cputime.c 	delta = sched_clock_cpu(cpu) - irqtime->irq_start_time;
cpu               823 kernel/sched/cputime.c void vtime_init_idle(struct task_struct *t, int cpu)
cpu               388 kernel/sched/deadline.c 	cpumask_set_cpu(rq->cpu, rq->rd->dlo_mask);
cpu               405 kernel/sched/deadline.c 	cpumask_clear_cpu(rq->cpu, rq->rd->dlo_mask);
cpu               519 kernel/sched/deadline.c 	queue_balance_callback(rq, &per_cpu(dl_push_head, rq->cpu), push_dl_tasks);
cpu               524 kernel/sched/deadline.c 	queue_balance_callback(rq, &per_cpu(dl_pull_head, rq->cpu), pull_dl_task);
cpu               536 kernel/sched/deadline.c 		int cpu;
cpu               542 kernel/sched/deadline.c 		cpu = cpumask_any_and(cpu_active_mask, p->cpus_ptr);
cpu               543 kernel/sched/deadline.c 		if (cpu >= nr_cpu_ids) {
cpu               555 kernel/sched/deadline.c 			cpu = cpumask_any(cpu_active_mask);
cpu               557 kernel/sched/deadline.c 		later_rq = cpu_rq(cpu);
cpu               593 kernel/sched/deadline.c 	set_task_cpu(p, later_rq->cpu);
cpu              1184 kernel/sched/deadline.c 	int cpu = cpu_of(rq);
cpu              1230 kernel/sched/deadline.c 		unsigned long scale_freq = arch_scale_freq_capacity(cpu);
cpu              1231 kernel/sched/deadline.c 		unsigned long scale_cpu = arch_scale_cpu_capacity(cpu);
cpu              1341 kernel/sched/deadline.c 		cpudl_set(&rq->rd->cpudl, rq->cpu, deadline);
cpu              1356 kernel/sched/deadline.c 		cpudl_clear(&rq->rd->cpudl, rq->cpu);
cpu              1363 kernel/sched/deadline.c 		cpudl_set(&rq->rd->cpudl, rq->cpu, entry->deadline);
cpu              1602 kernel/sched/deadline.c select_task_rq_dl(struct task_struct *p, int cpu, int sd_flag, int flags)
cpu              1610 kernel/sched/deadline.c 	rq = cpu_rq(cpu);
cpu              1634 kernel/sched/deadline.c 			cpu = target;
cpu              1639 kernel/sched/deadline.c 	return cpu;
cpu              1840 kernel/sched/deadline.c static int pick_dl_task(struct rq *rq, struct task_struct *p, int cpu)
cpu              1843 kernel/sched/deadline.c 	    cpumask_test_cpu(cpu, p->cpus_ptr))
cpu              1852 kernel/sched/deadline.c static struct task_struct *pick_earliest_pushable_dl_task(struct rq *rq, int cpu)
cpu              1864 kernel/sched/deadline.c 		if (pick_dl_task(rq, p, cpu))
cpu              1881 kernel/sched/deadline.c 	int cpu = task_cpu(task);
cpu              1909 kernel/sched/deadline.c 	if (cpumask_test_cpu(cpu, later_mask))
cpu              1910 kernel/sched/deadline.c 		return cpu;
cpu              1919 kernel/sched/deadline.c 	for_each_domain(cpu, sd) {
cpu              1956 kernel/sched/deadline.c 	cpu = cpumask_any(later_mask);
cpu              1957 kernel/sched/deadline.c 	if (cpu < nr_cpu_ids)
cpu              1958 kernel/sched/deadline.c 		return cpu;
cpu              1968 kernel/sched/deadline.c 	int cpu;
cpu              1971 kernel/sched/deadline.c 		cpu = find_later_rq(task);
cpu              1973 kernel/sched/deadline.c 		if ((cpu == -1) || (cpu == rq->cpu))
cpu              1976 kernel/sched/deadline.c 		later_rq = cpu_rq(cpu);
cpu              1993 kernel/sched/deadline.c 				     !cpumask_test_cpu(later_rq->cpu, task->cpus_ptr) ||
cpu              2031 kernel/sched/deadline.c 	BUG_ON(rq->cpu != task_cpu(p));
cpu              2107 kernel/sched/deadline.c 	set_task_cpu(next_task, later_rq->cpu);
cpu              2136 kernel/sched/deadline.c 	int this_cpu = this_rq->cpu, cpu;
cpu              2151 kernel/sched/deadline.c 	for_each_cpu(cpu, this_rq->rd->dlo_mask) {
cpu              2152 kernel/sched/deadline.c 		if (this_cpu == cpu)
cpu              2155 kernel/sched/deadline.c 		src_rq = cpu_rq(cpu);
cpu              2270 kernel/sched/deadline.c 	cpudl_set_freecpu(&rq->rd->cpudl, rq->cpu);
cpu              2272 kernel/sched/deadline.c 		cpudl_set(&rq->rd->cpudl, rq->cpu, rq->dl.earliest_dl.curr);
cpu              2281 kernel/sched/deadline.c 	cpudl_clear(&rq->rd->cpudl, rq->cpu);
cpu              2282 kernel/sched/deadline.c 	cpudl_clear_freecpu(&rq->rd->cpudl, rq->cpu);
cpu              2472 kernel/sched/deadline.c 	int cpu, ret = 0;
cpu              2484 kernel/sched/deadline.c 	for_each_possible_cpu(cpu) {
cpu              2486 kernel/sched/deadline.c 		dl_b = dl_bw_of(cpu);
cpu              2519 kernel/sched/deadline.c 	int cpu;
cpu              2531 kernel/sched/deadline.c 	for_each_possible_cpu(cpu) {
cpu              2533 kernel/sched/deadline.c 		dl_b = dl_bw_of(cpu);
cpu              2540 kernel/sched/deadline.c 		init_dl_rq_bw_ratio(&cpu_rq(cpu)->dl);
cpu              2770 kernel/sched/deadline.c bool dl_cpu_busy(unsigned int cpu)
cpu              2778 kernel/sched/deadline.c 	dl_b = dl_bw_of(cpu);
cpu              2780 kernel/sched/deadline.c 	cpus = dl_bw_cpus(cpu);
cpu              2790 kernel/sched/deadline.c void print_dl_stats(struct seq_file *m, int cpu)
cpu              2792 kernel/sched/deadline.c 	print_dl_rq(m, cpu, &cpu_rq(cpu)->dl);
cpu               269 kernel/sched/debug.c static struct ctl_table *sd_alloc_ctl_cpu_table(int cpu)
cpu               276 kernel/sched/debug.c 	for_each_domain(cpu, sd)
cpu               283 kernel/sched/debug.c 	for_each_domain(cpu, sd) {
cpu               359 kernel/sched/debug.c void dirty_sched_domain_sysctl(int cpu)
cpu               362 kernel/sched/debug.c 		__cpumask_set_cpu(cpu, sd_sysctl_cpus);
cpu               375 kernel/sched/debug.c static void print_cfs_group_stats(struct seq_file *m, int cpu, struct task_group *tg)
cpu               377 kernel/sched/debug.c 	struct sched_entity *se = tg->se[cpu];
cpu               483 kernel/sched/debug.c void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
cpu               487 kernel/sched/debug.c 	struct rq *rq = cpu_rq(cpu);
cpu               493 kernel/sched/debug.c 	SEQ_printf(m, "cfs_rq[%d]:%s\n", cpu, task_group_path(cfs_rq->tg));
cpu               496 kernel/sched/debug.c 	SEQ_printf(m, "cfs_rq[%d]:\n", cpu);
cpu               557 kernel/sched/debug.c 	print_cfs_group_stats(m, cpu, cfs_rq->tg);
cpu               561 kernel/sched/debug.c void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq)
cpu               565 kernel/sched/debug.c 	SEQ_printf(m, "rt_rq[%d]:%s\n", cpu, task_group_path(rt_rq->tg));
cpu               568 kernel/sched/debug.c 	SEQ_printf(m, "rt_rq[%d]:\n", cpu);
cpu               591 kernel/sched/debug.c void print_dl_rq(struct seq_file *m, int cpu, struct dl_rq *dl_rq)
cpu               596 kernel/sched/debug.c 	SEQ_printf(m, "dl_rq[%d]:\n", cpu);
cpu               604 kernel/sched/debug.c 	dl_bw = &cpu_rq(cpu)->rd->dl_bw;
cpu               614 kernel/sched/debug.c static void print_cpu(struct seq_file *m, int cpu)
cpu               616 kernel/sched/debug.c 	struct rq *rq = cpu_rq(cpu);
cpu               624 kernel/sched/debug.c 			   cpu, freq / 1000, (freq % 1000));
cpu               627 kernel/sched/debug.c 	SEQ_printf(m, "cpu#%d\n", cpu);
cpu               670 kernel/sched/debug.c 	print_cfs_stats(m, cpu);
cpu               671 kernel/sched/debug.c 	print_rt_stats(m, cpu);
cpu               672 kernel/sched/debug.c 	print_dl_stats(m, cpu);
cpu               674 kernel/sched/debug.c 	print_rq(m, rq, cpu);
cpu               739 kernel/sched/debug.c 	int cpu = (unsigned long)(v - 2);
cpu               741 kernel/sched/debug.c 	if (cpu != -1)
cpu               742 kernel/sched/debug.c 		print_cpu(m, cpu);
cpu               751 kernel/sched/debug.c 	int cpu;
cpu               754 kernel/sched/debug.c 	for_each_online_cpu(cpu)
cpu               755 kernel/sched/debug.c 		print_cpu(NULL, cpu);
cpu                93 kernel/sched/fair.c int __weak arch_asym_cpu_priority(int cpu)
cpu                95 kernel/sched/fair.c 	return -cpu;
cpu               294 kernel/sched/fair.c 	int cpu = cpu_of(rq);
cpu               311 kernel/sched/fair.c 	    cfs_rq->tg->parent->cfs_rq[cpu]->on_list) {
cpu               319 kernel/sched/fair.c 			&(cfs_rq->tg->parent->cfs_rq[cpu]->leaf_cfs_rq_list));
cpu               727 kernel/sched/fair.c static int select_idle_sibling(struct task_struct *p, int prev_cpu, int cpu);
cpu               729 kernel/sched/fair.c static unsigned long capacity_of(int cpu);
cpu              1492 kernel/sched/fair.c 	int cpu;
cpu              1495 kernel/sched/fair.c 	for_each_cpu(cpu, cpumask_of_node(nid)) {
cpu              1496 kernel/sched/fair.c 		struct rq *rq = cpu_rq(cpu);
cpu              1499 kernel/sched/fair.c 		ns->compute_capacity += capacity_of(cpu);
cpu              1715 kernel/sched/fair.c 	int cpu;
cpu              1727 kernel/sched/fair.c 	for_each_cpu(cpu, cpumask_of_node(env->dst_nid)) {
cpu              1729 kernel/sched/fair.c 		if (!cpumask_test_cpu(cpu, env->p->cpus_ptr))
cpu              1732 kernel/sched/fair.c 		env->dst_cpu = cpu;
cpu              2250 kernel/sched/fair.c 	int cpu = cpupid_to_cpu(cpupid);
cpu              2280 kernel/sched/fair.c 	tsk = READ_ONCE(cpu_rq(cpu)->curr);
cpu              3742 kernel/sched/fair.c 	int cpu;
cpu              3780 kernel/sched/fair.c 	cpu = cpu_of(rq_of(cfs_rq));
cpu              3781 kernel/sched/fair.c 	if (task_util(p) > capacity_orig_of(cpu))
cpu              4881 kernel/sched/fair.c static void sync_throttle(struct task_group *tg, int cpu)
cpu              4891 kernel/sched/fair.c 	cfs_rq = tg->cfs_rq[cpu];
cpu              4892 kernel/sched/fair.c 	pcfs_rq = tg->parent->cfs_rq[cpu];
cpu              4895 kernel/sched/fair.c 	cfs_rq->throttled_clock_task = rq_clock_task(cpu_rq(cpu));
cpu              5095 kernel/sched/fair.c static inline void sync_throttle(struct task_group *tg, int cpu) {}
cpu              5183 kernel/sched/fair.c static inline unsigned long cpu_util(int cpu);
cpu              5185 kernel/sched/fair.c static inline bool cpu_overutilized(int cpu)
cpu              5187 kernel/sched/fair.c 	return !fits_capacity(cpu_util(cpu), capacity_of(cpu));
cpu              5192 kernel/sched/fair.c 	if (!READ_ONCE(rq->rd->overutilized) && cpu_overutilized(rq->cpu)) {
cpu              5390 kernel/sched/fair.c static int sched_idle_cpu(int cpu)
cpu              5392 kernel/sched/fair.c 	struct rq *rq = cpu_rq(cpu);
cpu              5403 kernel/sched/fair.c static unsigned long capacity_of(int cpu)
cpu              5405 kernel/sched/fair.c 	return cpu_rq(cpu)->cpu_capacity;
cpu              5408 kernel/sched/fair.c static unsigned long cpu_avg_load_per_task(int cpu)
cpu              5410 kernel/sched/fair.c 	struct rq *rq = cpu_rq(cpu);
cpu              5566 kernel/sched/fair.c static unsigned long cpu_util_without(int cpu, struct task_struct *p);
cpu              5568 kernel/sched/fair.c static unsigned long capacity_spare_without(int cpu, struct task_struct *p)
cpu              5570 kernel/sched/fair.c 	return max_t(long, capacity_of(cpu) - cpu_util_without(cpu, p), 0);
cpu              5773 kernel/sched/fair.c 				  int cpu, int prev_cpu, int sd_flag)
cpu              5775 kernel/sched/fair.c 	int new_cpu = cpu;
cpu              5797 kernel/sched/fair.c 		group = find_idlest_group(sd, p, cpu, sd_flag);
cpu              5803 kernel/sched/fair.c 		new_cpu = find_idlest_group_cpu(group, p, cpu);
cpu              5804 kernel/sched/fair.c 		if (new_cpu == cpu) {
cpu              5811 kernel/sched/fair.c 		cpu = new_cpu;
cpu              5814 kernel/sched/fair.c 		for_each_domain(cpu, tmp) {
cpu              5829 kernel/sched/fair.c static inline void set_idle_cores(int cpu, int val)
cpu              5833 kernel/sched/fair.c 	sds = rcu_dereference(per_cpu(sd_llc_shared, cpu));
cpu              5838 kernel/sched/fair.c static inline bool test_idle_cores(int cpu, bool def)
cpu              5842 kernel/sched/fair.c 	sds = rcu_dereference(per_cpu(sd_llc_shared, cpu));
cpu              5859 kernel/sched/fair.c 	int cpu;
cpu              5865 kernel/sched/fair.c 	for_each_cpu(cpu, cpu_smt_mask(core)) {
cpu              5866 kernel/sched/fair.c 		if (cpu == core)
cpu              5869 kernel/sched/fair.c 		if (!available_idle_cpu(cpu))
cpu              5886 kernel/sched/fair.c 	int core, cpu;
cpu              5899 kernel/sched/fair.c 		for_each_cpu(cpu, cpu_smt_mask(core)) {
cpu              5900 kernel/sched/fair.c 			__cpumask_clear_cpu(cpu, cpus);
cpu              5901 kernel/sched/fair.c 			if (!available_idle_cpu(cpu))
cpu              5922 kernel/sched/fair.c 	int cpu, si_cpu = -1;
cpu              5927 kernel/sched/fair.c 	for_each_cpu(cpu, cpu_smt_mask(target)) {
cpu              5928 kernel/sched/fair.c 		if (!cpumask_test_cpu(cpu, p->cpus_ptr))
cpu              5930 kernel/sched/fair.c 		if (available_idle_cpu(cpu))
cpu              5931 kernel/sched/fair.c 			return cpu;
cpu              5932 kernel/sched/fair.c 		if (si_cpu == -1 && sched_idle_cpu(cpu))
cpu              5933 kernel/sched/fair.c 			si_cpu = cpu;
cpu              5966 kernel/sched/fair.c 	int cpu, nr = INT_MAX, si_cpu = -1;
cpu              5994 kernel/sched/fair.c 	for_each_cpu_wrap(cpu, cpus, target) {
cpu              5997 kernel/sched/fair.c 		if (available_idle_cpu(cpu))
cpu              5999 kernel/sched/fair.c 		if (si_cpu == -1 && sched_idle_cpu(cpu))
cpu              6000 kernel/sched/fair.c 			si_cpu = cpu;
cpu              6008 kernel/sched/fair.c 	return cpu;
cpu              6101 kernel/sched/fair.c static inline unsigned long cpu_util(int cpu)
cpu              6106 kernel/sched/fair.c 	cfs_rq = &cpu_rq(cpu)->cfs;
cpu              6112 kernel/sched/fair.c 	return min_t(unsigned long, util, capacity_orig_of(cpu));
cpu              6128 kernel/sched/fair.c static unsigned long cpu_util_without(int cpu, struct task_struct *p)
cpu              6134 kernel/sched/fair.c 	if (cpu != task_cpu(p) || !READ_ONCE(p->se.avg.last_update_time))
cpu              6135 kernel/sched/fair.c 		return cpu_util(cpu);
cpu              6137 kernel/sched/fair.c 	cfs_rq = &cpu_rq(cpu)->cfs;
cpu              6201 kernel/sched/fair.c 	return min_t(unsigned long, util, capacity_orig_of(cpu));
cpu              6211 kernel/sched/fair.c static int wake_cap(struct task_struct *p, int cpu, int prev_cpu)
cpu              6218 kernel/sched/fair.c 	min_cap = min(capacity_orig_of(prev_cpu), capacity_orig_of(cpu));
cpu              6219 kernel/sched/fair.c 	max_cap = cpu_rq(cpu)->rd->max_cpu_capacity;
cpu              6235 kernel/sched/fair.c static unsigned long cpu_util_next(int cpu, struct task_struct *p, int dst_cpu)
cpu              6237 kernel/sched/fair.c 	struct cfs_rq *cfs_rq = &cpu_rq(cpu)->cfs;
cpu              6246 kernel/sched/fair.c 	if (task_cpu(p) == cpu && dst_cpu != cpu)
cpu              6248 kernel/sched/fair.c 	else if (task_cpu(p) != cpu && dst_cpu == cpu)
cpu              6260 kernel/sched/fair.c 		if (dst_cpu == cpu)
cpu              6266 kernel/sched/fair.c 	return min(util, capacity_orig_of(cpu));
cpu              6282 kernel/sched/fair.c 	int cpu;
cpu              6293 kernel/sched/fair.c 	for_each_cpu_and(cpu, pd_mask, cpu_online_mask) {
cpu              6294 kernel/sched/fair.c 		unsigned long cpu_util, util_cfs = cpu_util_next(cpu, p, dst_cpu);
cpu              6295 kernel/sched/fair.c 		struct task_struct *tsk = cpu == dst_cpu ? p : NULL;
cpu              6303 kernel/sched/fair.c 		sum_util += schedutil_cpu_util(cpu, util_cfs, cpu_cap,
cpu              6313 kernel/sched/fair.c 		cpu_util = schedutil_cpu_util(cpu, util_cfs, cpu_cap,
cpu              6365 kernel/sched/fair.c 	int cpu, best_energy_cpu = prev_cpu;
cpu              6397 kernel/sched/fair.c 		for_each_cpu_and(cpu, perf_domain_span(pd), sched_domain_span(sd)) {
cpu              6398 kernel/sched/fair.c 			if (!cpumask_test_cpu(cpu, p->cpus_ptr))
cpu              6402 kernel/sched/fair.c 			util = cpu_util_next(cpu, p, cpu);
cpu              6403 kernel/sched/fair.c 			cpu_cap = capacity_of(cpu);
cpu              6408 kernel/sched/fair.c 			if (cpu == prev_cpu) {
cpu              6421 kernel/sched/fair.c 				max_spare_cap_cpu = cpu;
cpu              6472 kernel/sched/fair.c 	int cpu = smp_processor_id();
cpu              6487 kernel/sched/fair.c 		want_affine = !wake_wide(p) && !wake_cap(p, cpu, prev_cpu) &&
cpu              6488 kernel/sched/fair.c 			      cpumask_test_cpu(cpu, p->cpus_ptr);
cpu              6492 kernel/sched/fair.c 	for_each_domain(cpu, tmp) {
cpu              6502 kernel/sched/fair.c 			if (cpu != prev_cpu)
cpu              6503 kernel/sched/fair.c 				new_cpu = wake_affine(tmp, p, cpu, prev_cpu, sync);
cpu              6517 kernel/sched/fair.c 		new_cpu = find_idlest_cpu(sd, p, cpu, prev_cpu, sd_flag);
cpu              6524 kernel/sched/fair.c 			current->recent_used_cpu = cpu;
cpu              7263 kernel/sched/fair.c 		int cpu;
cpu              7281 kernel/sched/fair.c 		for_each_cpu_and(cpu, env->dst_grpmask, env->cpus) {
cpu              7282 kernel/sched/fair.c 			if (cpumask_test_cpu(cpu, p->cpus_ptr)) {
cpu              7284 kernel/sched/fair.c 				env->new_dst_cpu = cpu;
cpu              7591 kernel/sched/fair.c 	int cpu = cpu_of(rq);
cpu              7608 kernel/sched/fair.c 		se = cfs_rq->tg->se[cpu];
cpu              7692 kernel/sched/fair.c static void update_blocked_averages(int cpu)
cpu              7695 kernel/sched/fair.c 	struct rq *rq = cpu_rq(cpu);
cpu              7771 kernel/sched/fair.c static unsigned long scale_rt_capacity(struct sched_domain *sd, int cpu)
cpu              7773 kernel/sched/fair.c 	struct rq *rq = cpu_rq(cpu);
cpu              7774 kernel/sched/fair.c 	unsigned long max = arch_scale_cpu_capacity(cpu);
cpu              7794 kernel/sched/fair.c static void update_cpu_capacity(struct sched_domain *sd, int cpu)
cpu              7796 kernel/sched/fair.c 	unsigned long capacity = scale_rt_capacity(sd, cpu);
cpu              7799 kernel/sched/fair.c 	cpu_rq(cpu)->cpu_capacity_orig = arch_scale_cpu_capacity(cpu);
cpu              7804 kernel/sched/fair.c 	cpu_rq(cpu)->cpu_capacity = capacity;
cpu              7810 kernel/sched/fair.c void update_group_capacity(struct sched_domain *sd, int cpu)
cpu              7822 kernel/sched/fair.c 		update_cpu_capacity(sd, cpu);
cpu              7836 kernel/sched/fair.c 		for_each_cpu(cpu, sched_group_span(sdg)) {
cpu              7838 kernel/sched/fair.c 			struct rq *rq = cpu_rq(cpu);
cpu              7852 kernel/sched/fair.c 				capacity += capacity_of(cpu);
cpu              8026 kernel/sched/fair.c 	unsigned int cpu = rq->cpu;
cpu              8031 kernel/sched/fair.c 	if (!cpumask_test_cpu(cpu, nohz.idle_cpus_mask))
cpu              8037 kernel/sched/fair.c 	update_blocked_averages(cpu);
cpu              8790 kernel/sched/fair.c 	int cpu, balance_cpu = -1;
cpu              8807 kernel/sched/fair.c 	for_each_cpu_and(cpu, group_balance_mask(sg), env->cpus) {
cpu              8808 kernel/sched/fair.c 		if (!idle_cpu(cpu))
cpu              8811 kernel/sched/fair.c 		balance_cpu = cpu;
cpu              8878 kernel/sched/fair.c 	env.src_cpu = busiest->cpu;
cpu              9185 kernel/sched/fair.c 			.src_cpu	= busiest_rq->cpu,
cpu              9242 kernel/sched/fair.c 	int cpu = rq->cpu;
cpu              9252 kernel/sched/fair.c 	for_each_domain(cpu, sd) {
cpu              9288 kernel/sched/fair.c 			if (load_balance(cpu, rq, sd, idle, &continue_balancing)) {
cpu              9294 kernel/sched/fair.c 				idle = idle_cpu(cpu) ? CPU_IDLE : CPU_NOT_IDLE;
cpu              9405 kernel/sched/fair.c 	int nr_busy, i, cpu = rq->cpu;
cpu              9451 kernel/sched/fair.c 	sd = rcu_dereference(per_cpu(sd_asym_packing, cpu));
cpu              9459 kernel/sched/fair.c 			if (sched_asym_prefer(i, cpu)) {
cpu              9466 kernel/sched/fair.c 	sd = rcu_dereference(per_cpu(sd_asym_cpucapacity, cpu));
cpu              9487 kernel/sched/fair.c 	sds = rcu_dereference(per_cpu(sd_llc_shared, cpu));
cpu              9511 kernel/sched/fair.c static void set_cpu_sd_state_busy(int cpu)
cpu              9516 kernel/sched/fair.c 	sd = rcu_dereference(per_cpu(sd_llc, cpu));
cpu              9535 kernel/sched/fair.c 	cpumask_clear_cpu(rq->cpu, nohz.idle_cpus_mask);
cpu              9538 kernel/sched/fair.c 	set_cpu_sd_state_busy(rq->cpu);
cpu              9541 kernel/sched/fair.c static void set_cpu_sd_state_idle(int cpu)
cpu              9546 kernel/sched/fair.c 	sd = rcu_dereference(per_cpu(sd_llc, cpu));
cpu              9561 kernel/sched/fair.c void nohz_balance_enter_idle(int cpu)
cpu              9563 kernel/sched/fair.c 	struct rq *rq = cpu_rq(cpu);
cpu              9565 kernel/sched/fair.c 	SCHED_WARN_ON(cpu != smp_processor_id());
cpu              9568 kernel/sched/fair.c 	if (!cpu_active(cpu))
cpu              9572 kernel/sched/fair.c 	if (!housekeeping_cpu(cpu, HK_FLAG_SCHED))
cpu              9597 kernel/sched/fair.c 	cpumask_set_cpu(cpu, nohz.idle_cpus_mask);
cpu              9607 kernel/sched/fair.c 	set_cpu_sd_state_idle(cpu);
cpu              9632 kernel/sched/fair.c 	int this_cpu = this_rq->cpu;
cpu              9731 kernel/sched/fair.c 	int this_cpu = this_rq->cpu;
cpu              9754 kernel/sched/fair.c 	int this_cpu = this_rq->cpu;
cpu              9802 kernel/sched/fair.c 	int this_cpu = this_rq->cpu;
cpu              9935 kernel/sched/fair.c 	update_blocked_averages(this_rq->cpu);
cpu              10341 kernel/sched/fair.c 	int cpu;
cpu              10343 kernel/sched/fair.c 	for_each_possible_cpu(cpu) {
cpu              10344 kernel/sched/fair.c 		if (tg->se[cpu])
cpu              10345 kernel/sched/fair.c 			remove_entity_load_avg(tg->se[cpu]);
cpu              10351 kernel/sched/fair.c 		if (!tg->cfs_rq[cpu]->on_list)
cpu              10354 kernel/sched/fair.c 		rq = cpu_rq(cpu);
cpu              10357 kernel/sched/fair.c 		list_del_leaf_cfs_rq(tg->cfs_rq[cpu]);
cpu              10363 kernel/sched/fair.c 			struct sched_entity *se, int cpu,
cpu              10366 kernel/sched/fair.c 	struct rq *rq = cpu_rq(cpu);
cpu              10372 kernel/sched/fair.c 	tg->cfs_rq[cpu] = cfs_rq;
cpu              10373 kernel/sched/fair.c 	tg->se[cpu] = se;
cpu              10511 kernel/sched/fair.c void print_cfs_stats(struct seq_file *m, int cpu)
cpu              10516 kernel/sched/fair.c 	for_each_leaf_cfs_rq_safe(cpu_rq(cpu), cfs_rq, pos)
cpu              10517 kernel/sched/fair.c 		print_cfs_rq(m, cpu, cfs_rq);
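
Many of the kernel/sched/fair.c lines above walk cpumasks with for_each_cpu_and() and cpumask_test_cpu() to pick or count CPUs. A minimal, self-contained sketch of the same iteration pattern using the generic cpumask API (the function name is illustrative, not part of fair.c):

#include <linux/cpumask.h>

/* Count how many CPUs in @allowed are currently online; this is the
 * same iteration the scheduler loops above perform, reduced to a count. */
static unsigned int count_allowed_online(const struct cpumask *allowed)
{
	unsigned int n = 0;
	int cpu;

	for_each_cpu_and(cpu, allowed, cpu_online_mask)
		n++;

	return n;
}
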
cpu               227 kernel/sched/idle.c 	int cpu = smp_processor_id();
cpu               245 kernel/sched/idle.c 		if (cpu_is_offline(cpu)) {
cpu               364 kernel/sched/idle.c select_task_rq_idle(struct task_struct *p, int cpu, int sd_flag, int flags)
cpu                25 kernel/sched/isolation.c 	int cpu;
cpu                29 kernel/sched/isolation.c 			cpu = sched_numa_find_closest(housekeeping_mask, smp_processor_id());
cpu                30 kernel/sched/isolation.c 			if (cpu < nr_cpu_ids)
cpu                31 kernel/sched/isolation.c 				return cpu;
cpu                57 kernel/sched/isolation.c bool housekeeping_test_cpu(int cpu, enum hk_flags flags)
cpu                61 kernel/sched/isolation.c 			return cpumask_test_cpu(cpu, housekeeping_mask);
cpu                68 kernel/sched/membarrier.c 	int cpu;
cpu                85 kernel/sched/membarrier.c 	for_each_online_cpu(cpu) {
cpu                96 kernel/sched/membarrier.c 		if (cpu == raw_smp_processor_id())
cpu                99 kernel/sched/membarrier.c 		if (!(READ_ONCE(cpu_rq(cpu)->membarrier_state) &
cpu               108 kernel/sched/membarrier.c 		p = rcu_dereference(cpu_rq(cpu)->curr);
cpu               112 kernel/sched/membarrier.c 		__cpumask_set_cpu(cpu, tmpmask);
cpu               134 kernel/sched/membarrier.c 	int cpu;
cpu               164 kernel/sched/membarrier.c 	for_each_online_cpu(cpu) {
cpu               175 kernel/sched/membarrier.c 		if (cpu == raw_smp_processor_id())
cpu               177 kernel/sched/membarrier.c 		p = rcu_dereference(cpu_rq(cpu)->curr);
cpu               179 kernel/sched/membarrier.c 			__cpumask_set_cpu(cpu, tmpmask);
cpu               204 kernel/sched/membarrier.c 	int cpu;
cpu               239 kernel/sched/membarrier.c 	for_each_online_cpu(cpu) {
cpu               240 kernel/sched/membarrier.c 		struct rq *rq = cpu_rq(cpu);
cpu               245 kernel/sched/membarrier.c 			__cpumask_set_cpu(cpu, tmpmask);
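
kernel/sched/membarrier.c above is the kernel half of membarrier(2); userspace issues the call directly, since glibc provides no wrapper. A small, hedged example with minimal error handling:

#define _GNU_SOURCE
#include <linux/membarrier.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <stdio.h>

int main(void)
{
	/* MEMBARRIER_CMD_GLOBAL orders this thread's memory accesses
	 * against all other running threads in the system. */
	long ret = syscall(__NR_membarrier, MEMBARRIER_CMD_GLOBAL, 0);

	if (ret) {
		perror("membarrier");
		return 1;
	}
	return 0;
}
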
cpu               184 kernel/sched/psi.c 	int cpu;
cpu               186 kernel/sched/psi.c 	for_each_possible_cpu(cpu)
cpu               187 kernel/sched/psi.c 		seqcount_init(&per_cpu_ptr(group->pcpu, cpu)->seq);
cpu               237 kernel/sched/psi.c static void get_recent_times(struct psi_group *group, int cpu,
cpu               241 kernel/sched/psi.c 	struct psi_group_cpu *groupc = per_cpu_ptr(group->pcpu, cpu);
cpu               252 kernel/sched/psi.c 		now = cpu_clock(cpu);
cpu               309 kernel/sched/psi.c 	int cpu;
cpu               320 kernel/sched/psi.c 	for_each_possible_cpu(cpu) {
cpu               325 kernel/sched/psi.c 		get_recent_times(group, cpu, aggregator, times,
cpu               626 kernel/sched/psi.c static void record_times(struct psi_group_cpu *groupc, int cpu,
cpu               632 kernel/sched/psi.c 	now = cpu_clock(cpu);
cpu               672 kernel/sched/psi.c static u32 psi_group_change(struct psi_group *group, int cpu,
cpu               680 kernel/sched/psi.c 	groupc = per_cpu_ptr(group->pcpu, cpu);
cpu               692 kernel/sched/psi.c 	record_times(groupc, cpu, false);
cpu               699 kernel/sched/psi.c 					cpu, t, groupc->tasks[0],
cpu               749 kernel/sched/psi.c 	int cpu = task_cpu(task);
cpu               761 kernel/sched/psi.c 				task->pid, task->comm, cpu,
cpu               781 kernel/sched/psi.c 		u32 state_mask = psi_group_change(group, cpu, clear, set);
cpu               791 kernel/sched/psi.c void psi_memstall_tick(struct task_struct *task, int cpu)
cpu               799 kernel/sched/psi.c 		groupc = per_cpu_ptr(group->pcpu, cpu);
cpu               801 kernel/sched/psi.c 		record_times(groupc, cpu, true);
cpu               157 kernel/sched/rt.c 		struct sched_rt_entity *rt_se, int cpu,
cpu               160 kernel/sched/rt.c 	struct rq *rq = cpu_rq(cpu);
cpu               167 kernel/sched/rt.c 	tg->rt_rq[cpu] = rt_rq;
cpu               168 kernel/sched/rt.c 	tg->rt_se[cpu] = rt_se;
cpu               279 kernel/sched/rt.c 	cpumask_set_cpu(rq->cpu, rq->rd->rto_mask);
cpu               300 kernel/sched/rt.c 	cpumask_clear_cpu(rq->cpu, rq->rd->rto_mask);
cpu               366 kernel/sched/rt.c 	queue_balance_callback(rq, &per_cpu(rt_push_head, rq->cpu), push_rt_tasks);
cpu               371 kernel/sched/rt.c 	queue_balance_callback(rq, &per_cpu(rt_pull_head, rq->cpu), pull_rt_task);
cpu               492 kernel/sched/rt.c 	int cpu = cpu_of(rq);
cpu               494 kernel/sched/rt.c 	rt_se = rt_rq->tg->rt_se[cpu];
cpu               510 kernel/sched/rt.c 	int cpu = cpu_of(rq_of_rt_rq(rt_rq));
cpu               512 kernel/sched/rt.c 	rt_se = rt_rq->tg->rt_se[cpu];
cpu               553 kernel/sched/rt.c struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
cpu               555 kernel/sched/rt.c 	return container_of(rt_b, struct task_group, rt_bandwidth)->rt_rq[cpu];
cpu               615 kernel/sched/rt.c struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
cpu               617 kernel/sched/rt.c 	return &cpu_rq(cpu)->rt;
cpu              1049 kernel/sched/rt.c 		cpupri_set(&rq->rd->cpupri, rq->cpu, prio);
cpu              1065 kernel/sched/rt.c 		cpupri_set(&rq->rd->cpupri, rq->cpu, rt_rq->highest_prio.curr);
cpu              1390 kernel/sched/rt.c select_task_rq_rt(struct task_struct *p, int cpu, int sd_flag, int flags)
cpu              1399 kernel/sched/rt.c 	rq = cpu_rq(cpu);
cpu              1437 kernel/sched/rt.c 			cpu = target;
cpu              1442 kernel/sched/rt.c 	return cpu;
cpu              1604 kernel/sched/rt.c static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu)
cpu              1607 kernel/sched/rt.c 	    cpumask_test_cpu(cpu, p->cpus_ptr))
cpu              1617 kernel/sched/rt.c static struct task_struct *pick_highest_pushable_task(struct rq *rq, int cpu)
cpu              1626 kernel/sched/rt.c 		if (pick_rt_task(rq, p, cpu))
cpu              1640 kernel/sched/rt.c 	int cpu      = task_cpu(task);
cpu              1660 kernel/sched/rt.c 	if (cpumask_test_cpu(cpu, lowest_mask))
cpu              1661 kernel/sched/rt.c 		return cpu;
cpu              1671 kernel/sched/rt.c 	for_each_domain(cpu, sd) {
cpu              1703 kernel/sched/rt.c 	cpu = cpumask_any(lowest_mask);
cpu              1704 kernel/sched/rt.c 	if (cpu < nr_cpu_ids)
cpu              1705 kernel/sched/rt.c 		return cpu;
cpu              1715 kernel/sched/rt.c 	int cpu;
cpu              1718 kernel/sched/rt.c 		cpu = find_lowest_rq(task);
cpu              1720 kernel/sched/rt.c 		if ((cpu == -1) || (cpu == rq->cpu))
cpu              1723 kernel/sched/rt.c 		lowest_rq = cpu_rq(cpu);
cpu              1744 kernel/sched/rt.c 				     !cpumask_test_cpu(lowest_rq->cpu, task->cpus_ptr) ||
cpu              1777 kernel/sched/rt.c 	BUG_ON(rq->cpu != task_cpu(p));
cpu              1858 kernel/sched/rt.c 	set_task_cpu(next_task, lowest_rq->cpu);
cpu              1925 kernel/sched/rt.c 	int cpu;
cpu              1943 kernel/sched/rt.c 		cpu = cpumask_next(rd->rto_cpu, rd->rto_mask);
cpu              1945 kernel/sched/rt.c 		rd->rto_cpu = cpu;
cpu              1947 kernel/sched/rt.c 		if (cpu < nr_cpu_ids)
cpu              1948 kernel/sched/rt.c 			return cpu;
cpu              1981 kernel/sched/rt.c 	int cpu = -1;
cpu              1999 kernel/sched/rt.c 		cpu = rto_next_cpu(rq->rd);
cpu              2005 kernel/sched/rt.c 	if (cpu >= 0) {
cpu              2008 kernel/sched/rt.c 		irq_work_queue_on(&rq->rd->rto_push_work, cpu);
cpu              2018 kernel/sched/rt.c 	int cpu;
cpu              2035 kernel/sched/rt.c 	cpu = rto_next_cpu(rd);
cpu              2039 kernel/sched/rt.c 	if (cpu < 0) {
cpu              2045 kernel/sched/rt.c 	irq_work_queue_on(&rd->rto_push_work, cpu);
cpu              2051 kernel/sched/rt.c 	int this_cpu = this_rq->cpu, cpu;
cpu              2068 kernel/sched/rt.c 	    cpumask_test_cpu(this_rq->cpu, this_rq->rd->rto_mask))
cpu              2078 kernel/sched/rt.c 	for_each_cpu(cpu, this_rq->rd->rto_mask) {
cpu              2079 kernel/sched/rt.c 		if (this_cpu == cpu)
cpu              2082 kernel/sched/rt.c 		src_rq = cpu_rq(cpu);
cpu              2170 kernel/sched/rt.c 	cpupri_set(&rq->rd->cpupri, rq->cpu, rq->rt.highest_prio.curr);
cpu              2181 kernel/sched/rt.c 	cpupri_set(&rq->rd->cpupri, rq->cpu, CPUPRI_INVALID);
cpu              2713 kernel/sched/rt.c void print_rt_stats(struct seq_file *m, int cpu)
cpu              2719 kernel/sched/rt.c 	for_each_rt_rq(rt_rq, iter, cpu_rq(cpu))
cpu              2720 kernel/sched/rt.c 		print_rt_rq(m, cpu, rt_rq);
cpu               325 kernel/sched/sched.h extern bool dl_cpu_busy(unsigned int cpu);
cpu               450 kernel/sched/sched.h 			struct sched_entity *se, int cpu,
cpu               461 kernel/sched/sched.h 		struct sched_rt_entity *rt_se, int cpu,
cpu               943 kernel/sched/sched.h 	int			cpu;
cpu              1029 kernel/sched/sched.h 	return rq->cpu;
cpu              1051 kernel/sched/sched.h #define cpu_rq(cpu)		(&per_cpu(runqueues, (cpu)))
cpu              1054 kernel/sched/sched.h #define cpu_curr(cpu)		(cpu_rq(cpu)->curr)
cpu              1279 kernel/sched/sched.h extern void sched_domains_numa_masks_set(unsigned int cpu);
cpu              1280 kernel/sched/sched.h extern void sched_domains_numa_masks_clear(unsigned int cpu);
cpu              1281 kernel/sched/sched.h extern int sched_numa_find_closest(const struct cpumask *cpus, int cpu);
cpu              1284 kernel/sched/sched.h static inline void sched_domains_numa_masks_set(unsigned int cpu) { }
cpu              1285 kernel/sched/sched.h static inline void sched_domains_numa_masks_clear(unsigned int cpu) { }
cpu              1286 kernel/sched/sched.h static inline int sched_numa_find_closest(const struct cpumask *cpus, int cpu)
cpu              1301 kernel/sched/sched.h extern int migrate_task_to(struct task_struct *p, int cpu);
cpu              1303 kernel/sched/sched.h 			int cpu, int scpu);
cpu              1342 kernel/sched/sched.h #define for_each_domain(cpu, __sd) \
cpu              1343 kernel/sched/sched.h 	for (__sd = rcu_dereference_check_sched_domain(cpu_rq(cpu)->sd); \
cpu              1357 kernel/sched/sched.h static inline struct sched_domain *highest_flag_domain(int cpu, int flag)
cpu              1361 kernel/sched/sched.h 	for_each_domain(cpu, sd) {
cpu              1370 kernel/sched/sched.h static inline struct sched_domain *lowest_flag_domain(int cpu, int flag)
cpu              1374 kernel/sched/sched.h 	for_each_domain(cpu, sd) {
cpu              1454 kernel/sched/sched.h void dirty_sched_domain_sysctl(int cpu);
cpu              1460 kernel/sched/sched.h static inline void dirty_sched_domain_sysctl(int cpu)
cpu              1502 kernel/sched/sched.h static inline void set_task_rq(struct task_struct *p, unsigned int cpu)
cpu              1509 kernel/sched/sched.h 	set_task_rq_fair(&p->se, p->se.cfs_rq, tg->cfs_rq[cpu]);
cpu              1510 kernel/sched/sched.h 	p->se.cfs_rq = tg->cfs_rq[cpu];
cpu              1511 kernel/sched/sched.h 	p->se.parent = tg->se[cpu];
cpu              1515 kernel/sched/sched.h 	p->rt.rt_rq  = tg->rt_rq[cpu];
cpu              1516 kernel/sched/sched.h 	p->rt.parent = tg->rt_se[cpu];
cpu              1522 kernel/sched/sched.h static inline void set_task_rq(struct task_struct *p, unsigned int cpu) { }
cpu              1530 kernel/sched/sched.h static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu)
cpu              1532 kernel/sched/sched.h 	set_task_rq(p, cpu);
cpu              1541 kernel/sched/sched.h 	WRITE_ONCE(p->cpu, cpu);
cpu              1543 kernel/sched/sched.h 	WRITE_ONCE(task_thread_info(p)->cpu, cpu);
cpu              1545 kernel/sched/sched.h 	p->wake_cpu = cpu;
cpu              1832 kernel/sched/sched.h extern void update_group_capacity(struct sched_domain *sd, int cpu);
cpu              1878 kernel/sched/sched.h extern void resched_cpu(int cpu);
cpu              1908 kernel/sched/sched.h 	int cpu;
cpu              1913 kernel/sched/sched.h 	cpu = cpu_of(rq);
cpu              1915 kernel/sched/sched.h 	if (!tick_nohz_full_cpu(cpu))
cpu              1919 kernel/sched/sched.h 		tick_nohz_dep_clear_cpu(cpu, TICK_DEP_BIT_SCHED);
cpu              1921 kernel/sched/sched.h 		tick_nohz_dep_set_cpu(cpu, TICK_DEP_BIT_SCHED);
cpu              1988 kernel/sched/sched.h unsigned long arch_scale_freq_capacity(int cpu)
cpu              2184 kernel/sched/sched.h extern void print_cfs_stats(struct seq_file *m, int cpu);
cpu              2185 kernel/sched/sched.h extern void print_rt_stats(struct seq_file *m, int cpu);
cpu              2186 kernel/sched/sched.h extern void print_dl_stats(struct seq_file *m, int cpu);
cpu              2187 kernel/sched/sched.h extern void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq);
cpu              2188 kernel/sched/sched.h extern void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq);
cpu              2189 kernel/sched/sched.h extern void print_dl_rq(struct seq_file *m, int cpu, struct dl_rq *dl_rq);
cpu              2215 kernel/sched/sched.h #define nohz_flags(cpu)	(&cpu_rq(cpu)->nohz_flags)
cpu              2264 kernel/sched/sched.h static inline u64 irq_time_read(int cpu)
cpu              2266 kernel/sched/sched.h 	struct irqtime *irqtime = &per_cpu(cpu_irqtime, cpu);
cpu              2368 kernel/sched/sched.h static inline unsigned long capacity_orig_of(int cpu)
cpu              2370 kernel/sched/sched.h 	return cpu_rq(cpu)->cpu_capacity_orig;
cpu              2391 kernel/sched/sched.h unsigned long schedutil_cpu_util(int cpu, unsigned long util_cfs,
cpu              2422 kernel/sched/sched.h static inline unsigned long schedutil_cpu_util(int cpu, unsigned long util_cfs,
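
The kernel/sched/sched.h lines above are built on per-CPU data (per_cpu(runqueues, cpu) behind the cpu_rq() macro). The same facility is available to any kernel code; a minimal sketch with an illustrative counter name:

#include <linux/percpu.h>
#include <linux/cpumask.h>

/* One counter per possible CPU; the variable name is illustrative. */
static DEFINE_PER_CPU(unsigned long, demo_events);

static void demo_note_event(void)
{
	/* Increment this CPU's copy without locking. */
	this_cpu_inc(demo_events);
}

static unsigned long demo_sum_events(void)
{
	unsigned long sum = 0;
	int cpu;

	/* per_cpu(var, cpu) names one specific CPU's copy, just as
	 * cpu_rq(cpu) does for the runqueues above. */
	for_each_possible_cpu(cpu)
		sum += per_cpu(demo_events, cpu);

	return sum;
}
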
cpu                17 kernel/sched/stats.c 	int cpu;
cpu                28 kernel/sched/stats.c 		cpu = (unsigned long)(v - 2);
cpu                29 kernel/sched/stats.c 		rq = cpu_rq(cpu);
cpu                34 kernel/sched/stats.c 		    cpu, rq->yld_count,
cpu                45 kernel/sched/stats.c 		for_each_domain(cpu, sd) {
cpu                14 kernel/sched/stop_task.c select_task_rq_stop(struct task_struct *p, int cpu, int sd_flag, int flags)
cpu                28 kernel/sched/topology.c static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
cpu                47 kernel/sched/topology.c 	if (!cpumask_test_cpu(cpu, sched_domain_span(sd))) {
cpu                48 kernel/sched/topology.c 		printk(KERN_ERR "ERROR: domain->span does not contain CPU%d\n", cpu);
cpu                50 kernel/sched/topology.c 	if (group && !cpumask_test_cpu(cpu, sched_group_span(group))) {
cpu                51 kernel/sched/topology.c 		printk(KERN_ERR "ERROR: domain->groups does not contain CPU%d\n", cpu);
cpu               115 kernel/sched/topology.c static void sched_domain_debug(struct sched_domain *sd, int cpu)
cpu               123 kernel/sched/topology.c 		printk(KERN_DEBUG "CPU%d attaching NULL sched-domain.\n", cpu);
cpu               127 kernel/sched/topology.c 	printk(KERN_DEBUG "CPU%d attaching sched-domain(s):\n", cpu);
cpu               130 kernel/sched/topology.c 		if (sched_domain_debug_one(sd, cpu, level, sched_domains_tmpmask))
cpu               141 kernel/sched/topology.c # define sched_domain_debug(sd, cpu) do { } while (0)
cpu               246 kernel/sched/topology.c static struct perf_domain *find_pd(struct perf_domain *pd, int cpu)
cpu               249 kernel/sched/topology.c 		if (cpumask_test_cpu(cpu, perf_domain_span(pd)))
cpu               257 kernel/sched/topology.c static struct perf_domain *pd_init(int cpu)
cpu               259 kernel/sched/topology.c 	struct em_perf_domain *obj = em_cpu_get(cpu);
cpu               264 kernel/sched/topology.c 			pr_info("%s: no EM found for CPU%d\n", __func__, cpu);
cpu               346 kernel/sched/topology.c 	int cpu = cpumask_first(cpu_map);
cpu               347 kernel/sched/topology.c 	struct root_domain *rd = cpu_rq(cpu)->rd;
cpu               355 kernel/sched/topology.c 	if (!per_cpu(sd_asym_cpucapacity, cpu)) {
cpu               450 kernel/sched/topology.c 		if (cpumask_test_cpu(rq->cpu, old_rd->online))
cpu               453 kernel/sched/topology.c 		cpumask_clear_cpu(rq->cpu, old_rd->span);
cpu               467 kernel/sched/topology.c 	cpumask_set_cpu(rq->cpu, rd->span);
cpu               468 kernel/sched/topology.c 	if (cpumask_test_cpu(rq->cpu, cpu_active_mask))
cpu               627 kernel/sched/topology.c static void update_top_cache_domain(int cpu)
cpu               631 kernel/sched/topology.c 	int id = cpu;
cpu               634 kernel/sched/topology.c 	sd = highest_flag_domain(cpu, SD_SHARE_PKG_RESOURCES);
cpu               641 kernel/sched/topology.c 	rcu_assign_pointer(per_cpu(sd_llc, cpu), sd);
cpu               642 kernel/sched/topology.c 	per_cpu(sd_llc_size, cpu) = size;
cpu               643 kernel/sched/topology.c 	per_cpu(sd_llc_id, cpu) = id;
cpu               644 kernel/sched/topology.c 	rcu_assign_pointer(per_cpu(sd_llc_shared, cpu), sds);
cpu               646 kernel/sched/topology.c 	sd = lowest_flag_domain(cpu, SD_NUMA);
cpu               647 kernel/sched/topology.c 	rcu_assign_pointer(per_cpu(sd_numa, cpu), sd);
cpu               649 kernel/sched/topology.c 	sd = highest_flag_domain(cpu, SD_ASYM_PACKING);
cpu               650 kernel/sched/topology.c 	rcu_assign_pointer(per_cpu(sd_asym_packing, cpu), sd);
cpu               652 kernel/sched/topology.c 	sd = lowest_flag_domain(cpu, SD_ASYM_CPUCAPACITY);
cpu               653 kernel/sched/topology.c 	rcu_assign_pointer(per_cpu(sd_asym_cpucapacity, cpu), sd);
cpu               661 kernel/sched/topology.c cpu_attach_domain(struct sched_domain *sd, struct root_domain *rd, int cpu)
cpu               663 kernel/sched/topology.c 	struct rq *rq = cpu_rq(cpu);
cpu               696 kernel/sched/topology.c 	sched_domain_debug(sd, cpu);
cpu               701 kernel/sched/topology.c 	dirty_sched_domain_sysctl(cpu);
cpu               704 kernel/sched/topology.c 	update_top_cache_domain(cpu);
cpu               877 kernel/sched/topology.c build_group_from_child_sched_domain(struct sched_domain *sd, int cpu)
cpu               883 kernel/sched/topology.c 			GFP_KERNEL, cpu_to_node(cpu));
cpu               904 kernel/sched/topology.c 	int cpu;
cpu               907 kernel/sched/topology.c 	cpu = cpumask_first_and(sched_group_span(sg), mask);
cpu               909 kernel/sched/topology.c 	sg->sgc = *per_cpu_ptr(sdd->sgc, cpu);
cpu               927 kernel/sched/topology.c build_overlap_sched_groups(struct sched_domain *sd, int cpu)
cpu               938 kernel/sched/topology.c 	for_each_cpu_wrap(i, span, cpu) {
cpu               959 kernel/sched/topology.c 		sg = build_group_from_child_sched_domain(sibling, cpu);
cpu              1057 kernel/sched/topology.c static struct sched_group *get_group(int cpu, struct sd_data *sdd)
cpu              1059 kernel/sched/topology.c 	struct sched_domain *sd = *per_cpu_ptr(sdd->sd, cpu);
cpu              1065 kernel/sched/topology.c 		cpu = cpumask_first(sched_domain_span(child));
cpu              1067 kernel/sched/topology.c 	sg = *per_cpu_ptr(sdd->sg, cpu);
cpu              1068 kernel/sched/topology.c 	sg->sgc = *per_cpu_ptr(sdd->sgc, cpu);
cpu              1083 kernel/sched/topology.c 		cpumask_set_cpu(cpu, sched_group_span(sg));
cpu              1084 kernel/sched/topology.c 		cpumask_set_cpu(cpu, group_balance_mask(sg));
cpu              1102 kernel/sched/topology.c build_sched_groups(struct sched_domain *sd, int cpu)
cpu              1115 kernel/sched/topology.c 	for_each_cpu_wrap(i, span, cpu) {
cpu              1147 kernel/sched/topology.c static void init_sched_groups_capacity(int cpu, struct sched_domain *sd)
cpu              1154 kernel/sched/topology.c 		int cpu, max_cpu = -1;
cpu              1161 kernel/sched/topology.c 		for_each_cpu(cpu, sched_group_span(sg)) {
cpu              1163 kernel/sched/topology.c 				max_cpu = cpu;
cpu              1164 kernel/sched/topology.c 			else if (sched_asym_prefer(cpu, max_cpu))
cpu              1165 kernel/sched/topology.c 				max_cpu = cpu;
cpu              1173 kernel/sched/topology.c 	if (cpu != group_balance_cpu(sg))
cpu              1176 kernel/sched/topology.c 	update_group_capacity(sd, cpu);
cpu              1261 kernel/sched/topology.c static void claim_allocations(int cpu, struct sched_domain *sd)
cpu              1265 kernel/sched/topology.c 	WARN_ON_ONCE(*per_cpu_ptr(sdd->sd, cpu) != sd);
cpu              1266 kernel/sched/topology.c 	*per_cpu_ptr(sdd->sd, cpu) = NULL;
cpu              1268 kernel/sched/topology.c 	if (atomic_read(&(*per_cpu_ptr(sdd->sds, cpu))->ref))
cpu              1269 kernel/sched/topology.c 		*per_cpu_ptr(sdd->sds, cpu) = NULL;
cpu              1271 kernel/sched/topology.c 	if (atomic_read(&(*per_cpu_ptr(sdd->sg, cpu))->ref))
cpu              1272 kernel/sched/topology.c 		*per_cpu_ptr(sdd->sg, cpu) = NULL;
cpu              1274 kernel/sched/topology.c 	if (atomic_read(&(*per_cpu_ptr(sdd->sgc, cpu))->ref))
cpu              1275 kernel/sched/topology.c 		*per_cpu_ptr(sdd->sgc, cpu) = NULL;
cpu              1317 kernel/sched/topology.c 	struct sched_domain *child, int dflags, int cpu)
cpu              1320 kernel/sched/topology.c 	struct sched_domain *sd = *per_cpu_ptr(sdd->sd, cpu);
cpu              1330 kernel/sched/topology.c 	sd_weight = cpumask_weight(tl->mask(cpu));
cpu              1373 kernel/sched/topology.c 	cpumask_and(sched_domain_span(sd), cpu_map, tl->mask(cpu));
cpu              1462 kernel/sched/topology.c static const struct cpumask *sd_numa_mask(int cpu)
cpu              1464 kernel/sched/topology.c 	return sched_domains_numa_masks[sched_domains_curr_level][cpu_to_node(cpu)];
cpu              1705 kernel/sched/topology.c void sched_domains_numa_masks_set(unsigned int cpu)
cpu              1707 kernel/sched/topology.c 	int node = cpu_to_node(cpu);
cpu              1713 kernel/sched/topology.c 				cpumask_set_cpu(cpu, sched_domains_numa_masks[i][j]);
cpu              1718 kernel/sched/topology.c void sched_domains_numa_masks_clear(unsigned int cpu)
cpu              1724 kernel/sched/topology.c 			cpumask_clear_cpu(cpu, sched_domains_numa_masks[i][j]);
cpu              1736 kernel/sched/topology.c int sched_numa_find_closest(const struct cpumask *cpus, int cpu)
cpu              1738 kernel/sched/topology.c 	int i, j = cpu_to_node(cpu);
cpu              1741 kernel/sched/topology.c 		cpu = cpumask_any_and(cpus, sched_domains_numa_masks[i][j]);
cpu              1742 kernel/sched/topology.c 		if (cpu < nr_cpu_ids)
cpu              1743 kernel/sched/topology.c 			return cpu;
cpu              1857 kernel/sched/topology.c 		struct sched_domain *child, int dflags, int cpu)
cpu              1859 kernel/sched/topology.c 	struct sched_domain *sd = sd_init(tl, cpu_map, child, dflags, cpu);
cpu              1890 kernel/sched/topology.c 			      const struct cpumask *cpu_map, int cpu)
cpu              1905 kernel/sched/topology.c 		if (i == cpu)
cpu              1913 kernel/sched/topology.c 		if (!cpumask_equal(tl->mask(cpu), tl->mask(i)) &&
cpu              1914 kernel/sched/topology.c 		    cpumask_intersects(tl->mask(cpu), tl->mask(i)))
cpu              2166 kernel/sched/topology.c 	unsigned int cpu = cpumask_any(cpu_map);
cpu              2169 kernel/sched/topology.c 	if (rcu_access_pointer(per_cpu(sd_asym_cpucapacity, cpu)))
cpu                43 kernel/smp.c   int smpcfd_prepare_cpu(unsigned int cpu)
cpu                45 kernel/smp.c   	struct call_function_data *cfd = &per_cpu(cfd_data, cpu);
cpu                48 kernel/smp.c   				     cpu_to_node(cpu)))
cpu                51 kernel/smp.c   				     cpu_to_node(cpu))) {
cpu                65 kernel/smp.c   int smpcfd_dead_cpu(unsigned int cpu)
cpu                67 kernel/smp.c   	struct call_function_data *cfd = &per_cpu(cfd_data, cpu);
cpu                75 kernel/smp.c   int smpcfd_dying_cpu(unsigned int cpu)
cpu               142 kernel/smp.c   static int generic_exec_single(int cpu, call_single_data_t *csd,
cpu               145 kernel/smp.c   	if (cpu == smp_processor_id()) {
cpu               160 kernel/smp.c   	if ((unsigned)cpu >= nr_cpu_ids || !cpu_online(cpu)) {
cpu               179 kernel/smp.c   	if (llist_add(&csd->llist, &per_cpu(call_single_queue, cpu)))
cpu               180 kernel/smp.c   		arch_send_call_function_single_ipi(cpu);
cpu               269 kernel/smp.c   int smp_call_function_single(int cpu, smp_call_func_t func, void *info,
cpu               308 kernel/smp.c   	err = generic_exec_single(cpu, csd, func, info);
cpu               335 kernel/smp.c   int smp_call_function_single_async(int cpu, call_single_data_t *csd)
cpu               348 kernel/smp.c   	err = generic_exec_single(cpu, csd, csd->func, csd->info);
cpu               372 kernel/smp.c   	unsigned int cpu;
cpu               377 kernel/smp.c   	cpu = get_cpu();
cpu               378 kernel/smp.c   	if (cpumask_test_cpu(cpu, mask))
cpu               382 kernel/smp.c   	nodemask = cpumask_of_node(cpu_to_node(cpu));
cpu               383 kernel/smp.c   	for (cpu = cpumask_first_and(nodemask, mask); cpu < nr_cpu_ids;
cpu               384 kernel/smp.c   	     cpu = cpumask_next_and(cpu, nodemask, mask)) {
cpu               385 kernel/smp.c   		if (cpu_online(cpu))
cpu               390 kernel/smp.c   	cpu = cpumask_any_and(mask, cpu_online_mask);
cpu               392 kernel/smp.c   	ret = smp_call_function_single(cpu, func, info, wait);
cpu               416 kernel/smp.c   	int cpu, next_cpu, this_cpu = smp_processor_id();
cpu               436 kernel/smp.c   	cpu = cpumask_first_and(mask, cpu_online_mask);
cpu               437 kernel/smp.c   	if (cpu == this_cpu)
cpu               438 kernel/smp.c   		cpu = cpumask_next_and(cpu, mask, cpu_online_mask);
cpu               441 kernel/smp.c   	if (cpu >= nr_cpu_ids)
cpu               445 kernel/smp.c   	next_cpu = cpumask_next_and(cpu, mask, cpu_online_mask);
cpu               451 kernel/smp.c   		smp_call_function_single(cpu, func, info, wait);
cpu               465 kernel/smp.c   	for_each_cpu(cpu, cfd->cpumask) {
cpu               466 kernel/smp.c   		call_single_data_t *csd = per_cpu_ptr(cfd->csd, cpu);
cpu               473 kernel/smp.c   		if (llist_add(&csd->llist, &per_cpu(call_single_queue, cpu)))
cpu               474 kernel/smp.c   			__cpumask_set_cpu(cpu, cfd->cpumask_ipi);
cpu               481 kernel/smp.c   		for_each_cpu(cpu, cfd->cpumask) {
cpu               484 kernel/smp.c   			csd = per_cpu_ptr(cfd->csd, cpu);
cpu               581 kernel/smp.c   	unsigned int cpu;
cpu               589 kernel/smp.c   	for_each_present_cpu(cpu) {
cpu               592 kernel/smp.c   		if (!cpu_online(cpu))
cpu               593 kernel/smp.c   			cpu_up(cpu);
cpu               643 kernel/smp.c   	int cpu = get_cpu();
cpu               646 kernel/smp.c   	if (cpumask_test_cpu(cpu, mask)) {
cpu               683 kernel/smp.c   void on_each_cpu_cond_mask(bool (*cond_func)(int cpu, void *info),
cpu               688 kernel/smp.c   	int cpu, ret;
cpu               694 kernel/smp.c   		for_each_cpu(cpu, mask)
cpu               695 kernel/smp.c   			if (cond_func(cpu, info))
cpu               696 kernel/smp.c   				__cpumask_set_cpu(cpu, cpus);
cpu               706 kernel/smp.c   		for_each_cpu(cpu, mask)
cpu               707 kernel/smp.c   			if (cond_func(cpu, info)) {
cpu               708 kernel/smp.c   				ret = smp_call_function_single(cpu, func,
cpu               717 kernel/smp.c   void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info),
cpu               757 kernel/smp.c   	int cpu;
cpu               760 kernel/smp.c   	for_each_online_cpu(cpu) {
cpu               761 kernel/smp.c   		if (cpu == smp_processor_id())
cpu               764 kernel/smp.c   		wake_up_if_idle(cpu);
cpu               783 kernel/smp.c   	int			cpu;
cpu               791 kernel/smp.c   	if (sscs->cpu >= 0)
cpu               792 kernel/smp.c   		hypervisor_pin_vcpu(sscs->cpu);
cpu               794 kernel/smp.c   	if (sscs->cpu >= 0)
cpu               800 kernel/smp.c   int smp_call_on_cpu(unsigned int cpu, int (*func)(void *), void *par, bool phys)
cpu               806 kernel/smp.c   		.cpu  = phys ? cpu : -1,
cpu               811 kernel/smp.c   	if (cpu >= nr_cpu_ids || !cpu_online(cpu))
cpu               814 kernel/smp.c   	queue_work_on(cpu, system_wq, &sscs.work);
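
kernel/smp.c above provides the cross-CPU call primitives (smp_call_function_single() and friends). A caller-side fragment, with illustrative names, that runs a short function on every other online CPU and waits for each call to finish:

#include <linux/smp.h>
#include <linux/cpumask.h>
#include <linux/printk.h>

/* Runs on the target CPU in IPI context, so it must not sleep. */
static void report_cpu(void *info)
{
	pr_info("hello from CPU %d\n", smp_processor_id());
}

static void poke_every_other_cpu(void)
{
	int cpu, this_cpu = get_cpu();	/* disable preemption */

	for_each_online_cpu(cpu) {
		if (cpu == this_cpu)
			continue;
		/* wait=1: return only after report_cpu() has run on @cpu */
		smp_call_function_single(cpu, report_cpu, NULL, 1);
	}

	put_cpu();
}
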
cpu                30 kernel/smpboot.c struct task_struct *idle_thread_get(unsigned int cpu)
cpu                32 kernel/smpboot.c 	struct task_struct *tsk = per_cpu(idle_threads, cpu);
cpu                36 kernel/smpboot.c 	init_idle(tsk, cpu);
cpu                51 kernel/smpboot.c static inline void idle_init(unsigned int cpu)
cpu                53 kernel/smpboot.c 	struct task_struct *tsk = per_cpu(idle_threads, cpu);
cpu                56 kernel/smpboot.c 		tsk = fork_idle(cpu);
cpu                58 kernel/smpboot.c 			pr_err("SMP: fork_idle() failed for CPU %u\n", cpu);
cpu                60 kernel/smpboot.c 			per_cpu(idle_threads, cpu) = tsk;
cpu                69 kernel/smpboot.c 	unsigned int cpu, boot_cpu;
cpu                73 kernel/smpboot.c 	for_each_possible_cpu(cpu) {
cpu                74 kernel/smpboot.c 		if (cpu != boot_cpu)
cpu                75 kernel/smpboot.c 			idle_init(cpu);
cpu                86 kernel/smpboot.c 	unsigned int			cpu;
cpu               120 kernel/smpboot.c 				ht->cleanup(td->cpu, cpu_online(td->cpu));
cpu               129 kernel/smpboot.c 				BUG_ON(td->cpu != smp_processor_id());
cpu               130 kernel/smpboot.c 				ht->park(td->cpu);
cpu               138 kernel/smpboot.c 		BUG_ON(td->cpu != smp_processor_id());
cpu               146 kernel/smpboot.c 				ht->setup(td->cpu);
cpu               154 kernel/smpboot.c 				ht->unpark(td->cpu);
cpu               159 kernel/smpboot.c 		if (!ht->thread_should_run(td->cpu)) {
cpu               165 kernel/smpboot.c 			ht->thread_fn(td->cpu);
cpu               171 kernel/smpboot.c __smpboot_create_thread(struct smp_hotplug_thread *ht, unsigned int cpu)
cpu               173 kernel/smpboot.c 	struct task_struct *tsk = *per_cpu_ptr(ht->store, cpu);
cpu               179 kernel/smpboot.c 	td = kzalloc_node(sizeof(*td), GFP_KERNEL, cpu_to_node(cpu));
cpu               182 kernel/smpboot.c 	td->cpu = cpu;
cpu               185 kernel/smpboot.c 	tsk = kthread_create_on_cpu(smpboot_thread_fn, td, cpu,
cpu               197 kernel/smpboot.c 	*per_cpu_ptr(ht->store, cpu) = tsk;
cpu               208 kernel/smpboot.c 			ht->create(cpu);
cpu               213 kernel/smpboot.c int smpboot_create_threads(unsigned int cpu)
cpu               220 kernel/smpboot.c 		ret = __smpboot_create_thread(cur, cpu);
cpu               228 kernel/smpboot.c static void smpboot_unpark_thread(struct smp_hotplug_thread *ht, unsigned int cpu)
cpu               230 kernel/smpboot.c 	struct task_struct *tsk = *per_cpu_ptr(ht->store, cpu);
cpu               236 kernel/smpboot.c int smpboot_unpark_threads(unsigned int cpu)
cpu               242 kernel/smpboot.c 		smpboot_unpark_thread(cur, cpu);
cpu               247 kernel/smpboot.c static void smpboot_park_thread(struct smp_hotplug_thread *ht, unsigned int cpu)
cpu               249 kernel/smpboot.c 	struct task_struct *tsk = *per_cpu_ptr(ht->store, cpu);
cpu               255 kernel/smpboot.c int smpboot_park_threads(unsigned int cpu)
cpu               261 kernel/smpboot.c 		smpboot_park_thread(cur, cpu);
cpu               268 kernel/smpboot.c 	unsigned int cpu;
cpu               271 kernel/smpboot.c 	for_each_possible_cpu(cpu) {
cpu               272 kernel/smpboot.c 		struct task_struct *tsk = *per_cpu_ptr(ht->store, cpu);
cpu               277 kernel/smpboot.c 			*per_cpu_ptr(ht->store, cpu) = NULL;
cpu               291 kernel/smpboot.c 	unsigned int cpu;
cpu               296 kernel/smpboot.c 	for_each_online_cpu(cpu) {
cpu               297 kernel/smpboot.c 		ret = __smpboot_create_thread(plug_thread, cpu);
cpu               302 kernel/smpboot.c 		smpboot_unpark_thread(plug_thread, cpu);
cpu               335 kernel/smpboot.c int cpu_report_state(int cpu)
cpu               337 kernel/smpboot.c 	return atomic_read(&per_cpu(cpu_hotplug_state, cpu));
cpu               352 kernel/smpboot.c int cpu_check_up_prepare(int cpu)
cpu               355 kernel/smpboot.c 		atomic_set(&per_cpu(cpu_hotplug_state, cpu), CPU_UP_PREPARE);
cpu               359 kernel/smpboot.c 	switch (atomic_read(&per_cpu(cpu_hotplug_state, cpu))) {
cpu               364 kernel/smpboot.c 		atomic_set(&per_cpu(cpu_hotplug_state, cpu), CPU_UP_PREPARE);
cpu               408 kernel/smpboot.c void cpu_set_state_online(int cpu)
cpu               410 kernel/smpboot.c 	(void)atomic_xchg(&per_cpu(cpu_hotplug_state, cpu), CPU_ONLINE);
cpu               418 kernel/smpboot.c bool cpu_wait_death(unsigned int cpu, int seconds)
cpu               428 kernel/smpboot.c 	if (atomic_read(&per_cpu(cpu_hotplug_state, cpu)) == CPU_DEAD)
cpu               433 kernel/smpboot.c 	while (atomic_read(&per_cpu(cpu_hotplug_state, cpu)) != CPU_DEAD) {
cpu               441 kernel/smpboot.c 	oldstate = atomic_read(&per_cpu(cpu_hotplug_state, cpu));
cpu               445 kernel/smpboot.c 		atomic_set(&per_cpu(cpu_hotplug_state, cpu), CPU_POST_DEAD);
cpu               448 kernel/smpboot.c 		if (atomic_cmpxchg(&per_cpu(cpu_hotplug_state, cpu),
cpu               469 kernel/smpboot.c 	int cpu = smp_processor_id();
cpu               472 kernel/smpboot.c 		oldstate = atomic_read(&per_cpu(cpu_hotplug_state, cpu));
cpu               477 kernel/smpboot.c 	} while (atomic_cmpxchg(&per_cpu(cpu_hotplug_state, cpu),
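
kernel/smpboot.c above tracks the per-CPU bringup/teardown state machine. Ordinary kernel code hooks into the same lifecycle through the CPU hotplug states; a hedged fragment using a dynamically allocated online state (the callback names are illustrative):

#include <linux/cpuhotplug.h>
#include <linux/printk.h>

static int demo_cpu_online(unsigned int cpu)
{
	pr_info("CPU %u is coming online\n", cpu);
	return 0;		/* a non-zero return aborts the bringup */
}

static int demo_cpu_offline(unsigned int cpu)
{
	pr_info("CPU %u is going offline\n", cpu);
	return 0;
}

static int demo_register_hotplug(void)
{
	/* Also calls demo_cpu_online() on each CPU that is already up;
	 * returns the allocated state number (> 0) or a negative errno. */
	return cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "demo:online",
				 demo_cpu_online, demo_cpu_offline);
}
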
cpu                 8 kernel/smpboot.h struct task_struct *idle_thread_get(unsigned int cpu);
cpu                12 kernel/smpboot.h static inline struct task_struct *idle_thread_get(unsigned int cpu) { return NULL; }
cpu                17 kernel/smpboot.h int smpboot_create_threads(unsigned int cpu);
cpu                18 kernel/smpboot.h int smpboot_park_threads(unsigned int cpu);
cpu                19 kernel/smpboot.h int smpboot_unpark_threads(unsigned int cpu);
cpu               390 kernel/softirq.c 	int cpu = smp_processor_id();
cpu               393 kernel/softirq.c 	if ((idle_cpu(cpu) && !need_resched()) || tick_nohz_full_cpu(cpu)) {
cpu               577 kernel/softirq.c 	int cpu;
cpu               579 kernel/softirq.c 	for_each_possible_cpu(cpu) {
cpu               580 kernel/softirq.c 		per_cpu(tasklet_vec, cpu).tail =
cpu               581 kernel/softirq.c 			&per_cpu(tasklet_vec, cpu).head;
cpu               582 kernel/softirq.c 		per_cpu(tasklet_hi_vec, cpu).tail =
cpu               583 kernel/softirq.c 			&per_cpu(tasklet_hi_vec, cpu).head;
cpu               590 kernel/softirq.c static int ksoftirqd_should_run(unsigned int cpu)
cpu               595 kernel/softirq.c static void run_ksoftirqd(unsigned int cpu)
cpu               621 kernel/softirq.c void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu)
cpu               625 kernel/softirq.c 	BUG_ON(cpu_online(cpu));
cpu               632 kernel/softirq.c 	for (i = &per_cpu(tasklet_vec, cpu).head; *i; i = &(*i)->next) {
cpu               637 kernel/softirq.c 				per_cpu(tasklet_vec, cpu).tail = i;
cpu               644 kernel/softirq.c static int takeover_tasklets(unsigned int cpu)
cpu               650 kernel/softirq.c 	if (&per_cpu(tasklet_vec, cpu).head != per_cpu(tasklet_vec, cpu).tail) {
cpu               651 kernel/softirq.c 		*__this_cpu_read(tasklet_vec.tail) = per_cpu(tasklet_vec, cpu).head;
cpu               652 kernel/softirq.c 		__this_cpu_write(tasklet_vec.tail, per_cpu(tasklet_vec, cpu).tail);
cpu               653 kernel/softirq.c 		per_cpu(tasklet_vec, cpu).head = NULL;
cpu               654 kernel/softirq.c 		per_cpu(tasklet_vec, cpu).tail = &per_cpu(tasklet_vec, cpu).head;
cpu               658 kernel/softirq.c 	if (&per_cpu(tasklet_hi_vec, cpu).head != per_cpu(tasklet_hi_vec, cpu).tail) {
cpu               659 kernel/softirq.c 		*__this_cpu_read(tasklet_hi_vec.tail) = per_cpu(tasklet_hi_vec, cpu).head;
cpu               660 kernel/softirq.c 		__this_cpu_write(tasklet_hi_vec.tail, per_cpu(tasklet_hi_vec, cpu).tail);
cpu               661 kernel/softirq.c 		per_cpu(tasklet_hi_vec, cpu).head = NULL;
cpu               662 kernel/softirq.c 		per_cpu(tasklet_hi_vec, cpu).tail = &per_cpu(tasklet_hi_vec, cpu).head;
cpu                77 kernel/stop_machine.c static bool cpu_stop_queue_work(unsigned int cpu, struct cpu_stop_work *work)
cpu                79 kernel/stop_machine.c 	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
cpu               123 kernel/stop_machine.c int stop_one_cpu(unsigned int cpu, cpu_stop_fn_t fn, void *arg)
cpu               129 kernel/stop_machine.c 	if (!cpu_stop_queue_work(cpu, &work))
cpu               191 kernel/stop_machine.c 	int cpu = smp_processor_id(), err = 0;
cpu               204 kernel/stop_machine.c 		is_active = cpu == cpumask_first(cpumask);
cpu               207 kernel/stop_machine.c 		is_active = cpumask_test_cpu(cpu, cpumask);
cpu               366 kernel/stop_machine.c bool stop_one_cpu_nowait(unsigned int cpu, cpu_stop_fn_t fn, void *arg,
cpu               370 kernel/stop_machine.c 	return cpu_stop_queue_work(cpu, work_buf);
cpu               378 kernel/stop_machine.c 	unsigned int cpu;
cpu               389 kernel/stop_machine.c 	for_each_cpu(cpu, cpumask) {
cpu               390 kernel/stop_machine.c 		work = &per_cpu(cpu_stopper.stop_work, cpu);
cpu               394 kernel/stop_machine.c 		if (cpu_stop_queue_work(cpu, work))
cpu               485 kernel/stop_machine.c static int cpu_stop_should_run(unsigned int cpu)
cpu               487 kernel/stop_machine.c 	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
cpu               497 kernel/stop_machine.c static void cpu_stopper_thread(unsigned int cpu)
cpu               499 kernel/stop_machine.c 	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
cpu               533 kernel/stop_machine.c void stop_machine_park(int cpu)
cpu               535 kernel/stop_machine.c 	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
cpu               545 kernel/stop_machine.c extern void sched_set_stop_task(int cpu, struct task_struct *stop);
cpu               547 kernel/stop_machine.c static void cpu_stop_create(unsigned int cpu)
cpu               549 kernel/stop_machine.c 	sched_set_stop_task(cpu, per_cpu(cpu_stopper.thread, cpu));
cpu               552 kernel/stop_machine.c static void cpu_stop_park(unsigned int cpu)
cpu               554 kernel/stop_machine.c 	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
cpu               559 kernel/stop_machine.c void stop_machine_unpark(int cpu)
cpu               561 kernel/stop_machine.c 	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
cpu               579 kernel/stop_machine.c 	unsigned int cpu;
cpu               581 kernel/stop_machine.c 	for_each_possible_cpu(cpu) {
cpu               582 kernel/stop_machine.c 		struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
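
kernel/stop_machine.c above implements the per-CPU stopper threads. From the caller's side, stop_machine() runs a function while every online CPU is parked in its stopper with interrupts disabled; a hedged fragment (the callback and its use are illustrative):

#include <linux/stop_machine.h>
#include <linux/smp.h>
#include <linux/printk.h>

/* Runs with all other online CPUs spinning in their stopper threads,
 * interrupts off, so global state can be changed without racing anyone. */
static int demo_flip_state(void *data)
{
	pr_info("updating on CPU %d\n", smp_processor_id());
	return 0;
}

static int demo_atomic_update(void)
{
	/* A NULL cpumask means the callback runs on the first online CPU
	 * while the others merely wait out the critical section. */
	return stop_machine(demo_flip_state, NULL, NULL);
}
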
cpu              2500 kernel/sys.c   	int cpu = raw_smp_processor_id();
cpu              2503 kernel/sys.c   		err |= put_user(cpu, cpup);
cpu              2505 kernel/sys.c   		err |= put_user(cpu_to_node(cpu), nodep);
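
The kernel/sys.c lines above are the getcpu(2) implementation; userspace usually reaches it through sched_getcpu(3). For example:

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

int main(void)
{
	/* Thin wrapper over getcpu(2) (often via the vDSO); the answer can
	 * already be stale if the thread migrates right afterwards. */
	int cpu = sched_getcpu();

	printf("running on CPU %d\n", cpu);
	return 0;
}
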
cpu               282 kernel/taskstats.c 	unsigned int cpu;
cpu               295 kernel/taskstats.c 		for_each_cpu(cpu, mask) {
cpu               297 kernel/taskstats.c 					GFP_KERNEL, cpu_to_node(cpu));
cpu               305 kernel/taskstats.c 			listeners = &per_cpu(listener_array, cpu);
cpu               322 kernel/taskstats.c 	for_each_cpu(cpu, mask) {
cpu               323 kernel/taskstats.c 		listeners = &per_cpu(listener_array, cpu);
cpu               387 kernel/time/clockevents.c static int __clockevents_try_unbind(struct clock_event_device *ced, int cpu)
cpu               395 kernel/time/clockevents.c 	return ced == per_cpu(tick_cpu_device, cpu).evtdev ? -EAGAIN : -EBUSY;
cpu               418 kernel/time/clockevents.c static int clockevents_unbind(struct clock_event_device *ced, int cpu)
cpu               422 kernel/time/clockevents.c 	smp_call_function_single(cpu, __clockevents_unbind, &cu, 1);
cpu               429 kernel/time/clockevents.c int clockevents_unbind_device(struct clock_event_device *ced, int cpu)
cpu               434 kernel/time/clockevents.c 	ret = clockevents_unbind(ced, cpu);
cpu               622 kernel/time/clockevents.c void tick_offline_cpu(unsigned int cpu)
cpu               625 kernel/time/clockevents.c 	tick_broadcast_offline(cpu);
cpu               633 kernel/time/clockevents.c void tick_cleanup_dead_cpu(int cpu)
cpu               640 kernel/time/clockevents.c 	tick_shutdown(cpu);
cpu               651 kernel/time/clockevents.c 		if (cpumask_test_cpu(cpu, dev->cpumask) &&
cpu               750 kernel/time/clockevents.c 	int cpu;
cpu               752 kernel/time/clockevents.c 	for_each_possible_cpu(cpu) {
cpu               753 kernel/time/clockevents.c 		struct device *dev = &per_cpu(tick_percpu_dev, cpu);
cpu               756 kernel/time/clockevents.c 		dev->id = cpu;
cpu               733 kernel/time/hrtimer.c 			base->cpu);
cpu              1988 kernel/time/hrtimer.c int hrtimers_prepare_cpu(unsigned int cpu)
cpu              1990 kernel/time/hrtimer.c 	struct hrtimer_cpu_base *cpu_base = &per_cpu(hrtimer_bases, cpu);
cpu              1998 kernel/time/hrtimer.c 	cpu_base->cpu = cpu;
cpu               127 kernel/time/posix-cpu-timers.c 	u64 delta, incr, expires = timer->it.cpu.node.expires;
cpu               147 kernel/time/posix-cpu-timers.c 		timer->it.cpu.node.expires += incr;
cpu               151 kernel/time/posix-cpu-timers.c 	return timer->it.cpu.node.expires;
cpu               395 kernel/time/posix-cpu-timers.c 	timerqueue_init(&new_timer->it.cpu.node);
cpu               396 kernel/time/posix-cpu-timers.c 	new_timer->it.cpu.task = p;
cpu               408 kernel/time/posix-cpu-timers.c 	struct cpu_timer *ctmr = &timer->it.cpu;
cpu               429 kernel/time/posix-cpu-timers.c 		if (timer->it.cpu.firing)
cpu               490 kernel/time/posix-cpu-timers.c 	struct cpu_timer *ctmr = &timer->it.cpu;
cpu               523 kernel/time/posix-cpu-timers.c 	struct cpu_timer *ctmr = &timer->it.cpu;
cpu               566 kernel/time/posix-cpu-timers.c 	struct cpu_timer *ctmr = &timer->it.cpu;
cpu               599 kernel/time/posix-cpu-timers.c 	if (unlikely(timer->it.cpu.firing)) {
cpu               600 kernel/time/posix-cpu-timers.c 		timer->it.cpu.firing = -1;
cpu               705 kernel/time/posix-cpu-timers.c 	struct cpu_timer *ctmr = &timer->it.cpu;
cpu               979 kernel/time/posix-cpu-timers.c 	struct cpu_timer *ctmr = &timer->it.cpu;
cpu              1156 kernel/time/posix-cpu-timers.c 	list_for_each_entry_safe(timer, next, &firing, it.cpu.elist) {
cpu              1160 kernel/time/posix-cpu-timers.c 		list_del_init(&timer->it.cpu.elist);
cpu              1161 kernel/time/posix-cpu-timers.c 		cpu_firing = timer->it.cpu.firing;
cpu              1162 kernel/time/posix-cpu-timers.c 		timer->it.cpu.firing = 0;
cpu              1252 kernel/time/posix-cpu-timers.c 			if (!cpu_timer_getexpires(&timer.it.cpu)) {
cpu              1274 kernel/time/posix-cpu-timers.c 		expires = cpu_timer_getexpires(&timer.it.cpu);
cpu                76 kernel/time/tick-broadcast-hrtimer.c 		bc->bound_on = bctimer.base->cpu_base->cpu;
cpu                37 kernel/time/tick-broadcast.c static void tick_broadcast_clear_oneshot(int cpu);
cpu                40 kernel/time/tick-broadcast.c static void tick_broadcast_oneshot_offline(unsigned int cpu);
cpu                44 kernel/time/tick-broadcast.c static inline void tick_broadcast_clear_oneshot(int cpu) { }
cpu                47 kernel/time/tick-broadcast.c static inline void tick_broadcast_oneshot_offline(unsigned int cpu) { }
cpu               163 kernel/time/tick-broadcast.c int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu)
cpu               180 kernel/time/tick-broadcast.c 		cpumask_set_cpu(cpu, tick_broadcast_mask);
cpu               192 kernel/time/tick-broadcast.c 			cpumask_clear_cpu(cpu, tick_broadcast_mask);
cpu               200 kernel/time/tick-broadcast.c 		if (!cpumask_test_cpu(cpu, tick_broadcast_on))
cpu               201 kernel/time/tick-broadcast.c 			cpumask_clear_cpu(cpu, tick_broadcast_mask);
cpu               213 kernel/time/tick-broadcast.c 			tick_broadcast_clear_oneshot(cpu);
cpu               234 kernel/time/tick-broadcast.c 				ret = cpumask_test_cpu(cpu, tick_broadcast_mask);
cpu               266 kernel/time/tick-broadcast.c 	int cpu = smp_processor_id();
cpu               273 kernel/time/tick-broadcast.c 	if (cpumask_test_cpu(cpu, mask)) {
cpu               276 kernel/time/tick-broadcast.c 		cpumask_clear_cpu(cpu, mask);
cpu               360 kernel/time/tick-broadcast.c 	int cpu, bc_stopped;
cpu               377 kernel/time/tick-broadcast.c 	cpu = smp_processor_id();
cpu               386 kernel/time/tick-broadcast.c 		cpumask_set_cpu(cpu, tick_broadcast_on);
cpu               387 kernel/time/tick-broadcast.c 		if (!cpumask_test_and_set_cpu(cpu, tick_broadcast_mask)) {
cpu               405 kernel/time/tick-broadcast.c 		cpumask_clear_cpu(cpu, tick_broadcast_on);
cpu               406 kernel/time/tick-broadcast.c 		if (cpumask_test_and_clear_cpu(cpu, tick_broadcast_mask)) {
cpu               455 kernel/time/tick-broadcast.c void tick_broadcast_offline(unsigned int cpu)
cpu               458 kernel/time/tick-broadcast.c 	cpumask_clear_cpu(cpu, tick_broadcast_mask);
cpu               459 kernel/time/tick-broadcast.c 	cpumask_clear_cpu(cpu, tick_broadcast_on);
cpu               460 kernel/time/tick-broadcast.c 	tick_broadcast_oneshot_offline(cpu);
cpu               565 kernel/time/tick-broadcast.c static void tick_broadcast_set_event(struct clock_event_device *bc, int cpu,
cpu               572 kernel/time/tick-broadcast.c 	tick_broadcast_set_affinity(bc, cpumask_of(cpu));
cpu               608 kernel/time/tick-broadcast.c 	int cpu, next_cpu = 0;
cpu               617 kernel/time/tick-broadcast.c 	for_each_cpu(cpu, tick_broadcast_oneshot_mask) {
cpu               626 kernel/time/tick-broadcast.c 		td = &per_cpu(tick_cpu_device, cpu);
cpu               628 kernel/time/tick-broadcast.c 			cpumask_set_cpu(cpu, tmpmask);
cpu               634 kernel/time/tick-broadcast.c 			cpumask_set_cpu(cpu, tick_broadcast_pending_mask);
cpu               637 kernel/time/tick-broadcast.c 			next_cpu = cpu;
cpu               684 kernel/time/tick-broadcast.c static int broadcast_needs_cpu(struct clock_event_device *bc, int cpu)
cpu               690 kernel/time/tick-broadcast.c 	return bc->bound_on == cpu ? -EBUSY : 0;
cpu               713 kernel/time/tick-broadcast.c 	int cpu, ret = 0;
cpu               727 kernel/time/tick-broadcast.c 	cpu = smp_processor_id();
cpu               737 kernel/time/tick-broadcast.c 		ret = broadcast_needs_cpu(bc, cpu);
cpu               752 kernel/time/tick-broadcast.c 		if (!cpumask_test_and_set_cpu(cpu, tick_broadcast_oneshot_mask)) {
cpu               753 kernel/time/tick-broadcast.c 			WARN_ON_ONCE(cpumask_test_cpu(cpu, tick_broadcast_pending_mask));
cpu               768 kernel/time/tick-broadcast.c 			if (cpumask_test_cpu(cpu, tick_broadcast_force_mask)) {
cpu               771 kernel/time/tick-broadcast.c 				tick_broadcast_set_event(bc, cpu, dev->next_event);
cpu               779 kernel/time/tick-broadcast.c 				ret = broadcast_needs_cpu(bc, cpu);
cpu               781 kernel/time/tick-broadcast.c 					cpumask_clear_cpu(cpu,
cpu               787 kernel/time/tick-broadcast.c 		if (cpumask_test_and_clear_cpu(cpu, tick_broadcast_oneshot_mask)) {
cpu               798 kernel/time/tick-broadcast.c 			if (cpumask_test_and_clear_cpu(cpu,
cpu               841 kernel/time/tick-broadcast.c 				cpumask_set_cpu(cpu, tick_broadcast_force_mask);
cpu               861 kernel/time/tick-broadcast.c static void tick_broadcast_clear_oneshot(int cpu)
cpu               863 kernel/time/tick-broadcast.c 	cpumask_clear_cpu(cpu, tick_broadcast_oneshot_mask);
cpu               864 kernel/time/tick-broadcast.c 	cpumask_clear_cpu(cpu, tick_broadcast_pending_mask);
cpu               871 kernel/time/tick-broadcast.c 	int cpu;
cpu               873 kernel/time/tick-broadcast.c 	for_each_cpu(cpu, mask) {
cpu               874 kernel/time/tick-broadcast.c 		td = &per_cpu(tick_cpu_device, cpu);
cpu               885 kernel/time/tick-broadcast.c 	int cpu = smp_processor_id();
cpu               903 kernel/time/tick-broadcast.c 		cpumask_clear_cpu(cpu, tmpmask);
cpu               911 kernel/time/tick-broadcast.c 			tick_broadcast_set_event(bc, cpu, tick_next_period);
cpu               922 kernel/time/tick-broadcast.c 		tick_broadcast_clear_oneshot(cpu);
cpu               963 kernel/time/tick-broadcast.c static void tick_broadcast_oneshot_offline(unsigned int cpu)
cpu               969 kernel/time/tick-broadcast.c 	cpumask_clear_cpu(cpu, tick_broadcast_oneshot_mask);
cpu               970 kernel/time/tick-broadcast.c 	cpumask_clear_cpu(cpu, tick_broadcast_pending_mask);
cpu               971 kernel/time/tick-broadcast.c 	cpumask_clear_cpu(cpu, tick_broadcast_force_mask);
cpu                61 kernel/time/tick-common.c struct tick_device *tick_get_device(int cpu)
cpu                63 kernel/time/tick-common.c 	return &per_cpu(tick_cpu_device, cpu);
cpu                83 kernel/time/tick-common.c static void tick_periodic(int cpu)
cpu                85 kernel/time/tick-common.c 	if (tick_do_timer_cpu == cpu) {
cpu               105 kernel/time/tick-common.c 	int cpu = smp_processor_id();
cpu               108 kernel/time/tick-common.c 	tick_periodic(cpu);
cpu               141 kernel/time/tick-common.c 			tick_periodic(cpu);
cpu               181 kernel/time/tick-common.c 	int cpu = *(unsigned int *)info;
cpu               185 kernel/time/tick-common.c 	tick_do_timer_cpu = cpu;
cpu               190 kernel/time/tick-common.c 	int cpu = smp_processor_id();
cpu               193 kernel/time/tick-common.c 	if (from >= 0 && from != cpu)
cpu               194 kernel/time/tick-common.c 		smp_call_function_single(from, giveup_do_timer, &cpu, 1);
cpu               202 kernel/time/tick-common.c 			      struct clock_event_device *newdev, int cpu,
cpu               217 kernel/time/tick-common.c 			tick_do_timer_cpu = cpu;
cpu               228 kernel/time/tick-common.c 			if (tick_nohz_full_cpu(cpu))
cpu               229 kernel/time/tick-common.c 				tick_do_timer_boot_cpu = cpu;
cpu               232 kernel/time/tick-common.c 						!tick_nohz_full_cpu(cpu)) {
cpu               235 kernel/time/tick-common.c 			WARN_ON(tick_do_timer_cpu != cpu);
cpu               265 kernel/time/tick-common.c 	if (tick_device_uses_broadcast(newdev, cpu))
cpu               277 kernel/time/tick-common.c 	int cpu = smp_processor_id();
cpu               280 kernel/time/tick-common.c 	tick_setup_device(td, newdev, cpu, cpumask_of(cpu));
cpu               286 kernel/time/tick-common.c 			      struct clock_event_device *newdev, int cpu)
cpu               288 kernel/time/tick-common.c 	if (!cpumask_test_cpu(cpu, newdev->cpumask))
cpu               290 kernel/time/tick-common.c 	if (cpumask_equal(newdev->cpumask, cpumask_of(cpu)))
cpu               296 kernel/time/tick-common.c 	if (curdev && cpumask_equal(curdev->cpumask, cpumask_of(cpu)))
cpu               342 kernel/time/tick-common.c 	int cpu;
cpu               344 kernel/time/tick-common.c 	cpu = smp_processor_id();
cpu               345 kernel/time/tick-common.c 	td = &per_cpu(tick_cpu_device, cpu);
cpu               349 kernel/time/tick-common.c 	if (!tick_check_percpu(curdev, newdev, cpu))
cpu               369 kernel/time/tick-common.c 	tick_setup_device(td, newdev, cpu, cpumask_of(cpu));
cpu               413 kernel/time/tick-common.c 		int cpu = cpumask_first(cpu_online_mask);
cpu               415 kernel/time/tick-common.c 		tick_do_timer_cpu = (cpu < nr_cpu_ids) ? cpu :
cpu               427 kernel/time/tick-common.c void tick_shutdown(unsigned int cpu)
cpu               429 kernel/time/tick-common.c 	struct tick_device *td = &per_cpu(tick_cpu_device, cpu);
cpu                24 kernel/time/tick-internal.h extern void tick_shutdown(unsigned int cpu);
cpu                31 kernel/time/tick-internal.h extern struct tick_device *tick_get_device(int cpu);
cpu                64 kernel/time/tick-internal.h extern int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu);
cpu                78 kernel/time/tick-internal.h static inline int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu) { return 0; }
cpu               141 kernel/time/tick-internal.h extern void tick_broadcast_offline(unsigned int cpu);
cpu               143 kernel/time/tick-internal.h static inline void tick_broadcast_offline(unsigned int cpu) { }
cpu                40 kernel/time/tick-sched.c struct tick_sched *tick_get_tick_sched(int cpu)
cpu                42 kernel/time/tick-sched.c 	return &per_cpu(tick_cpu_sched, cpu);
cpu               119 kernel/time/tick-sched.c 	int cpu = smp_processor_id();
cpu               136 kernel/time/tick-sched.c 		tick_do_timer_cpu = cpu;
cpu               141 kernel/time/tick-sched.c 	if (tick_do_timer_cpu == cpu)
cpu               208 kernel/time/tick-sched.c static bool can_stop_full_tick(int cpu, struct tick_sched *ts)
cpu               212 kernel/time/tick-sched.c 	if (unlikely(!cpu_online(cpu)))
cpu               257 kernel/time/tick-sched.c void tick_nohz_full_kick_cpu(int cpu)
cpu               259 kernel/time/tick-sched.c 	if (!tick_nohz_full_cpu(cpu))
cpu               262 kernel/time/tick-sched.c 	irq_work_queue_on(&per_cpu(nohz_full_kick_work, cpu), cpu);
cpu               271 kernel/time/tick-sched.c 	int cpu;
cpu               277 kernel/time/tick-sched.c 	for_each_cpu_and(cpu, tick_nohz_full_mask, cpu_online_mask)
cpu               278 kernel/time/tick-sched.c 		tick_nohz_full_kick_cpu(cpu);
cpu               310 kernel/time/tick-sched.c void tick_nohz_dep_set_cpu(int cpu, enum tick_dep_bits bit)
cpu               315 kernel/time/tick-sched.c 	ts = per_cpu_ptr(&tick_cpu_sched, cpu);
cpu               321 kernel/time/tick-sched.c 		if (cpu == smp_processor_id()) {
cpu               326 kernel/time/tick-sched.c 				tick_nohz_full_kick_cpu(cpu);
cpu               332 kernel/time/tick-sched.c void tick_nohz_dep_clear_cpu(int cpu, enum tick_dep_bits bit)
cpu               334 kernel/time/tick-sched.c 	struct tick_sched *ts = per_cpu_ptr(&tick_cpu_sched, cpu);
cpu               405 kernel/time/tick-sched.c static int tick_nohz_cpu_down(unsigned int cpu)
cpu               412 kernel/time/tick-sched.c 	if (tick_nohz_full_running && tick_do_timer_cpu == cpu)
cpu               419 kernel/time/tick-sched.c 	int cpu, ret;
cpu               438 kernel/time/tick-sched.c 		cpu = smp_processor_id();
cpu               440 kernel/time/tick-sched.c 		if (cpumask_test_cpu(cpu, tick_nohz_full_mask)) {
cpu               442 kernel/time/tick-sched.c 				"for timekeeping\n", cpu);
cpu               443 kernel/time/tick-sched.c 			cpumask_clear_cpu(cpu, tick_nohz_full_mask);
cpu               447 kernel/time/tick-sched.c 	for_each_cpu(cpu, tick_nohz_full_mask)
cpu               448 kernel/time/tick-sched.c 		context_tracking_cpu_set(cpu);
cpu               485 kernel/time/tick-sched.c bool tick_nohz_tick_stopped_cpu(int cpu)
cpu               487 kernel/time/tick-sched.c 	struct tick_sched *ts = per_cpu_ptr(&tick_cpu_sched, cpu);
cpu               519 kernel/time/tick-sched.c update_ts_time_stats(int cpu, struct tick_sched *ts, ktime_t now, u64 *last_update_time)
cpu               525 kernel/time/tick-sched.c 		if (nr_iowait_cpu(cpu) > 0)
cpu               566 kernel/time/tick-sched.c u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time)
cpu               568 kernel/time/tick-sched.c 	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
cpu               576 kernel/time/tick-sched.c 		update_ts_time_stats(cpu, ts, now, last_update_time);
cpu               579 kernel/time/tick-sched.c 		if (ts->idle_active && !nr_iowait_cpu(cpu)) {
cpu               607 kernel/time/tick-sched.c u64 get_cpu_iowait_time_us(int cpu, u64 *last_update_time)
cpu               609 kernel/time/tick-sched.c 	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
cpu               617 kernel/time/tick-sched.c 		update_ts_time_stats(cpu, ts, now, last_update_time);
cpu               620 kernel/time/tick-sched.c 		if (ts->idle_active && nr_iowait_cpu(cpu) > 0) {
cpu               660 kernel/time/tick-sched.c static ktime_t tick_nohz_next_event(struct tick_sched *ts, int cpu)
cpu               729 kernel/time/tick-sched.c 	if (cpu != tick_do_timer_cpu &&
cpu               745 kernel/time/tick-sched.c static void tick_nohz_stop_tick(struct tick_sched *ts, int cpu)
cpu               763 kernel/time/tick-sched.c 	if (cpu == tick_do_timer_cpu) {
cpu               825 kernel/time/tick-sched.c static void tick_nohz_stop_sched_tick(struct tick_sched *ts, int cpu)
cpu               827 kernel/time/tick-sched.c 	if (tick_nohz_next_event(ts, cpu))
cpu               828 kernel/time/tick-sched.c 		tick_nohz_stop_tick(ts, cpu);
cpu               859 kernel/time/tick-sched.c 	int cpu = smp_processor_id();
cpu               861 kernel/time/tick-sched.c 	if (!tick_nohz_full_cpu(cpu))
cpu               867 kernel/time/tick-sched.c 	if (can_stop_full_tick(cpu, ts))
cpu               868 kernel/time/tick-sched.c 		tick_nohz_stop_sched_tick(ts, cpu);
cpu               874 kernel/time/tick-sched.c static bool can_stop_idle_tick(int cpu, struct tick_sched *ts)
cpu               883 kernel/time/tick-sched.c 	if (unlikely(!cpu_online(cpu))) {
cpu               884 kernel/time/tick-sched.c 		if (cpu == tick_do_timer_cpu)
cpu               917 kernel/time/tick-sched.c 		if (tick_do_timer_cpu == cpu)
cpu               938 kernel/time/tick-sched.c 	int cpu = smp_processor_id();
cpu               946 kernel/time/tick-sched.c 	else if (can_stop_idle_tick(cpu, ts))
cpu               947 kernel/time/tick-sched.c 		expires = tick_nohz_next_event(ts, cpu);
cpu               956 kernel/time/tick-sched.c 		tick_nohz_stop_tick(ts, cpu);
cpu               963 kernel/time/tick-sched.c 			nohz_balance_enter_idle(cpu);
cpu              1067 kernel/time/tick-sched.c 	int cpu = smp_processor_id();
cpu              1079 kernel/time/tick-sched.c 	if (!can_stop_idle_tick(cpu, ts))
cpu              1082 kernel/time/tick-sched.c 	next_event = tick_nohz_next_event(ts, cpu);
cpu              1102 kernel/time/tick-sched.c unsigned long tick_nohz_get_idle_calls_cpu(int cpu)
cpu              1104 kernel/time/tick-sched.c 	struct tick_sched *ts = tick_get_tick_sched(cpu);
cpu              1358 kernel/time/tick-sched.c void tick_cancel_sched_timer(int cpu)
cpu              1360 kernel/time/tick-sched.c 	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
cpu              1376 kernel/time/tick-sched.c 	int cpu;
cpu              1378 kernel/time/tick-sched.c 	for_each_possible_cpu(cpu)
cpu              1379 kernel/time/tick-sched.c 		set_bit(0, &per_cpu(tick_cpu_sched, cpu).check_clocks);
cpu                82 kernel/time/tick-sched.h extern struct tick_sched *tick_get_tick_sched(int cpu);
cpu                86 kernel/time/tick-sched.h extern void tick_cancel_sched_timer(int cpu);
cpu                88 kernel/time/tick-sched.h static inline void tick_cancel_sched_timer(int cpu) { }
cpu               205 kernel/time/timer.c 	unsigned int		cpu;
cpu               273 kernel/time/timer.c static unsigned long round_jiffies_common(unsigned long j, int cpu,
cpu               287 kernel/time/timer.c 	j += cpu * 3;
cpu               304 kernel/time/timer.c 	j -= cpu * 3;
cpu               333 kernel/time/timer.c unsigned long __round_jiffies(unsigned long j, int cpu)
cpu               335 kernel/time/timer.c 	return round_jiffies_common(j, cpu, false);
cpu               359 kernel/time/timer.c unsigned long __round_jiffies_relative(unsigned long j, int cpu)
cpu               364 kernel/time/timer.c 	return round_jiffies_common(j + j0, cpu, false) - j0;
cpu               420 kernel/time/timer.c unsigned long __round_jiffies_up(unsigned long j, int cpu)
cpu               422 kernel/time/timer.c 	return round_jiffies_common(j, cpu, true);
cpu               436 kernel/time/timer.c unsigned long __round_jiffies_up_relative(unsigned long j, int cpu)
cpu               441 kernel/time/timer.c 	return round_jiffies_common(j + j0, cpu, true) - j0;
cpu               567 kernel/time/timer.c 		if (tick_nohz_full_cpu(base->cpu))
cpu               568 kernel/time/timer.c 			wake_up_nohz_cpu(base->cpu);
cpu               589 kernel/time/timer.c 	wake_up_nohz_cpu(base->cpu);
cpu               836 kernel/time/timer.c static inline struct timer_base *get_timer_cpu_base(u32 tflags, u32 cpu)
cpu               838 kernel/time/timer.c 	struct timer_base *base = per_cpu_ptr(&timer_bases[BASE_STD], cpu);
cpu               845 kernel/time/timer.c 		base = per_cpu_ptr(&timer_bases[BASE_DEF], cpu);
cpu              1034 kernel/time/timer.c 				   (timer->flags & ~TIMER_BASEMASK) | base->cpu);
cpu              1147 kernel/time/timer.c void add_timer_on(struct timer_list *timer, int cpu)
cpu              1154 kernel/time/timer.c 	new_base = get_timer_cpu_base(timer->flags, cpu);
cpu              1169 kernel/time/timer.c 			   (timer->flags & ~TIMER_BASEMASK) | cpu);
cpu              1948 kernel/time/timer.c 	int cpu = new_base->cpu;
cpu              1953 kernel/time/timer.c 		timer->flags = (timer->flags & ~TIMER_BASEMASK) | cpu;
cpu              1958 kernel/time/timer.c int timers_prepare_cpu(unsigned int cpu)
cpu              1964 kernel/time/timer.c 		base = per_cpu_ptr(&timer_bases[b], cpu);
cpu              1973 kernel/time/timer.c int timers_dead_cpu(unsigned int cpu)
cpu              1979 kernel/time/timer.c 	BUG_ON(cpu_online(cpu));
cpu              1982 kernel/time/timer.c 		old_base = per_cpu_ptr(&timer_bases[b], cpu);
cpu              2011 kernel/time/timer.c static void __init init_timer_cpu(int cpu)
cpu              2017 kernel/time/timer.c 		base = per_cpu_ptr(&timer_bases[i], cpu);
cpu              2018 kernel/time/timer.c 		base->cpu = cpu;
cpu              2027 kernel/time/timer.c 	int cpu;
cpu              2029 kernel/time/timer.c 	for_each_possible_cpu(cpu)
cpu              2030 kernel/time/timer.c 		init_timer_cpu(cpu);
cpu                21 kernel/time/timer_list.c 	int cpu;
cpu               130 kernel/time/timer_list.c static void print_cpu(struct seq_file *m, int cpu, u64 now)
cpu               132 kernel/time/timer_list.c 	struct hrtimer_cpu_base *cpu_base = &per_cpu(hrtimer_bases, cpu);
cpu               135 kernel/time/timer_list.c 	SEQ_printf(m, "cpu: %d\n", cpu);
cpu               166 kernel/time/timer_list.c 		struct tick_sched *ts = tick_get_tick_sched(cpu);
cpu               193 kernel/time/timer_list.c print_tickdevice(struct seq_file *m, struct tick_device *td, int cpu)
cpu               200 kernel/time/timer_list.c 	if (cpu < 0)
cpu               203 kernel/time/timer_list.c 		SEQ_printf(m, "Per CPU device: %d\n", cpu);
cpu               288 kernel/time/timer_list.c 	int cpu;
cpu               292 kernel/time/timer_list.c 	for_each_online_cpu(cpu)
cpu               293 kernel/time/timer_list.c 		print_cpu(NULL, cpu, now);
cpu               297 kernel/time/timer_list.c 	for_each_online_cpu(cpu)
cpu               298 kernel/time/timer_list.c 		print_tickdevice(NULL, tick_get_device(cpu), cpu);
cpu               308 kernel/time/timer_list.c 	if (iter->cpu == -1 && !iter->second_pass)
cpu               311 kernel/time/timer_list.c 		print_cpu(m, iter->cpu, iter->now);
cpu               313 kernel/time/timer_list.c 	else if (iter->cpu == -1 && iter->second_pass)
cpu               316 kernel/time/timer_list.c 		print_tickdevice(m, tick_get_device(iter->cpu), iter->cpu);
cpu               324 kernel/time/timer_list.c 		iter->cpu = cpumask_next(iter->cpu, cpu_online_mask);
cpu               325 kernel/time/timer_list.c 		if (iter->cpu >= nr_cpu_ids) {
cpu               328 kernel/time/timer_list.c 				iter->cpu = -1;
cpu               346 kernel/time/timer_list.c 	iter->cpu = -1;
cpu                82 kernel/torture.c bool torture_offline(int cpu, long *n_offl_attempts, long *n_offl_successes,
cpu                89 kernel/torture.c 	if (!cpu_online(cpu) || !cpu_is_hotpluggable(cpu))
cpu                97 kernel/torture.c 			 torture_type, cpu);
cpu               100 kernel/torture.c 	ret = cpu_down(cpu);
cpu               105 kernel/torture.c 				 torture_type, cpu, ret);
cpu               110 kernel/torture.c 				 torture_type, cpu);
cpu               135 kernel/torture.c bool torture_online(int cpu, long *n_onl_attempts, long *n_onl_successes,
cpu               142 kernel/torture.c 	if (cpu_online(cpu) || !cpu_is_hotpluggable(cpu))
cpu               148 kernel/torture.c 			 torture_type, cpu);
cpu               151 kernel/torture.c 	ret = cpu_up(cpu);
cpu               156 kernel/torture.c 				 torture_type, cpu, ret);
cpu               161 kernel/torture.c 				 torture_type, cpu);
cpu               186 kernel/torture.c 	int cpu;
cpu               192 kernel/torture.c 	for_each_online_cpu(cpu)
cpu               193 kernel/torture.c 		maxcpu = cpu;
cpu               196 kernel/torture.c 		for_each_possible_cpu(cpu) {
cpu               197 kernel/torture.c 			if (cpu_online(cpu))
cpu               199 kernel/torture.c 			ret = cpu_up(cpu);
cpu               203 kernel/torture.c 					 __func__, torture_type, cpu, ret);
cpu               218 kernel/torture.c 		cpu = (torture_random(&rand) >> 4) % (maxcpu + 1);
cpu               219 kernel/torture.c 		if (!torture_offline(cpu,
cpu               222 kernel/torture.c 			torture_online(cpu,
cpu                74 kernel/trace/blktrace.c 	int cpu = smp_processor_id();
cpu               101 kernel/trace/blktrace.c 		t->cpu = cpu;
cpu               224 kernel/trace/blktrace.c 	int cpu, pc = 0;
cpu               247 kernel/trace/blktrace.c 	cpu = raw_smp_processor_id();
cpu               274 kernel/trace/blktrace.c 		sequence = per_cpu_ptr(bt->sequence, cpu);
cpu               286 kernel/trace/blktrace.c 		t->cpu = cpu;
cpu              1293 kernel/trace/blktrace.c 			 MAJOR(t->device), MINOR(t->device), iter->cpu,
cpu               351 kernel/trace/bpf_trace.c 	unsigned int cpu = smp_processor_id();
cpu               358 kernel/trace/bpf_trace.c 		index = cpu;
cpu               424 kernel/trace/bpf_trace.c 	unsigned int cpu = smp_processor_id();
cpu               430 kernel/trace/bpf_trace.c 		index = cpu;
cpu               443 kernel/trace/bpf_trace.c 	if (unlikely(event->oncpu != cpu))
cpu              1460 kernel/trace/bpf_trace.c 	int cpu;
cpu              1463 kernel/trace/bpf_trace.c 	for_each_possible_cpu(cpu) {
cpu              1464 kernel/trace/bpf_trace.c 		work = per_cpu_ptr(&send_signal_work, cpu);
cpu               477 kernel/trace/fgraph.c void ftrace_graph_init_idle_task(struct task_struct *t, int cpu)
cpu               486 kernel/trace/fgraph.c 		WARN_ON(t->ret_stack != per_cpu(idle_ret_stack, cpu));
cpu               491 kernel/trace/fgraph.c 		ret_stack = per_cpu(idle_ret_stack, cpu);
cpu               499 kernel/trace/fgraph.c 			per_cpu(idle_ret_stack, cpu) = ret_stack;
cpu               540 kernel/trace/fgraph.c 	int ret, cpu;
cpu               550 kernel/trace/fgraph.c 	for_each_online_cpu(cpu) {
cpu               551 kernel/trace/fgraph.c 		if (!idle_task(cpu)->ret_stack)
cpu               552 kernel/trace/fgraph.c 			ftrace_graph_init_idle_task(idle_task(cpu), cpu);
cpu               644 kernel/trace/ftrace.c static int ftrace_profile_init_cpu(int cpu)
cpu               649 kernel/trace/ftrace.c 	stat = &per_cpu(ftrace_profile_stats, cpu);
cpu               680 kernel/trace/ftrace.c 	int cpu;
cpu               683 kernel/trace/ftrace.c 	for_each_possible_cpu(cpu) {
cpu               684 kernel/trace/ftrace.c 		ret = ftrace_profile_init_cpu(cpu);
cpu               967 kernel/trace/ftrace.c 	int cpu;
cpu               969 kernel/trace/ftrace.c 	for_each_possible_cpu(cpu) {
cpu               970 kernel/trace/ftrace.c 		stat = &per_cpu(ftrace_profile_stats, cpu);
cpu               972 kernel/trace/ftrace.c 		name = kasprintf(GFP_KERNEL, "function%d", cpu);
cpu               980 kernel/trace/ftrace.c 			     cpu);
cpu               989 kernel/trace/ftrace.c 			     cpu);
cpu              6475 kernel/trace/ftrace.c 	int cpu;
cpu              6484 kernel/trace/ftrace.c 	for_each_possible_cpu(cpu)
cpu              6485 kernel/trace/ftrace.c 		per_cpu_ptr(tr->trace_buffer.data, cpu)->ftrace_ignore_pid = false;
cpu               270 kernel/trace/ring_buffer.c #define for_each_buffer_cpu(buffer, cpu)		\
cpu               271 kernel/trace/ring_buffer.c 	for_each_cpu(cpu, buffer->cpumask)
cpu               444 kernel/trace/ring_buffer.c 	int				cpu;
cpu               521 kernel/trace/ring_buffer.c size_t ring_buffer_nr_pages(struct ring_buffer *buffer, int cpu)
cpu               523 kernel/trace/ring_buffer.c 	return buffer->buffers[cpu]->nr_pages;
cpu               533 kernel/trace/ring_buffer.c size_t ring_buffer_nr_dirty_pages(struct ring_buffer *buffer, int cpu)
cpu               538 kernel/trace/ring_buffer.c 	read = local_read(&buffer->buffers[cpu]->pages_read);
cpu               539 kernel/trace/ring_buffer.c 	cnt = local_read(&buffer->buffers[cpu]->pages_touched);
cpu               576 kernel/trace/ring_buffer.c int ring_buffer_wait(struct ring_buffer *buffer, int cpu, int full)
cpu               588 kernel/trace/ring_buffer.c 	if (cpu == RING_BUFFER_ALL_CPUS) {
cpu               593 kernel/trace/ring_buffer.c 		if (!cpumask_test_cpu(cpu, buffer->cpumask))
cpu               595 kernel/trace/ring_buffer.c 		cpu_buffer = buffer->buffers[cpu];
cpu               636 kernel/trace/ring_buffer.c 		if (cpu == RING_BUFFER_ALL_CPUS && !ring_buffer_empty(buffer))
cpu               639 kernel/trace/ring_buffer.c 		if (cpu != RING_BUFFER_ALL_CPUS &&
cpu               640 kernel/trace/ring_buffer.c 		    !ring_buffer_empty_cpu(buffer, cpu)) {
cpu               652 kernel/trace/ring_buffer.c 			dirty = ring_buffer_nr_dirty_pages(buffer, cpu);
cpu               687 kernel/trace/ring_buffer.c __poll_t ring_buffer_poll_wait(struct ring_buffer *buffer, int cpu,
cpu               693 kernel/trace/ring_buffer.c 	if (cpu == RING_BUFFER_ALL_CPUS)
cpu               696 kernel/trace/ring_buffer.c 		if (!cpumask_test_cpu(cpu, buffer->cpumask))
cpu               699 kernel/trace/ring_buffer.c 		cpu_buffer = buffer->buffers[cpu];
cpu               720 kernel/trace/ring_buffer.c 	if ((cpu == RING_BUFFER_ALL_CPUS && !ring_buffer_empty(buffer)) ||
cpu               721 kernel/trace/ring_buffer.c 	    (cpu != RING_BUFFER_ALL_CPUS && !ring_buffer_empty_cpu(buffer, cpu)))
cpu               751 kernel/trace/ring_buffer.c u64 ring_buffer_time_stamp(struct ring_buffer *buffer, int cpu)
cpu               764 kernel/trace/ring_buffer.c 				      int cpu, u64 *ts)
cpu              1189 kernel/trace/ring_buffer.c static int __rb_allocate_pages(long nr_pages, struct list_head *pages, int cpu)
cpu              1229 kernel/trace/ring_buffer.c 				    mflags, cpu_to_node(cpu));
cpu              1235 kernel/trace/ring_buffer.c 		page = alloc_pages_node(cpu_to_node(cpu), mflags, 0);
cpu              1267 kernel/trace/ring_buffer.c 	if (__rb_allocate_pages(nr_pages, &pages, cpu_buffer->cpu))
cpu              1286 kernel/trace/ring_buffer.c rb_allocate_cpu_buffer(struct ring_buffer *buffer, long nr_pages, int cpu)
cpu              1294 kernel/trace/ring_buffer.c 				  GFP_KERNEL, cpu_to_node(cpu));
cpu              1298 kernel/trace/ring_buffer.c 	cpu_buffer->cpu = cpu;
cpu              1310 kernel/trace/ring_buffer.c 			    GFP_KERNEL, cpu_to_node(cpu));
cpu              1317 kernel/trace/ring_buffer.c 	page = alloc_pages_node(cpu_to_node(cpu), GFP_KERNEL, 0);
cpu              1383 kernel/trace/ring_buffer.c 	int cpu;
cpu              1415 kernel/trace/ring_buffer.c 	cpu = raw_smp_processor_id();
cpu              1416 kernel/trace/ring_buffer.c 	cpumask_set_cpu(cpu, buffer->cpumask);
cpu              1417 kernel/trace/ring_buffer.c 	buffer->buffers[cpu] = rb_allocate_cpu_buffer(buffer, nr_pages, cpu);
cpu              1418 kernel/trace/ring_buffer.c 	if (!buffer->buffers[cpu])
cpu              1430 kernel/trace/ring_buffer.c 	for_each_buffer_cpu(buffer, cpu) {
cpu              1431 kernel/trace/ring_buffer.c 		if (buffer->buffers[cpu])
cpu              1432 kernel/trace/ring_buffer.c 			rb_free_cpu_buffer(buffer->buffers[cpu]);
cpu              1452 kernel/trace/ring_buffer.c 	int cpu;
cpu              1456 kernel/trace/ring_buffer.c 	for_each_buffer_cpu(buffer, cpu)
cpu              1457 kernel/trace/ring_buffer.c 		rb_free_cpu_buffer(buffer->buffers[cpu]);
cpu              1720 kernel/trace/ring_buffer.c 	int cpu, err = 0;
cpu              1754 kernel/trace/ring_buffer.c 		for_each_buffer_cpu(buffer, cpu) {
cpu              1755 kernel/trace/ring_buffer.c 			cpu_buffer = buffer->buffers[cpu];
cpu              1770 kernel/trace/ring_buffer.c 						&cpu_buffer->new_pages, cpu)) {
cpu              1783 kernel/trace/ring_buffer.c 		for_each_buffer_cpu(buffer, cpu) {
cpu              1784 kernel/trace/ring_buffer.c 			cpu_buffer = buffer->buffers[cpu];
cpu              1789 kernel/trace/ring_buffer.c 			if (!cpu_online(cpu)) {
cpu              1793 kernel/trace/ring_buffer.c 				schedule_work_on(cpu,
cpu              1799 kernel/trace/ring_buffer.c 		for_each_buffer_cpu(buffer, cpu) {
cpu              1800 kernel/trace/ring_buffer.c 			cpu_buffer = buffer->buffers[cpu];
cpu              1804 kernel/trace/ring_buffer.c 			if (cpu_online(cpu))
cpu              1863 kernel/trace/ring_buffer.c 		for_each_buffer_cpu(buffer, cpu) {
cpu              1864 kernel/trace/ring_buffer.c 			cpu_buffer = buffer->buffers[cpu];
cpu              1874 kernel/trace/ring_buffer.c 	for_each_buffer_cpu(buffer, cpu) {
cpu              1877 kernel/trace/ring_buffer.c 		cpu_buffer = buffer->buffers[cpu];
cpu              2643 kernel/trace/ring_buffer.c 	dirty = ring_buffer_nr_dirty_pages(buffer, cpu_buffer->cpu);
cpu              2739 kernel/trace/ring_buffer.c 	int cpu;
cpu              2743 kernel/trace/ring_buffer.c 	cpu = raw_smp_processor_id();
cpu              2744 kernel/trace/ring_buffer.c 	cpu_buffer = buffer->buffers[cpu];
cpu              2759 kernel/trace/ring_buffer.c 	int cpu;
cpu              2762 kernel/trace/ring_buffer.c 	cpu = raw_smp_processor_id();
cpu              2763 kernel/trace/ring_buffer.c 	cpu_buffer = buffer->buffers[cpu];
cpu              2782 kernel/trace/ring_buffer.c 	int cpu = raw_smp_processor_id();
cpu              2784 kernel/trace/ring_buffer.c 	cpu_buffer = buffer->buffers[cpu];
cpu              2968 kernel/trace/ring_buffer.c 	int cpu;
cpu              2976 kernel/trace/ring_buffer.c 	cpu = raw_smp_processor_id();
cpu              2978 kernel/trace/ring_buffer.c 	if (unlikely(!cpumask_test_cpu(cpu, buffer->cpumask)))
cpu              2981 kernel/trace/ring_buffer.c 	cpu_buffer = buffer->buffers[cpu];
cpu              3069 kernel/trace/ring_buffer.c 	int cpu;
cpu              3074 kernel/trace/ring_buffer.c 	cpu = smp_processor_id();
cpu              3075 kernel/trace/ring_buffer.c 	cpu_buffer = buffer->buffers[cpu];
cpu              3124 kernel/trace/ring_buffer.c 	int cpu;
cpu              3131 kernel/trace/ring_buffer.c 	cpu = raw_smp_processor_id();
cpu              3133 kernel/trace/ring_buffer.c 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
cpu              3136 kernel/trace/ring_buffer.c 	cpu_buffer = buffer->buffers[cpu];
cpu              3298 kernel/trace/ring_buffer.c void ring_buffer_record_disable_cpu(struct ring_buffer *buffer, int cpu)
cpu              3302 kernel/trace/ring_buffer.c 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
cpu              3305 kernel/trace/ring_buffer.c 	cpu_buffer = buffer->buffers[cpu];
cpu              3318 kernel/trace/ring_buffer.c void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu)
cpu              3322 kernel/trace/ring_buffer.c 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
cpu              3325 kernel/trace/ring_buffer.c 	cpu_buffer = buffer->buffers[cpu];
cpu              3348 kernel/trace/ring_buffer.c u64 ring_buffer_oldest_event_ts(struct ring_buffer *buffer, int cpu)
cpu              3355 kernel/trace/ring_buffer.c 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
cpu              3358 kernel/trace/ring_buffer.c 	cpu_buffer = buffer->buffers[cpu];
cpu              3381 kernel/trace/ring_buffer.c unsigned long ring_buffer_bytes_cpu(struct ring_buffer *buffer, int cpu)
cpu              3386 kernel/trace/ring_buffer.c 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
cpu              3389 kernel/trace/ring_buffer.c 	cpu_buffer = buffer->buffers[cpu];
cpu              3401 kernel/trace/ring_buffer.c unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu)
cpu              3405 kernel/trace/ring_buffer.c 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
cpu              3408 kernel/trace/ring_buffer.c 	cpu_buffer = buffer->buffers[cpu];
cpu              3420 kernel/trace/ring_buffer.c unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu)
cpu              3425 kernel/trace/ring_buffer.c 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
cpu              3428 kernel/trace/ring_buffer.c 	cpu_buffer = buffer->buffers[cpu];
cpu              3443 kernel/trace/ring_buffer.c ring_buffer_commit_overrun_cpu(struct ring_buffer *buffer, int cpu)
cpu              3448 kernel/trace/ring_buffer.c 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
cpu              3451 kernel/trace/ring_buffer.c 	cpu_buffer = buffer->buffers[cpu];
cpu              3465 kernel/trace/ring_buffer.c ring_buffer_dropped_events_cpu(struct ring_buffer *buffer, int cpu)
cpu              3470 kernel/trace/ring_buffer.c 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
cpu              3473 kernel/trace/ring_buffer.c 	cpu_buffer = buffer->buffers[cpu];
cpu              3486 kernel/trace/ring_buffer.c ring_buffer_read_events_cpu(struct ring_buffer *buffer, int cpu)
cpu              3490 kernel/trace/ring_buffer.c 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
cpu              3493 kernel/trace/ring_buffer.c 	cpu_buffer = buffer->buffers[cpu];
cpu              3509 kernel/trace/ring_buffer.c 	int cpu;
cpu              3512 kernel/trace/ring_buffer.c 	for_each_buffer_cpu(buffer, cpu) {
cpu              3513 kernel/trace/ring_buffer.c 		cpu_buffer = buffer->buffers[cpu];
cpu              3532 kernel/trace/ring_buffer.c 	int cpu;
cpu              3535 kernel/trace/ring_buffer.c 	for_each_buffer_cpu(buffer, cpu) {
cpu              3536 kernel/trace/ring_buffer.c 		cpu_buffer = buffer->buffers[cpu];
cpu              3925 kernel/trace/ring_buffer.c 							 cpu_buffer->cpu, ts);
cpu              3935 kernel/trace/ring_buffer.c 							 cpu_buffer->cpu, ts);
cpu              4015 kernel/trace/ring_buffer.c 							 cpu_buffer->cpu, ts);
cpu              4025 kernel/trace/ring_buffer.c 							 cpu_buffer->cpu, ts);
cpu              4080 kernel/trace/ring_buffer.c ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts,
cpu              4083 kernel/trace/ring_buffer.c 	struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
cpu              4088 kernel/trace/ring_buffer.c 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
cpu              4144 kernel/trace/ring_buffer.c ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts,
cpu              4156 kernel/trace/ring_buffer.c 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
cpu              4159 kernel/trace/ring_buffer.c 	cpu_buffer = buffer->buffers[cpu];
cpu              4204 kernel/trace/ring_buffer.c ring_buffer_read_prepare(struct ring_buffer *buffer, int cpu, gfp_t flags)
cpu              4209 kernel/trace/ring_buffer.c 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
cpu              4216 kernel/trace/ring_buffer.c 	cpu_buffer = buffer->buffers[cpu];
cpu              4335 kernel/trace/ring_buffer.c unsigned long ring_buffer_size(struct ring_buffer *buffer, int cpu)
cpu              4343 kernel/trace/ring_buffer.c 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
cpu              4346 kernel/trace/ring_buffer.c 	return BUF_PAGE_SIZE * buffer->buffers[cpu]->nr_pages;
cpu              4401 kernel/trace/ring_buffer.c void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu)
cpu              4403 kernel/trace/ring_buffer.c 	struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
cpu              4406 kernel/trace/ring_buffer.c 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
cpu              4440 kernel/trace/ring_buffer.c 	int cpu;
cpu              4442 kernel/trace/ring_buffer.c 	for_each_buffer_cpu(buffer, cpu)
cpu              4443 kernel/trace/ring_buffer.c 		ring_buffer_reset_cpu(buffer, cpu);
cpu              4456 kernel/trace/ring_buffer.c 	int cpu;
cpu              4460 kernel/trace/ring_buffer.c 	for_each_buffer_cpu(buffer, cpu) {
cpu              4461 kernel/trace/ring_buffer.c 		cpu_buffer = buffer->buffers[cpu];
cpu              4481 kernel/trace/ring_buffer.c bool ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu)
cpu              4488 kernel/trace/ring_buffer.c 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
cpu              4491 kernel/trace/ring_buffer.c 	cpu_buffer = buffer->buffers[cpu];
cpu              4514 kernel/trace/ring_buffer.c 			 struct ring_buffer *buffer_b, int cpu)
cpu              4520 kernel/trace/ring_buffer.c 	if (!cpumask_test_cpu(cpu, buffer_a->cpumask) ||
cpu              4521 kernel/trace/ring_buffer.c 	    !cpumask_test_cpu(cpu, buffer_b->cpumask))
cpu              4524 kernel/trace/ring_buffer.c 	cpu_buffer_a = buffer_a->buffers[cpu];
cpu              4525 kernel/trace/ring_buffer.c 	cpu_buffer_b = buffer_b->buffers[cpu];
cpu              4560 kernel/trace/ring_buffer.c 	buffer_a->buffers[cpu] = cpu_buffer_b;
cpu              4561 kernel/trace/ring_buffer.c 	buffer_b->buffers[cpu] = cpu_buffer_a;
cpu              4593 kernel/trace/ring_buffer.c void *ring_buffer_alloc_read_page(struct ring_buffer *buffer, int cpu)
cpu              4600 kernel/trace/ring_buffer.c 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
cpu              4603 kernel/trace/ring_buffer.c 	cpu_buffer = buffer->buffers[cpu];
cpu              4618 kernel/trace/ring_buffer.c 	page = alloc_pages_node(cpu_to_node(cpu),
cpu              4640 kernel/trace/ring_buffer.c void ring_buffer_free_read_page(struct ring_buffer *buffer, int cpu, void *data)
cpu              4642 kernel/trace/ring_buffer.c 	struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
cpu              4701 kernel/trace/ring_buffer.c 			  void **data_page, size_t len, int cpu, int full)
cpu              4703 kernel/trace/ring_buffer.c 	struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
cpu              4714 kernel/trace/ring_buffer.c 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
cpu              4869 kernel/trace/ring_buffer.c int trace_rb_cpu_prepare(unsigned int cpu, struct hlist_node *node)
cpu              4877 kernel/trace/ring_buffer.c 	if (cpumask_test_cpu(cpu, buffer->cpumask))
cpu              4895 kernel/trace/ring_buffer.c 	buffer->buffers[cpu] =
cpu              4896 kernel/trace/ring_buffer.c 		rb_allocate_cpu_buffer(buffer, nr_pages, cpu);
cpu              4897 kernel/trace/ring_buffer.c 	if (!buffer->buffers[cpu]) {
cpu              4899 kernel/trace/ring_buffer.c 		     cpu);
cpu              4903 kernel/trace/ring_buffer.c 	cpumask_set_cpu(cpu, buffer->cpumask);
cpu              4939 kernel/trace/ring_buffer.c 	int			cpu;
cpu              5046 kernel/trace/ring_buffer.c 	int cpu = smp_processor_id();
cpu              5048 kernel/trace/ring_buffer.c 	data = &rb_data[cpu];
cpu              5069 kernel/trace/ring_buffer.c 	int cpu;
cpu              5086 kernel/trace/ring_buffer.c 	for_each_online_cpu(cpu) {
cpu              5087 kernel/trace/ring_buffer.c 		rb_data[cpu].buffer = buffer;
cpu              5088 kernel/trace/ring_buffer.c 		rb_data[cpu].cpu = cpu;
cpu              5089 kernel/trace/ring_buffer.c 		rb_data[cpu].cnt = cpu;
cpu              5090 kernel/trace/ring_buffer.c 		rb_threads[cpu] = kthread_create(rb_test, &rb_data[cpu],
cpu              5091 kernel/trace/ring_buffer.c 						 "rbtester/%d", cpu);
cpu              5092 kernel/trace/ring_buffer.c 		if (WARN_ON(IS_ERR(rb_threads[cpu]))) {
cpu              5094 kernel/trace/ring_buffer.c 			ret = PTR_ERR(rb_threads[cpu]);
cpu              5098 kernel/trace/ring_buffer.c 		kthread_bind(rb_threads[cpu], cpu);
cpu              5099 kernel/trace/ring_buffer.c  		wake_up_process(rb_threads[cpu]);
cpu              5130 kernel/trace/ring_buffer.c 	for_each_online_cpu(cpu) {
cpu              5131 kernel/trace/ring_buffer.c 		if (!rb_threads[cpu])
cpu              5133 kernel/trace/ring_buffer.c 		kthread_stop(rb_threads[cpu]);
cpu              5142 kernel/trace/ring_buffer.c 	for_each_online_cpu(cpu) {
cpu              5144 kernel/trace/ring_buffer.c 		struct rb_test_data *data = &rb_data[cpu];
cpu              5168 kernel/trace/ring_buffer.c 		pr_info("CPU %d:\n", cpu);
cpu              5181 kernel/trace/ring_buffer.c 		while ((event = ring_buffer_consume(buffer, cpu, NULL, &lost))) {
cpu                85 kernel/trace/ring_buffer_benchmark.c static enum event_status read_event(int cpu)
cpu                91 kernel/trace/ring_buffer_benchmark.c 	event = ring_buffer_consume(buffer, cpu, &ts, NULL);
cpu                96 kernel/trace/ring_buffer_benchmark.c 	if (*entry != cpu) {
cpu               105 kernel/trace/ring_buffer_benchmark.c static enum event_status read_page(int cpu)
cpu               116 kernel/trace/ring_buffer_benchmark.c 	bpage = ring_buffer_alloc_read_page(buffer, cpu);
cpu               120 kernel/trace/ring_buffer_benchmark.c 	ret = ring_buffer_read_page(buffer, &bpage, PAGE_SIZE, cpu, 1);
cpu               146 kernel/trace/ring_buffer_benchmark.c 				if (*entry != cpu) {
cpu               159 kernel/trace/ring_buffer_benchmark.c 				if (*entry != cpu) {
cpu               175 kernel/trace/ring_buffer_benchmark.c 	ring_buffer_free_read_page(buffer, cpu, bpage);
cpu               196 kernel/trace/ring_buffer_benchmark.c 			int cpu;
cpu               199 kernel/trace/ring_buffer_benchmark.c 			for_each_online_cpu(cpu) {
cpu               203 kernel/trace/ring_buffer_benchmark.c 					stat = read_event(cpu);
cpu               205 kernel/trace/ring_buffer_benchmark.c 					stat = read_page(cpu);
cpu               591 kernel/trace/trace.c static u64 buffer_ftrace_now(struct trace_buffer *buf, int cpu)
cpu               599 kernel/trace/trace.c 	ts = ring_buffer_time_stamp(buf->buffer, cpu);
cpu               600 kernel/trace/trace.c 	ring_buffer_normalize_time_stamp(buf->buffer, cpu, &ts);
cpu               605 kernel/trace/trace.c u64 ftrace_now(int cpu)
cpu               607 kernel/trace/trace.c 	return buffer_ftrace_now(&global_trace.trace_buffer, cpu);
cpu               678 kernel/trace/trace.c static inline void trace_access_lock(int cpu)
cpu               680 kernel/trace/trace.c 	if (cpu == RING_BUFFER_ALL_CPUS) {
cpu               690 kernel/trace/trace.c 		mutex_lock(&per_cpu(cpu_access_lock, cpu));
cpu               694 kernel/trace/trace.c static inline void trace_access_unlock(int cpu)
cpu               696 kernel/trace/trace.c 	if (cpu == RING_BUFFER_ALL_CPUS) {
cpu               699 kernel/trace/trace.c 		mutex_unlock(&per_cpu(cpu_access_lock, cpu));
cpu               706 kernel/trace/trace.c 	int cpu;
cpu               708 kernel/trace/trace.c 	for_each_possible_cpu(cpu)
cpu               709 kernel/trace/trace.c 		mutex_init(&per_cpu(cpu_access_lock, cpu));
cpu               716 kernel/trace/trace.c static inline void trace_access_lock(int cpu)
cpu               718 kernel/trace/trace.c 	(void)cpu;
cpu               722 kernel/trace/trace.c static inline void trace_access_unlock(int cpu)
cpu               724 kernel/trace/trace.c 	(void)cpu;
cpu              1508 kernel/trace/trace.c __update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
cpu              1512 kernel/trace/trace.c 	struct trace_array_cpu *data = per_cpu_ptr(trace_buf->data, cpu);
cpu              1513 kernel/trace/trace.c 	struct trace_array_cpu *max_data = per_cpu_ptr(max_buf->data, cpu);
cpu              1515 kernel/trace/trace.c 	max_buf->cpu = cpu;
cpu              1552 kernel/trace/trace.c update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu,
cpu              1580 kernel/trace/trace.c 	__update_max_tr(tr, tsk, cpu);
cpu              1595 kernel/trace/trace.c update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
cpu              1611 kernel/trace/trace.c 	ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->trace_buffer.buffer, cpu);
cpu              1626 kernel/trace/trace.c 	__update_max_tr(tr, tsk, cpu);
cpu              1883 kernel/trace/trace.c static void tracing_reset_cpu(struct trace_buffer *buf, int cpu)
cpu              1894 kernel/trace/trace.c 	ring_buffer_reset_cpu(buffer, cpu);
cpu              1902 kernel/trace/trace.c 	int cpu;
cpu              1912 kernel/trace/trace.c 	buf->time_start = buffer_ftrace_now(buf, buf->cpu);
cpu              1914 kernel/trace/trace.c 	for_each_online_cpu(cpu)
cpu              1915 kernel/trace/trace.c 		ring_buffer_reset_cpu(buffer, cpu);
cpu              2393 kernel/trace/trace.c 	int cpu;
cpu              2400 kernel/trace/trace.c 	for_each_tracing_cpu(cpu) {
cpu              2401 kernel/trace/trace.c 		page = alloc_pages_node(cpu_to_node(cpu),
cpu              2409 kernel/trace/trace.c 		per_cpu(trace_buffered_event, cpu) = event;
cpu              2412 kernel/trace/trace.c 		if (cpu == smp_processor_id() &&
cpu              2414 kernel/trace/trace.c 		    per_cpu(trace_buffered_event, cpu))
cpu              2446 kernel/trace/trace.c 	int cpu;
cpu              2465 kernel/trace/trace.c 	for_each_tracing_cpu(cpu) {
cpu              2466 kernel/trace/trace.c 		free_page((unsigned long)per_cpu(trace_buffered_event, cpu));
cpu              2467 kernel/trace/trace.c 		per_cpu(trace_buffered_event, cpu) = NULL;
cpu              3268 kernel/trace/trace.c 	struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, iter->cpu);
cpu              3276 kernel/trace/trace.c peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
cpu              3280 kernel/trace/trace.c 	struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, cpu);
cpu              3285 kernel/trace/trace.c 		event = ring_buffer_peek(iter->trace_buffer->buffer, cpu, ts,
cpu              3307 kernel/trace/trace.c 	int cpu;
cpu              3323 kernel/trace/trace.c 	for_each_tracing_cpu(cpu) {
cpu              3325 kernel/trace/trace.c 		if (ring_buffer_empty_cpu(buffer, cpu))
cpu              3328 kernel/trace/trace.c 		ent = peek_next_entry(iter, cpu, &ts, &lost_events);
cpu              3335 kernel/trace/trace.c 			next_cpu = cpu;
cpu              3366 kernel/trace/trace.c 	iter->ent = __find_next_entry(iter, &iter->cpu,
cpu              3377 kernel/trace/trace.c 	ring_buffer_consume(iter->trace_buffer->buffer, iter->cpu, &iter->ts,
cpu              3408 kernel/trace/trace.c void tracing_iter_reset(struct trace_iterator *iter, int cpu)
cpu              3415 kernel/trace/trace.c 	per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = 0;
cpu              3417 kernel/trace/trace.c 	buf_iter = trace_buffer_iter(iter, cpu);
cpu              3435 kernel/trace/trace.c 	per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = entries;
cpu              3449 kernel/trace/trace.c 	int cpu;
cpu              3472 kernel/trace/trace.c 		iter->cpu = 0;
cpu              3476 kernel/trace/trace.c 			for_each_tracing_cpu(cpu)
cpu              3477 kernel/trace/trace.c 				tracing_iter_reset(iter, cpu);
cpu              3521 kernel/trace/trace.c 		      unsigned long *entries, int cpu)
cpu              3525 kernel/trace/trace.c 	count = ring_buffer_entries_cpu(buf->buffer, cpu);
cpu              3531 kernel/trace/trace.c 	if (per_cpu_ptr(buf->data, cpu)->skipped_entries) {
cpu              3532 kernel/trace/trace.c 		count -= per_cpu_ptr(buf->data, cpu)->skipped_entries;
cpu              3537 kernel/trace/trace.c 			ring_buffer_overrun_cpu(buf->buffer, cpu);
cpu              3546 kernel/trace/trace.c 	int cpu;
cpu              3551 kernel/trace/trace.c 	for_each_tracing_cpu(cpu) {
cpu              3552 kernel/trace/trace.c 		get_total_entries_cpu(buf, &t, &e, cpu);
cpu              3558 kernel/trace/trace.c unsigned long trace_total_entries_cpu(struct trace_array *tr, int cpu)
cpu              3565 kernel/trace/trace.c 	get_total_entries_cpu(&tr->trace_buffer, &total, &entries, cpu);
cpu              3639 kernel/trace/trace.c 	struct trace_array_cpu *data = per_cpu_ptr(buf->data, buf->cpu);
cpu              3658 kernel/trace/trace.c 		   buf->cpu,
cpu              3708 kernel/trace/trace.c 	    cpumask_test_cpu(iter->cpu, iter->started))
cpu              3711 kernel/trace/trace.c 	if (per_cpu_ptr(iter->trace_buffer->data, iter->cpu)->skipped_entries)
cpu              3715 kernel/trace/trace.c 		cpumask_set_cpu(iter->cpu, iter->started);
cpu              3720 kernel/trace/trace.c 				iter->cpu);
cpu              3766 kernel/trace/trace.c 				 entry->pid, iter->cpu, iter->ts);
cpu              3792 kernel/trace/trace.c 		SEQ_PUT_HEX_FIELD(s, iter->cpu);
cpu              3821 kernel/trace/trace.c 		SEQ_PUT_FIELD(s, iter->cpu);
cpu              3835 kernel/trace/trace.c 	int cpu;
cpu              3839 kernel/trace/trace.c 		cpu = iter->cpu_file;
cpu              3840 kernel/trace/trace.c 		buf_iter = trace_buffer_iter(iter, cpu);
cpu              3845 kernel/trace/trace.c 			if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
cpu              3851 kernel/trace/trace.c 	for_each_tracing_cpu(cpu) {
cpu              3852 kernel/trace/trace.c 		buf_iter = trace_buffer_iter(iter, cpu);
cpu              3857 kernel/trace/trace.c 			if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
cpu              3874 kernel/trace/trace.c 				 iter->cpu, iter->lost_events);
cpu              4075 kernel/trace/trace.c 	int cpu;
cpu              4134 kernel/trace/trace.c 		for_each_tracing_cpu(cpu) {
cpu              4135 kernel/trace/trace.c 			iter->buffer_iter[cpu] =
cpu              4137 kernel/trace/trace.c 							 cpu, GFP_KERNEL);
cpu              4140 kernel/trace/trace.c 		for_each_tracing_cpu(cpu) {
cpu              4141 kernel/trace/trace.c 			ring_buffer_read_start(iter->buffer_iter[cpu]);
cpu              4142 kernel/trace/trace.c 			tracing_iter_reset(iter, cpu);
cpu              4145 kernel/trace/trace.c 		cpu = iter->cpu_file;
cpu              4146 kernel/trace/trace.c 		iter->buffer_iter[cpu] =
cpu              4148 kernel/trace/trace.c 						 cpu, GFP_KERNEL);
cpu              4150 kernel/trace/trace.c 		ring_buffer_read_start(iter->buffer_iter[cpu]);
cpu              4151 kernel/trace/trace.c 		tracing_iter_reset(iter, cpu);
cpu              4207 kernel/trace/trace.c 	int cpu;
cpu              4218 kernel/trace/trace.c 	for_each_tracing_cpu(cpu) {
cpu              4219 kernel/trace/trace.c 		if (iter->buffer_iter[cpu])
cpu              4220 kernel/trace/trace.c 			ring_buffer_read_finish(iter->buffer_iter[cpu]);
cpu              4272 kernel/trace/trace.c 		int cpu = tracing_get_cpu(inode);
cpu              4280 kernel/trace/trace.c 		if (cpu == RING_BUFFER_ALL_CPUS)
cpu              4283 kernel/trace/trace.c 			tracing_reset_cpu(trace_buf, cpu);
cpu              4476 kernel/trace/trace.c 	int err, cpu;
cpu              4487 kernel/trace/trace.c 	for_each_tracing_cpu(cpu) {
cpu              4492 kernel/trace/trace.c 		if (cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
cpu              4493 kernel/trace/trace.c 				!cpumask_test_cpu(cpu, tracing_cpumask_new)) {
cpu              4494 kernel/trace/trace.c 			atomic_inc(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
cpu              4495 kernel/trace/trace.c 			ring_buffer_record_disable_cpu(tr->trace_buffer.buffer, cpu);
cpu              4497 kernel/trace/trace.c 		if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
cpu              4498 kernel/trace/trace.c 				cpumask_test_cpu(cpu, tracing_cpumask_new)) {
cpu              4499 kernel/trace/trace.c 			atomic_dec(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
cpu              4500 kernel/trace/trace.c 			ring_buffer_record_enable_cpu(tr->trace_buffer.buffer, cpu);
cpu              5456 kernel/trace/trace.c 	int cpu;
cpu              5458 kernel/trace/trace.c 	for_each_tracing_cpu(cpu)
cpu              5459 kernel/trace/trace.c 		per_cpu_ptr(buf->data, cpu)->entries = val;
cpu              5467 kernel/trace/trace.c 	int cpu, ret = 0;
cpu              5470 kernel/trace/trace.c 		for_each_tracing_cpu(cpu) {
cpu              5472 kernel/trace/trace.c 				 per_cpu_ptr(size_buf->data, cpu)->entries, cpu);
cpu              5475 kernel/trace/trace.c 			per_cpu_ptr(trace_buf->data, cpu)->entries =
cpu              5476 kernel/trace/trace.c 				per_cpu_ptr(size_buf->data, cpu)->entries;
cpu              5491 kernel/trace/trace.c 					unsigned long size, int cpu)
cpu              5506 kernel/trace/trace.c 	ret = ring_buffer_resize(tr->trace_buffer.buffer, size, cpu);
cpu              5515 kernel/trace/trace.c 	ret = ring_buffer_resize(tr->max_buffer.buffer, size, cpu);
cpu              5518 kernel/trace/trace.c 						     &tr->trace_buffer, cpu);
cpu              5540 kernel/trace/trace.c 	if (cpu == RING_BUFFER_ALL_CPUS)
cpu              5543 kernel/trace/trace.c 		per_cpu_ptr(tr->max_buffer.data, cpu)->entries = size;
cpu              5548 kernel/trace/trace.c 	if (cpu == RING_BUFFER_ALL_CPUS)
cpu              5551 kernel/trace/trace.c 		per_cpu_ptr(tr->trace_buffer.data, cpu)->entries = size;
cpu              6255 kernel/trace/trace.c 	int cpu = tracing_get_cpu(inode);
cpu              6262 kernel/trace/trace.c 	if (cpu == RING_BUFFER_ALL_CPUS) {
cpu              6263 kernel/trace/trace.c 		int cpu, buf_size_same;
cpu              6269 kernel/trace/trace.c 		for_each_tracing_cpu(cpu) {
cpu              6272 kernel/trace/trace.c 				size = per_cpu_ptr(tr->trace_buffer.data, cpu)->entries;
cpu              6273 kernel/trace/trace.c 			if (size != per_cpu_ptr(tr->trace_buffer.data, cpu)->entries) {
cpu              6289 kernel/trace/trace.c 		r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10);
cpu              6331 kernel/trace/trace.c 	int r, cpu;
cpu              6335 kernel/trace/trace.c 	for_each_tracing_cpu(cpu) {
cpu              6336 kernel/trace/trace.c 		size += per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10;
cpu              7351 kernel/trace/trace.c 	int			cpu;
cpu              7359 kernel/trace/trace.c 	ring_buffer_free_read_page(ref->buffer, ref->cpu, ref->page);
cpu              7465 kernel/trace/trace.c 		ref->cpu = iter->cpu_file;
cpu              7470 kernel/trace/trace.c 			ring_buffer_free_read_page(ref->buffer, ref->cpu,
cpu              7530 kernel/trace/trace.c 	int cpu = tracing_get_cpu(inode);
cpu              7542 kernel/trace/trace.c 	cnt = ring_buffer_entries_cpu(trace_buf->buffer, cpu);
cpu              7545 kernel/trace/trace.c 	cnt = ring_buffer_overrun_cpu(trace_buf->buffer, cpu);
cpu              7548 kernel/trace/trace.c 	cnt = ring_buffer_commit_overrun_cpu(trace_buf->buffer, cpu);
cpu              7551 kernel/trace/trace.c 	cnt = ring_buffer_bytes_cpu(trace_buf->buffer, cpu);
cpu              7556 kernel/trace/trace.c 		t = ns2usecs(ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
cpu              7561 kernel/trace/trace.c 		t = ns2usecs(ring_buffer_time_stamp(trace_buf->buffer, cpu));
cpu              7567 kernel/trace/trace.c 				ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
cpu              7570 kernel/trace/trace.c 				ring_buffer_time_stamp(trace_buf->buffer, cpu));
cpu              7573 kernel/trace/trace.c 	cnt = ring_buffer_dropped_events_cpu(trace_buf->buffer, cpu);
cpu              7576 kernel/trace/trace.c 	cnt = ring_buffer_read_events_cpu(trace_buf->buffer, cpu);
cpu              7788 kernel/trace/trace.c static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu)
cpu              7802 kernel/trace/trace.c 		  "Could not create tracefs directory 'per_cpu/%d'\n", cpu);
cpu              7809 kernel/trace/trace.c 		      void *data, long cpu, const struct file_operations *fops)
cpu              7814 kernel/trace/trace.c 		d_inode(ret)->i_cdev = (void *)(cpu + 1);
cpu              7819 kernel/trace/trace.c tracing_init_tracefs_percpu(struct trace_array *tr, long cpu)
cpu              7821 kernel/trace/trace.c 	struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu);
cpu              7828 kernel/trace/trace.c 	snprintf(cpu_dir, 30, "cpu%ld", cpu);
cpu              7837 kernel/trace/trace.c 				tr, cpu, &tracing_pipe_fops);
cpu              7841 kernel/trace/trace.c 				tr, cpu, &tracing_fops);
cpu              7844 kernel/trace/trace.c 				tr, cpu, &tracing_buffers_fops);
cpu              7847 kernel/trace/trace.c 				tr, cpu, &tracing_stats_fops);
cpu              7850 kernel/trace/trace.c 				tr, cpu, &tracing_entries_fops);
cpu              7854 kernel/trace/trace.c 				tr, cpu, &snapshot_fops);
cpu              7857 kernel/trace/trace.c 				tr, cpu, &snapshot_raw_fops);
cpu              8559 kernel/trace/trace.c 	int cpu;
cpu              8632 kernel/trace/trace.c 	for_each_tracing_cpu(cpu)
cpu              8633 kernel/trace/trace.c 		tracing_init_tracefs_percpu(tr, cpu);
cpu              8926 kernel/trace/trace.c 	int cnt = 0, cpu;
cpu              8950 kernel/trace/trace.c 	for_each_tracing_cpu(cpu) {
cpu              8951 kernel/trace/trace.c 		atomic_inc(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
cpu              9018 kernel/trace/trace.c 	for_each_tracing_cpu(cpu) {
cpu              9019 kernel/trace/trace.c 		atomic_dec(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
cpu               181 kernel/trace/trace.h 	int				cpu;
cpu               674 kernel/trace/trace.h trace_buffer_iter(struct trace_iterator *iter, int cpu)
cpu               676 kernel/trace/trace.h 	return iter->buffer_iter ? iter->buffer_iter[cpu] : NULL;
cpu               682 kernel/trace/trace.h void tracing_reset_current(int cpu);
cpu               722 kernel/trace/trace.h void tracing_iter_reset(struct trace_iterator *iter, int cpu);
cpu               724 kernel/trace/trace.h unsigned long trace_total_entries_cpu(struct trace_array *tr, int cpu);
cpu               756 kernel/trace/trace.h #define for_each_tracing_cpu(cpu)	\
cpu               757 kernel/trace/trace.h 	for_each_cpu(cpu, tracing_buffer_mask)
cpu               783 kernel/trace/trace.h void update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu,
cpu               786 kernel/trace/trace.h 			  struct task_struct *tsk, int cpu);
cpu               799 kernel/trace/trace.h extern u64 ftrace_now(int cpu);
cpu                96 kernel/trace/trace_event_perf.c 	int cpu;
cpu               106 kernel/trace/trace_event_perf.c 	for_each_possible_cpu(cpu)
cpu               107 kernel/trace/trace_event_perf.c 		INIT_HLIST_HEAD(per_cpu_ptr(list, cpu));
cpu               168 kernel/trace/trace_events.c 	__generic_field(int, cpu, FILTER_CPU);
cpu               604 kernel/trace/trace_events.c 	int cpu;
cpu               627 kernel/trace/trace_events.c 	for_each_possible_cpu(cpu)
cpu               628 kernel/trace/trace_events.c 		per_cpu_ptr(tr->trace_buffer.data, cpu)->ignore_pid = false;
cpu              3366 kernel/trace/trace_events.c 	int cpu;
cpu              3371 kernel/trace/trace_events.c 	cpu = raw_smp_processor_id();
cpu              3372 kernel/trace/trace_events.c 	disabled = atomic_inc_return(&per_cpu(ftrace_test_event_disable, cpu));
cpu              3391 kernel/trace/trace_events.c 	atomic_dec(&per_cpu(ftrace_test_event_disable, cpu));
cpu               712 kernel/trace/trace_events_filter.c 	int cpu, cmp;
cpu               714 kernel/trace/trace_events_filter.c 	cpu = raw_smp_processor_id();
cpu               719 kernel/trace/trace_events_filter.c 		return cpu == cmp;
cpu               721 kernel/trace/trace_events_filter.c 		return cpu != cmp;
cpu               723 kernel/trace/trace_events_filter.c 		return cpu < cmp;
cpu               725 kernel/trace/trace_events_filter.c 		return cpu <= cmp;
cpu               727 kernel/trace/trace_events_filter.c 		return cpu > cmp;
cpu               729 kernel/trace/trace_events_filter.c 		return cpu >= cmp;
cpu              1552 kernel/trace/trace_events_hist.c 	int cpu = smp_processor_id();
cpu              1554 kernel/trace/trace_events_hist.c 	return cpu;
cpu               104 kernel/trace/trace_functions.c 	tr->trace_buffer.cpu = get_cpu();
cpu               132 kernel/trace/trace_functions.c 	int cpu;
cpu               145 kernel/trace/trace_functions.c 	cpu = smp_processor_id();
cpu               146 kernel/trace/trace_functions.c 	data = per_cpu_ptr(tr->trace_buffer.data, cpu);
cpu               183 kernel/trace/trace_functions.c 	int cpu;
cpu               194 kernel/trace/trace_functions.c 	cpu = raw_smp_processor_id();
cpu               195 kernel/trace/trace_functions.c 	data = per_cpu_ptr(tr->trace_buffer.data, cpu);
cpu                37 kernel/trace/trace_functions_graph.c 	int				cpu;
cpu                48 kernel/trace/trace_functions_graph.c 	{ TRACER_OPT(funcgraph-cpu, TRACE_GRAPH_PRINT_CPU) },
cpu               134 kernel/trace/trace_functions_graph.c 	int cpu;
cpu               173 kernel/trace/trace_functions_graph.c 	cpu = raw_smp_processor_id();
cpu               174 kernel/trace/trace_functions_graph.c 	data = per_cpu_ptr(tr->trace_buffer.data, cpu);
cpu               243 kernel/trace/trace_functions_graph.c 	int cpu;
cpu               254 kernel/trace/trace_functions_graph.c 	cpu = raw_smp_processor_id();
cpu               255 kernel/trace/trace_functions_graph.c 	data = per_cpu_ptr(tr->trace_buffer.data, cpu);
cpu               333 kernel/trace/trace_functions_graph.c static void print_graph_cpu(struct trace_seq *s, int cpu)
cpu               340 kernel/trace/trace_functions_graph.c 	trace_seq_printf(s, " %*d) ", max_bytes_for_cpu, cpu);
cpu               385 kernel/trace/trace_functions_graph.c verif_pid(struct trace_seq *s, pid_t pid, int cpu, struct fgraph_data *data)
cpu               393 kernel/trace/trace_functions_graph.c 	last_pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid);
cpu               412 kernel/trace/trace_functions_graph.c 	print_graph_cpu(s, cpu);
cpu               437 kernel/trace/trace_functions_graph.c 		ring_iter = trace_buffer_iter(iter, iter->cpu);
cpu               447 kernel/trace/trace_functions_graph.c 			ring_buffer_consume(iter->trace_buffer->buffer, iter->cpu,
cpu               449 kernel/trace/trace_functions_graph.c 			event = ring_buffer_peek(iter->trace_buffer->buffer, iter->cpu,
cpu               514 kernel/trace/trace_functions_graph.c 		enum trace_type type, int cpu, pid_t pid, u32 flags)
cpu               535 kernel/trace/trace_functions_graph.c 			print_graph_cpu(s, cpu);
cpu               636 kernel/trace/trace_functions_graph.c 	int cpu = iter->cpu;
cpu               646 kernel/trace/trace_functions_graph.c 		cpu_data = per_cpu_ptr(data->cpu_data, cpu);
cpu               671 kernel/trace/trace_functions_graph.c 			cpu, iter->ent->pid, flags);
cpu               679 kernel/trace/trace_functions_graph.c 			 struct trace_seq *s, int cpu, u32 flags)
cpu               688 kernel/trace/trace_functions_graph.c 		int cpu = iter->cpu;
cpu               690 kernel/trace/trace_functions_graph.c 		cpu_data = per_cpu_ptr(data->cpu_data, cpu);
cpu               725 kernel/trace/trace_functions_graph.c 	int cpu = iter->cpu;
cpu               728 kernel/trace/trace_functions_graph.c 	verif_pid(s, ent->pid, cpu, data);
cpu               732 kernel/trace/trace_functions_graph.c 		print_graph_irq(iter, addr, type, cpu, ent->pid, flags);
cpu               747 kernel/trace/trace_functions_graph.c 		print_graph_cpu(s, cpu);
cpu               777 kernel/trace/trace_functions_graph.c 	int cpu = iter->cpu;
cpu               790 kernel/trace/trace_functions_graph.c 	depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);
cpu               823 kernel/trace/trace_functions_graph.c 	int cpu = iter->cpu;
cpu               836 kernel/trace/trace_functions_graph.c 	depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);
cpu               872 kernel/trace/trace_functions_graph.c 	int cpu = iter->cpu;
cpu               883 kernel/trace/trace_functions_graph.c 		ret = print_graph_entry_nested(iter, field, s, cpu, flags);
cpu               892 kernel/trace/trace_functions_graph.c 			data->cpu = cpu;
cpu               909 kernel/trace/trace_functions_graph.c 	int cpu = iter->cpu;
cpu               918 kernel/trace/trace_functions_graph.c 		int cpu = iter->cpu;
cpu               920 kernel/trace/trace_functions_graph.c 		cpu_data = per_cpu_ptr(data->cpu_data, cpu);
cpu               964 kernel/trace/trace_functions_graph.c 			cpu, pid, flags);
cpu               982 kernel/trace/trace_functions_graph.c 		depth = per_cpu_ptr(data->cpu_data, iter->cpu)->depth;
cpu              1045 kernel/trace/trace_functions_graph.c 	int cpu = iter->cpu;
cpu              1048 kernel/trace/trace_functions_graph.c 	if (data && per_cpu_ptr(data->cpu_data, cpu)->ignore) {
cpu              1049 kernel/trace/trace_functions_graph.c 		per_cpu_ptr(data->cpu_data, cpu)->ignore = 0;
cpu              1059 kernel/trace/trace_functions_graph.c 		iter->cpu = data->cpu;
cpu              1061 kernel/trace/trace_functions_graph.c 		if (ret == TRACE_TYPE_HANDLED && iter->cpu != cpu) {
cpu              1062 kernel/trace/trace_functions_graph.c 			per_cpu_ptr(data->cpu_data, iter->cpu)->ignore = 1;
cpu              1065 kernel/trace/trace_functions_graph.c 		iter->cpu = cpu;
cpu              1205 kernel/trace/trace_functions_graph.c 	int cpu;
cpu              1220 kernel/trace/trace_functions_graph.c 	for_each_possible_cpu(cpu) {
cpu              1221 kernel/trace/trace_functions_graph.c 		pid_t *pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid);
cpu              1222 kernel/trace/trace_functions_graph.c 		int *depth = &(per_cpu_ptr(data->cpu_data, cpu)->depth);
cpu              1223 kernel/trace/trace_functions_graph.c 		int *ignore = &(per_cpu_ptr(data->cpu_data, cpu)->ignore);
cpu              1224 kernel/trace/trace_functions_graph.c 		int *depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);
cpu               104 kernel/trace/trace_irqsoff.c 	int cpu;
cpu               112 kernel/trace/trace_irqsoff.c 	cpu = raw_smp_processor_id();
cpu               113 kernel/trace/trace_irqsoff.c 	if (likely(!per_cpu(tracing_cpu, cpu)))
cpu               125 kernel/trace/trace_irqsoff.c 	*data = per_cpu_ptr(tr->trace_buffer.data, cpu);
cpu               159 kernel/trace/trace_irqsoff.c 	int cpu;
cpu               166 kernel/trace/trace_irqsoff.c 	for_each_possible_cpu(cpu)
cpu               167 kernel/trace/trace_irqsoff.c 		per_cpu(tracing_cpu, cpu) = 0;
cpu               321 kernel/trace/trace_irqsoff.c 		      int cpu)
cpu               328 kernel/trace/trace_irqsoff.c 	T1 = ftrace_now(cpu);
cpu               355 kernel/trace/trace_irqsoff.c 		update_max_tr_single(tr, current, cpu);
cpu               365 kernel/trace/trace_irqsoff.c 	data->preempt_timestamp = ftrace_now(cpu);
cpu               372 kernel/trace/trace_irqsoff.c 	int cpu;
cpu               380 kernel/trace/trace_irqsoff.c 	cpu = raw_smp_processor_id();
cpu               382 kernel/trace/trace_irqsoff.c 	if (per_cpu(tracing_cpu, cpu))
cpu               385 kernel/trace/trace_irqsoff.c 	data = per_cpu_ptr(tr->trace_buffer.data, cpu);
cpu               393 kernel/trace/trace_irqsoff.c 	data->preempt_timestamp = ftrace_now(cpu);
cpu               400 kernel/trace/trace_irqsoff.c 	per_cpu(tracing_cpu, cpu) = 1;
cpu               408 kernel/trace/trace_irqsoff.c 	int cpu;
cpu               413 kernel/trace/trace_irqsoff.c 	cpu = raw_smp_processor_id();
cpu               415 kernel/trace/trace_irqsoff.c 	if (unlikely(per_cpu(tracing_cpu, cpu)))
cpu               416 kernel/trace/trace_irqsoff.c 		per_cpu(tracing_cpu, cpu) = 0;
cpu               423 kernel/trace/trace_irqsoff.c 	data = per_cpu_ptr(tr->trace_buffer.data, cpu);
cpu               433 kernel/trace/trace_irqsoff.c 	check_critical_timing(tr, data, parent_ip ? : ip, cpu);
cpu                27 kernel/trace/trace_kdb.c 	int cnt = 0, cpu;
cpu                44 kernel/trace/trace_kdb.c 		for_each_tracing_cpu(cpu) {
cpu                45 kernel/trace/trace_kdb.c 			iter.buffer_iter[cpu] =
cpu                47 kernel/trace/trace_kdb.c 						 cpu, GFP_ATOMIC);
cpu                48 kernel/trace/trace_kdb.c 			ring_buffer_read_start(iter.buffer_iter[cpu]);
cpu                49 kernel/trace/trace_kdb.c 			tracing_iter_reset(&iter, cpu);
cpu                84 kernel/trace/trace_kdb.c 	for_each_tracing_cpu(cpu) {
cpu                85 kernel/trace/trace_kdb.c 		if (iter.buffer_iter[cpu]) {
cpu                86 kernel/trace/trace_kdb.c 			ring_buffer_read_finish(iter.buffer_iter[cpu]);
cpu                87 kernel/trace/trace_kdb.c 			iter.buffer_iter[cpu] = NULL;
cpu               101 kernel/trace/trace_kdb.c 	int cpu;
cpu               126 kernel/trace/trace_kdb.c 	for_each_tracing_cpu(cpu) {
cpu               127 kernel/trace/trace_kdb.c 		atomic_inc(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
cpu               141 kernel/trace/trace_kdb.c 	for_each_tracing_cpu(cpu) {
cpu               142 kernel/trace/trace_kdb.c 		atomic_dec(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
cpu               176 kernel/trace/trace_kprobe.c 	int cpu;
cpu               178 kernel/trace/trace_kprobe.c 	for_each_possible_cpu(cpu)
cpu               179 kernel/trace/trace_kprobe.c 		nhit += *per_cpu_ptr(tk->nhit, cpu);
cpu               479 kernel/trace/trace_output.c lat_print_generic(struct trace_seq *s, struct trace_entry *entry, int cpu)
cpu               486 kernel/trace/trace_output.c 			 comm, entry->pid, cpu);
cpu               587 kernel/trace/trace_output.c 	trace_seq_printf(s, "[%03d] ", iter->cpu);
cpu               628 kernel/trace/trace_output.c 			comm, entry->pid, iter->cpu, entry->flags,
cpu               631 kernel/trace/trace_output.c 		lat_print_generic(s, entry, iter->cpu);
cpu                73 kernel/trace/trace_sched_wakeup.c 	int cpu;
cpu                81 kernel/trace/trace_sched_wakeup.c 	cpu = raw_smp_processor_id();
cpu                82 kernel/trace/trace_sched_wakeup.c 	if (cpu != wakeup_current_cpu)
cpu                85 kernel/trace/trace_sched_wakeup.c 	*data = per_cpu_ptr(tr->trace_buffer.data, cpu);
cpu               366 kernel/trace/trace_sched_wakeup.c probe_wakeup_migrate_task(void *ignore, struct task_struct *task, int cpu)
cpu               371 kernel/trace/trace_sched_wakeup.c 	wakeup_current_cpu = cpu;
cpu               438 kernel/trace/trace_sched_wakeup.c 	int cpu;
cpu               461 kernel/trace/trace_sched_wakeup.c 	cpu = raw_smp_processor_id();
cpu               462 kernel/trace/trace_sched_wakeup.c 	disabled = atomic_inc_return(&per_cpu_ptr(wakeup_trace->trace_buffer.data, cpu)->disabled);
cpu               481 kernel/trace/trace_sched_wakeup.c 	T1 = ftrace_now(cpu);
cpu               497 kernel/trace/trace_sched_wakeup.c 	atomic_dec(&per_cpu_ptr(wakeup_trace->trace_buffer.data, cpu)->disabled);
cpu               529 kernel/trace/trace_sched_wakeup.c 	int cpu = smp_processor_id();
cpu               554 kernel/trace/trace_sched_wakeup.c 	disabled = atomic_inc_return(&per_cpu_ptr(wakeup_trace->trace_buffer.data, cpu)->disabled);
cpu               587 kernel/trace/trace_sched_wakeup.c 	data->preempt_timestamp = ftrace_now(cpu);
cpu               601 kernel/trace/trace_sched_wakeup.c 	atomic_dec(&per_cpu_ptr(wakeup_trace->trace_buffer.data, cpu)->disabled);
cpu                26 kernel/trace/trace_selftest.c static int trace_test_buffer_cpu(struct trace_buffer *buf, int cpu)
cpu                32 kernel/trace/trace_selftest.c 	while ((event = ring_buffer_consume(buf->buffer, cpu, NULL, NULL))) {
cpu                66 kernel/trace/trace_selftest.c 	int cpu, ret = 0;
cpu                82 kernel/trace/trace_selftest.c 	for_each_possible_cpu(cpu) {
cpu                83 kernel/trace/trace_selftest.c 		ret = trace_test_buffer_cpu(buf, cpu);
cpu               845 kernel/trace/trace_uprobe.c 	int cpu, err_cpu;
cpu               851 kernel/trace/trace_uprobe.c 	for_each_possible_cpu(cpu) {
cpu               852 kernel/trace/trace_uprobe.c 		struct page *p = alloc_pages_node(cpu_to_node(cpu),
cpu               855 kernel/trace/trace_uprobe.c 			err_cpu = cpu;
cpu               858 kernel/trace/trace_uprobe.c 		per_cpu_ptr(uprobe_cpu_buffer, cpu)->buf = page_address(p);
cpu               859 kernel/trace/trace_uprobe.c 		mutex_init(&per_cpu_ptr(uprobe_cpu_buffer, cpu)->mutex);
cpu               865 kernel/trace/trace_uprobe.c 	for_each_possible_cpu(cpu) {
cpu               866 kernel/trace/trace_uprobe.c 		if (cpu == err_cpu)
cpu               868 kernel/trace/trace_uprobe.c 		free_page((unsigned long)per_cpu_ptr(uprobe_cpu_buffer, cpu)->buf);
cpu               892 kernel/trace/trace_uprobe.c 	int cpu;
cpu               897 kernel/trace/trace_uprobe.c 		for_each_possible_cpu(cpu)
cpu               899 kernel/trace/trace_uprobe.c 							     cpu)->buf);
cpu               909 kernel/trace/trace_uprobe.c 	int cpu;
cpu               911 kernel/trace/trace_uprobe.c 	cpu = raw_smp_processor_id();
cpu               912 kernel/trace/trace_uprobe.c 	ucb = per_cpu_ptr(uprobe_cpu_buffer, cpu);
cpu                12 kernel/up.c    int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
cpu                17 kernel/up.c    	WARN_ON(cpu != 0);
cpu                27 kernel/up.c    int smp_call_function_single_async(int cpu, call_single_data_t *csd)
cpu                71 kernel/up.c    void on_each_cpu_cond_mask(bool (*cond_func)(int cpu, void *info),
cpu                87 kernel/up.c    void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info),
cpu                95 kernel/up.c    int smp_call_on_cpu(unsigned int cpu, int (*func)(void *), void *par, bool phys)
cpu                99 kernel/up.c    	if (cpu != 0)
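
The kernel/up.c hits above are the uniprocessor stubs of the cross-CPU call API; the SMP-side usage they emulate is sketched below. This is an illustrative sketch only, not code from the indexed files: the demo_* names are hypothetical, while smp_call_function_single() and raw_smp_processor_id() are the real interfaces.

#include <linux/smp.h>
#include <linux/printk.h>

/* Runs on the target CPU (IPI context on SMP, inline on UP). */
static void demo_where_am_i(void *info)
{
	*(int *)info = raw_smp_processor_id();
}

static int demo_query_cpu(int cpu)
{
	int where = -1;
	int ret;

	/* wait=1: block until the function has finished on @cpu. */
	ret = smp_call_function_single(cpu, demo_where_am_i, &where, 1);
	if (ret)
		return ret;

	pr_info("demo function ran on CPU %d\n", where);
	return 0;
}

On a UP build the stub shown above simply runs the function locally and warns if @cpu is not 0, which is why callers can use the same code on both configurations.
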
cpu               105 kernel/watchdog.c int __weak watchdog_nmi_enable(unsigned int cpu)
cpu               111 kernel/watchdog.c void __weak watchdog_nmi_disable(unsigned int cpu)
cpu               291 kernel/watchdog.c 	int cpu;
cpu               302 kernel/watchdog.c 	for_each_cpu(cpu, &watchdog_allowed_mask)
cpu               303 kernel/watchdog.c 		per_cpu(watchdog_touch_ts, cpu) = SOFTLOCKUP_RESET;
cpu               481 kernel/watchdog.c static void watchdog_enable(unsigned int cpu)
cpu               486 kernel/watchdog.c 	WARN_ON_ONCE(cpu != smp_processor_id());
cpu               504 kernel/watchdog.c 		watchdog_nmi_enable(cpu);
cpu               507 kernel/watchdog.c static void watchdog_disable(unsigned int cpu)
cpu               511 kernel/watchdog.c 	WARN_ON_ONCE(cpu != smp_processor_id());
cpu               518 kernel/watchdog.c 	watchdog_nmi_disable(cpu);
cpu               531 kernel/watchdog.c 	int cpu;
cpu               536 kernel/watchdog.c 	for_each_cpu(cpu, &watchdog_allowed_mask)
cpu               537 kernel/watchdog.c 		smp_call_on_cpu(cpu, softlockup_stop_fn, NULL, false);
cpu               550 kernel/watchdog.c 	int cpu;
cpu               553 kernel/watchdog.c 	for_each_cpu(cpu, &watchdog_allowed_mask)
cpu               554 kernel/watchdog.c 		smp_call_on_cpu(cpu, softlockup_start_fn, NULL, false);
cpu               557 kernel/watchdog.c int lockup_detector_online_cpu(unsigned int cpu)
cpu               559 kernel/watchdog.c 	if (cpumask_test_cpu(cpu, &watchdog_allowed_mask))
cpu               560 kernel/watchdog.c 		watchdog_enable(cpu);
cpu               564 kernel/watchdog.c int lockup_detector_offline_cpu(unsigned int cpu)
cpu               566 kernel/watchdog.c 	if (cpumask_test_cpu(cpu, &watchdog_allowed_mask))
cpu               567 kernel/watchdog.c 		watchdog_disable(cpu);
cpu               168 kernel/watchdog_hld.c 	unsigned int cpu = smp_processor_id();
cpu               176 kernel/watchdog_hld.c 	evt = perf_event_create_kernel_counter(wd_attr, cpu, NULL,
cpu               179 kernel/watchdog_hld.c 		pr_debug("Perf event create on CPU %d failed with %ld\n", cpu,
cpu               225 kernel/watchdog_hld.c 	int cpu;
cpu               227 kernel/watchdog_hld.c 	for_each_cpu(cpu, &dead_events_mask) {
cpu               228 kernel/watchdog_hld.c 		struct perf_event *event = per_cpu(dead_event, cpu);
cpu               236 kernel/watchdog_hld.c 		per_cpu(dead_event, cpu) = NULL;
cpu               248 kernel/watchdog_hld.c 	int cpu;
cpu               252 kernel/watchdog_hld.c 	for_each_online_cpu(cpu) {
cpu               253 kernel/watchdog_hld.c 		struct perf_event *event = per_cpu(watchdog_ev, cpu);
cpu               267 kernel/watchdog_hld.c 	int cpu;
cpu               274 kernel/watchdog_hld.c 	for_each_online_cpu(cpu) {
cpu               275 kernel/watchdog_hld.c 		struct perf_event *event = per_cpu(watchdog_ev, cpu);
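
The watchdog hits above (watchdog_enable/disable, lockup_detector_online_cpu/offline_cpu) follow the common pattern of per-CPU enable/disable hooks driven by the CPU hotplug state machine. The sketch below shows that pattern under assumed names: demo_* and the per-CPU flag are hypothetical, while cpuhp_setup_state()/cpuhp_remove_state() and CPUHP_AP_ONLINE_DYN are the real interfaces.

#include <linux/cpuhotplug.h>
#include <linux/module.h>
#include <linux/percpu.h>

static DEFINE_PER_CPU(bool, demo_enabled);
static enum cpuhp_state demo_state;

/* Called on @cpu as it comes online. */
static int demo_online_cpu(unsigned int cpu)
{
	per_cpu(demo_enabled, cpu) = true;
	return 0;
}

/* Called on @cpu shortly before it goes offline. */
static int demo_offline_cpu(unsigned int cpu)
{
	per_cpu(demo_enabled, cpu) = false;
	return 0;
}

static int __init demo_init(void)
{
	int ret;

	/* Dynamic state: returns the allocated state number on success. */
	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "demo:online",
				demo_online_cpu, demo_offline_cpu);
	if (ret < 0)
		return ret;
	demo_state = ret;
	return 0;
}

static void __exit demo_exit(void)
{
	cpuhp_remove_state(demo_state);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
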
cpu               149 kernel/workqueue.c 	int			cpu;		/* I: the associated cpu */
cpu               378 kernel/workqueue.c #define for_each_cpu_worker_pool(pool, cpu)				\
cpu               379 kernel/workqueue.c 	for ((pool) = &per_cpu(cpu_worker_pools, cpu)[0];		\
cpu               380 kernel/workqueue.c 	     (pool) < &per_cpu(cpu_worker_pools, cpu)[NR_STD_WORKER_POOLS]; \
cpu              1367 kernel/workqueue.c static int wq_select_unbound_cpu(int cpu)
cpu              1373 kernel/workqueue.c 		if (cpumask_test_cpu(cpu, wq_unbound_cpumask))
cpu              1374 kernel/workqueue.c 			return cpu;
cpu              1381 kernel/workqueue.c 		return cpu;
cpu              1388 kernel/workqueue.c 			return cpu;
cpu              1395 kernel/workqueue.c static void __queue_work(int cpu, struct workqueue_struct *wq,
cpu              1402 kernel/workqueue.c 	unsigned int req_cpu = cpu;
cpu              1423 kernel/workqueue.c 			cpu = wq_select_unbound_cpu(raw_smp_processor_id());
cpu              1424 kernel/workqueue.c 		pwq = unbound_pwq_by_node(wq, cpu_to_node(cpu));
cpu              1427 kernel/workqueue.c 			cpu = raw_smp_processor_id();
cpu              1428 kernel/workqueue.c 		pwq = per_cpu_ptr(wq->cpu_pwqs, cpu);
cpu              1471 kernel/workqueue.c 			  wq->name, cpu);
cpu              1512 kernel/workqueue.c bool queue_work_on(int cpu, struct workqueue_struct *wq,
cpu              1521 kernel/workqueue.c 		__queue_work(cpu, wq, work);
cpu              1541 kernel/workqueue.c 	int cpu;
cpu              1552 kernel/workqueue.c 	cpu = raw_smp_processor_id();
cpu              1553 kernel/workqueue.c 	if (node == cpu_to_node(cpu))
cpu              1554 kernel/workqueue.c 		return cpu;
cpu              1557 kernel/workqueue.c 	cpu = cpumask_any_and(cpumask_of_node(node), cpu_online_mask);
cpu              1560 kernel/workqueue.c 	return cpu < nr_cpu_ids ? cpu : WORK_CPU_UNBOUND;
cpu              1603 kernel/workqueue.c 		int cpu = workqueue_select_cpu_near(node);
cpu              1605 kernel/workqueue.c 		__queue_work(cpu, wq, work);
cpu              1619 kernel/workqueue.c 	__queue_work(dwork->cpu, dwork->wq, &dwork->work);
cpu              1623 kernel/workqueue.c static void __queue_delayed_work(int cpu, struct workqueue_struct *wq,
cpu              1641 kernel/workqueue.c 		__queue_work(cpu, wq, &dwork->work);
cpu              1646 kernel/workqueue.c 	dwork->cpu = cpu;
cpu              1649 kernel/workqueue.c 	if (unlikely(cpu != WORK_CPU_UNBOUND))
cpu              1650 kernel/workqueue.c 		add_timer_on(timer, cpu);
cpu              1666 kernel/workqueue.c bool queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
cpu              1677 kernel/workqueue.c 		__queue_delayed_work(cpu, wq, dwork, delay);
cpu              1704 kernel/workqueue.c bool mod_delayed_work_on(int cpu, struct workqueue_struct *wq,
cpu              1715 kernel/workqueue.c 		__queue_delayed_work(cpu, wq, dwork, delay);
cpu              1926 kernel/workqueue.c 	if (pool->cpu >= 0)
cpu              1927 kernel/workqueue.c 		snprintf(id_buf, sizeof(id_buf), "%d:%d%s", pool->cpu, id,
cpu              2188 kernel/workqueue.c 		     raw_smp_processor_id() != pool->cpu);
cpu              3192 kernel/workqueue.c 		__queue_work(dwork->cpu, dwork->wq, &dwork->work);
cpu              3285 kernel/workqueue.c 	int cpu;
cpu              3294 kernel/workqueue.c 	for_each_online_cpu(cpu) {
cpu              3295 kernel/workqueue.c 		struct work_struct *work = per_cpu_ptr(works, cpu);
cpu              3298 kernel/workqueue.c 		schedule_work_on(cpu, work);
cpu              3301 kernel/workqueue.c 	for_each_online_cpu(cpu)
cpu              3302 kernel/workqueue.c 		flush_work(per_cpu_ptr(works, cpu));
cpu              3423 kernel/workqueue.c 	pool->cpu = -1;
cpu              3533 kernel/workqueue.c 	if (WARN_ON(!(pool->cpu < 0)) ||
cpu              4082 kernel/workqueue.c static void wq_update_unbound_numa(struct workqueue_struct *wq, int cpu,
cpu              4085 kernel/workqueue.c 	int node = cpu_to_node(cpu);
cpu              4086 kernel/workqueue.c 	int cpu_off = online ? -1 : cpu;
cpu              4148 kernel/workqueue.c 	int cpu, ret;
cpu              4155 kernel/workqueue.c 		for_each_possible_cpu(cpu) {
cpu              4157 kernel/workqueue.c 				per_cpu_ptr(wq->cpu_pwqs, cpu);
cpu              4159 kernel/workqueue.c 				per_cpu(cpu_worker_pools, cpu);
cpu              4502 kernel/workqueue.c bool workqueue_congested(int cpu, struct workqueue_struct *wq)
cpu              4510 kernel/workqueue.c 	if (cpu == WORK_CPU_UNBOUND)
cpu              4511 kernel/workqueue.c 		cpu = smp_processor_id();
cpu              4514 kernel/workqueue.c 		pwq = per_cpu_ptr(wq->cpu_pwqs, cpu);
cpu              4516 kernel/workqueue.c 		pwq = unbound_pwq_by_node(wq, cpu_to_node(cpu));
cpu              4863 kernel/workqueue.c static void unbind_workers(int cpu)
cpu              4868 kernel/workqueue.c 	for_each_cpu_worker_pool(pool, cpu) {
cpu              4991 kernel/workqueue.c static void restore_unbound_workers_cpumask(struct worker_pool *pool, int cpu)
cpu              4999 kernel/workqueue.c 	if (!cpumask_test_cpu(cpu, pool->attrs->cpumask))
cpu              5009 kernel/workqueue.c int workqueue_prepare_cpu(unsigned int cpu)
cpu              5013 kernel/workqueue.c 	for_each_cpu_worker_pool(pool, cpu) {
cpu              5022 kernel/workqueue.c int workqueue_online_cpu(unsigned int cpu)
cpu              5033 kernel/workqueue.c 		if (pool->cpu == cpu)
cpu              5035 kernel/workqueue.c 		else if (pool->cpu < 0)
cpu              5036 kernel/workqueue.c 			restore_unbound_workers_cpumask(pool, cpu);
cpu              5043 kernel/workqueue.c 		wq_update_unbound_numa(wq, cpu, true);
cpu              5049 kernel/workqueue.c int workqueue_offline_cpu(unsigned int cpu)
cpu              5054 kernel/workqueue.c 	if (WARN_ON(cpu != smp_processor_id()))
cpu              5057 kernel/workqueue.c 	unbind_workers(cpu);
cpu              5062 kernel/workqueue.c 		wq_update_unbound_numa(wq, cpu, false);
cpu              5093 kernel/workqueue.c long work_on_cpu(int cpu, long (*fn)(void *), void *arg)
cpu              5098 kernel/workqueue.c 	schedule_work_on(cpu, &wfc.work);
cpu              5116 kernel/workqueue.c long work_on_cpu_safe(int cpu, long (*fn)(void *), void *arg)
cpu              5121 kernel/workqueue.c 	if (cpu_online(cpu))
cpu              5122 kernel/workqueue.c 		ret = work_on_cpu(cpu, fn, arg);
cpu              5720 kernel/workqueue.c 	int cpu;
cpu              5723 kernel/workqueue.c 	for_each_possible_cpu(cpu)
cpu              5724 kernel/workqueue.c 		per_cpu(wq_watchdog_touched_cpu, cpu) = jiffies;
cpu              5754 kernel/workqueue.c 		if (pool->cpu >= 0) {
cpu              5757 kernel/workqueue.c 						  pool->cpu));
cpu              5781 kernel/workqueue.c notrace void wq_watchdog_touch(int cpu)
cpu              5783 kernel/workqueue.c 	if (cpu >= 0)
cpu              5784 kernel/workqueue.c 		per_cpu(wq_watchdog_touched_cpu, cpu) = jiffies;
cpu              5842 kernel/workqueue.c 	int node, cpu;
cpu              5867 kernel/workqueue.c 	for_each_possible_cpu(cpu) {
cpu              5868 kernel/workqueue.c 		node = cpu_to_node(cpu);
cpu              5870 kernel/workqueue.c 			pr_warn("workqueue: NUMA node mapping not available for cpu%d, disabling NUMA support\n", cpu);
cpu              5874 kernel/workqueue.c 		cpumask_set_cpu(cpu, tbl[node]);
cpu              5895 kernel/workqueue.c 	int i, cpu;
cpu              5905 kernel/workqueue.c 	for_each_possible_cpu(cpu) {
cpu              5909 kernel/workqueue.c 		for_each_cpu_worker_pool(pool, cpu) {
cpu              5911 kernel/workqueue.c 			pool->cpu = cpu;
cpu              5912 kernel/workqueue.c 			cpumask_copy(pool->attrs->cpumask, cpumask_of(cpu));
cpu              5914 kernel/workqueue.c 			pool->node = cpu_to_node(cpu);
cpu              5975 kernel/workqueue.c 	int cpu, bkt;
cpu              5990 kernel/workqueue.c 	for_each_possible_cpu(cpu) {
cpu              5991 kernel/workqueue.c 		for_each_cpu_worker_pool(pool, cpu) {
cpu              5992 kernel/workqueue.c 			pool->node = cpu_to_node(cpu);
cpu              6006 kernel/workqueue.c 	for_each_online_cpu(cpu) {
cpu              6007 kernel/workqueue.c 		for_each_cpu_worker_pool(pool, cpu) {
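
Many of the workqueue hits above target work at a specific CPU (queue_work_on, schedule_work_on, schedule_on_each_cpu). A caller-side sketch of the "run something on every online CPU and wait" pattern, with hypothetical demo_* names, looks roughly like this; schedule_work_on(), flush_work() and the per-CPU accessors are the real interfaces.

#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/percpu.h>
#include <linux/printk.h>
#include <linux/smp.h>
#include <linux/workqueue.h>

static DEFINE_PER_CPU(struct work_struct, demo_work);

static void demo_work_fn(struct work_struct *work)
{
	pr_info("demo work running on CPU %d\n", raw_smp_processor_id());
}

static void demo_run_on_each_cpu(void)
{
	int cpu;

	cpus_read_lock();		/* keep the online mask stable */
	for_each_online_cpu(cpu) {
		struct work_struct *work = per_cpu_ptr(&demo_work, cpu);

		INIT_WORK(work, demo_work_fn);
		schedule_work_on(cpu, work);
	}
	for_each_online_cpu(cpu)
		flush_work(per_cpu_ptr(&demo_work, cpu));
	cpus_read_unlock();
}
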
cpu                28 lib/cpu_rmap.c 	unsigned int cpu;
cpu                52 lib/cpu_rmap.c 	for_each_possible_cpu(cpu) {
cpu                53 lib/cpu_rmap.c 		rmap->near[cpu].index = cpu % size;
cpu                54 lib/cpu_rmap.c 		rmap->near[cpu].dist = CPU_RMAP_DIST_INF;
cpu                94 lib/cpu_rmap.c static bool cpu_rmap_copy_neigh(struct cpu_rmap *rmap, unsigned int cpu,
cpu               100 lib/cpu_rmap.c 		if (rmap->near[cpu].dist > dist &&
cpu               102 lib/cpu_rmap.c 			rmap->near[cpu].index = rmap->near[neigh].index;
cpu               103 lib/cpu_rmap.c 			rmap->near[cpu].dist = dist;
cpu               114 lib/cpu_rmap.c 	unsigned int cpu;
cpu               118 lib/cpu_rmap.c 	for_each_possible_cpu(cpu) {
cpu               119 lib/cpu_rmap.c 		index = rmap->near[cpu].index;
cpu               121 lib/cpu_rmap.c 			cpu, index, rmap->near[cpu].dist);
cpu               159 lib/cpu_rmap.c 	unsigned int cpu;
cpu               167 lib/cpu_rmap.c 	for_each_online_cpu(cpu) {
cpu               168 lib/cpu_rmap.c 		if (rmap->near[cpu].index == index) {
cpu               169 lib/cpu_rmap.c 			rmap->near[cpu].dist = CPU_RMAP_DIST_INF;
cpu               170 lib/cpu_rmap.c 			cpumask_set_cpu(cpu, update_mask);
cpu               179 lib/cpu_rmap.c 	for_each_cpu(cpu, affinity) {
cpu               180 lib/cpu_rmap.c 		rmap->near[cpu].index = index;
cpu               181 lib/cpu_rmap.c 		rmap->near[cpu].dist = 0;
cpu               183 lib/cpu_rmap.c 			   cpumask_of_node(cpu_to_node(cpu)));
cpu               189 lib/cpu_rmap.c 	for_each_cpu(cpu, update_mask) {
cpu               190 lib/cpu_rmap.c 		if (cpu_rmap_copy_neigh(rmap, cpu,
cpu               191 lib/cpu_rmap.c 					topology_sibling_cpumask(cpu), 1))
cpu               193 lib/cpu_rmap.c 		if (cpu_rmap_copy_neigh(rmap, cpu,
cpu               194 lib/cpu_rmap.c 					topology_core_cpumask(cpu), 2))
cpu               196 lib/cpu_rmap.c 		if (cpu_rmap_copy_neigh(rmap, cpu,
cpu               197 lib/cpu_rmap.c 					cpumask_of_node(cpu_to_node(cpu)), 3))
cpu                53 lib/cpumask.c  int cpumask_any_but(const struct cpumask *mask, unsigned int cpu)
cpu                57 lib/cpumask.c  	cpumask_check(cpu);
cpu                59 lib/cpumask.c  		if (i != cpu)
cpu               208 lib/cpumask.c  	int cpu;
cpu               214 lib/cpumask.c  		for_each_cpu(cpu, cpu_online_mask)
cpu               216 lib/cpumask.c  				return cpu;
cpu               219 lib/cpumask.c  		for_each_cpu_and(cpu, cpumask_of_node(node), cpu_online_mask)
cpu               221 lib/cpumask.c  				return cpu;
cpu               223 lib/cpumask.c  		for_each_cpu(cpu, cpu_online_mask) {
cpu               225 lib/cpumask.c  			if (cpumask_test_cpu(cpu, cpumask_of_node(node)))
cpu               229 lib/cpumask.c  				return cpu;
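
The lib/cpumask.c hits above implement helpers such as cpumask_any_but() and the node-local spreading logic; a typical caller-side use of a dynamically allocated mask is sketched below under hypothetical demo_* names, with the cpumask helpers themselves being the real interfaces.

#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/printk.h>
#include <linux/slab.h>

static int demo_pick_peer(int self)
{
	cpumask_var_t peers;
	int cpu, picked;

	if (!zalloc_cpumask_var(&peers, GFP_KERNEL))
		return -ENOMEM;

	/* All online CPUs except @self. */
	cpumask_copy(peers, cpu_online_mask);
	cpumask_clear_cpu(self, peers);

	for_each_cpu(cpu, peers)
		pr_info("peer CPU %d\n", cpu);

	/* Equivalent single pick: any online CPU other than @self. */
	picked = cpumask_any_but(cpu_online_mask, self);

	free_cpumask_var(peers);
	return picked < nr_cpu_ids ? picked : -ENODEV;
}
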
cpu              1006 lib/debugobjects.c 	int cpu, obj_percpu_free = 0;
cpu              1008 lib/debugobjects.c 	for_each_possible_cpu(cpu)
cpu              1009 lib/debugobjects.c 		obj_percpu_free += per_cpu(percpu_obj_pool.obj_free, cpu);
cpu              1355 lib/debugobjects.c 	int cpu, extras;
cpu              1366 lib/debugobjects.c 	for_each_possible_cpu(cpu)
cpu              1367 lib/debugobjects.c 		INIT_HLIST_HEAD(&per_cpu(percpu_obj_pool.free_objs, cpu));
cpu                93 lib/dump_stack.c 	int cpu;
cpu               101 lib/dump_stack.c 	cpu = smp_processor_id();
cpu               102 lib/dump_stack.c 	old = atomic_cmpxchg(&dump_lock, -1, cpu);
cpu               105 lib/dump_stack.c 	} else if (old == cpu) {
cpu               188 lib/irq_poll.c static int irq_poll_cpu_dead(unsigned int cpu)
cpu               195 lib/irq_poll.c 	list_splice_init(&per_cpu(blk_cpu_iopoll, cpu),
cpu                90 lib/nmi_backtrace.c 	int cpu = smp_processor_id();
cpu                92 lib/nmi_backtrace.c 	if (cpumask_test_cpu(cpu, to_cpumask(backtrace_mask))) {
cpu                95 lib/nmi_backtrace.c 				cpu, (void *)instruction_pointer(regs));
cpu                97 lib/nmi_backtrace.c 			pr_warn("NMI backtrace for cpu %d\n", cpu);
cpu               103 lib/nmi_backtrace.c 		cpumask_clear_cpu(cpu, to_cpumask(backtrace_mask));
cpu               138 lib/percpu-refcount.c 	int cpu;
cpu               140 lib/percpu-refcount.c 	for_each_possible_cpu(cpu)
cpu               141 lib/percpu-refcount.c 		count += *per_cpu_ptr(percpu_count, cpu);
cpu               197 lib/percpu-refcount.c 	int cpu;
cpu               215 lib/percpu-refcount.c 	for_each_possible_cpu(cpu)
cpu               216 lib/percpu-refcount.c 		*per_cpu_ptr(percpu_count, cpu) = 0;
cpu                62 lib/percpu_counter.c 	int cpu;
cpu                66 lib/percpu_counter.c 	for_each_possible_cpu(cpu) {
cpu                67 lib/percpu_counter.c 		s32 *pcount = per_cpu_ptr(fbc->counters, cpu);
cpu               108 lib/percpu_counter.c 	int cpu;
cpu               113 lib/percpu_counter.c 	for_each_online_cpu(cpu) {
cpu               114 lib/percpu_counter.c 		s32 *pcount = per_cpu_ptr(fbc->counters, cpu);
cpu               168 lib/percpu_counter.c static int compute_batch_value(unsigned int cpu)
cpu               176 lib/percpu_counter.c static int percpu_counter_cpu_dead(unsigned int cpu)
cpu               181 lib/percpu_counter.c 	compute_batch_value(cpu);
cpu               188 lib/percpu_counter.c 		pcount = per_cpu_ptr(fbc->counters, cpu);
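
The lib/percpu_counter.c hits above are the implementation side (per-CPU deltas folded into a shared s64). Typical consumer usage is sketched below; the demo_events counter is hypothetical, while percpu_counter_init/inc/add/sum/destroy are the real API.

#include <linux/gfp.h>
#include <linux/percpu_counter.h>
#include <linux/printk.h>

static struct percpu_counter demo_events;

static int demo_counter_setup(void)
{
	return percpu_counter_init(&demo_events, 0, GFP_KERNEL);
}

static void demo_counter_use(void)
{
	percpu_counter_inc(&demo_events);	/* cheap per-CPU fast path */
	percpu_counter_add(&demo_events, 42);

	/* Folds every CPU's delta into a precise total. */
	pr_info("events: %lld\n",
		(long long)percpu_counter_sum(&demo_events));
}

static void demo_counter_teardown(void)
{
	percpu_counter_destroy(&demo_events);
}
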
cpu              1587 lib/radix-tree.c static int radix_tree_cpu_dead(unsigned int cpu)
cpu              1593 lib/radix-tree.c 	rtp = &per_cpu(radix_tree_preloads, cpu);
cpu               577 lib/sbitmap.c  			 unsigned int cpu)
cpu               602 lib/sbitmap.c  		*per_cpu_ptr(sbq->alloc_hint, cpu) = nr;
cpu               362 lib/test_vmalloc.c 	int cpu;
cpu               391 lib/test_vmalloc.c 	if (set_cpus_allowed_ptr(current, cpumask_of(t->cpu)) < 0)
cpu               392 lib/test_vmalloc.c 		pr_err("Failed to set affinity to %d CPU\n", t->cpu);
cpu               418 lib/test_vmalloc.c 				per_cpu_test_data[t->cpu][index].test_passed++;
cpu               420 lib/test_vmalloc.c 				per_cpu_test_data[t->cpu][index].test_failed++;
cpu               429 lib/test_vmalloc.c 		per_cpu_test_data[t->cpu][index].time = delta;
cpu               469 lib/test_vmalloc.c 	int cpu, ret;
cpu               481 lib/test_vmalloc.c 	for_each_cpu(cpu, &cpus_run_test_mask) {
cpu               482 lib/test_vmalloc.c 		struct test_driver *t = &per_cpu_test_driver[cpu];
cpu               484 lib/test_vmalloc.c 		t->cpu = cpu;
cpu               485 lib/test_vmalloc.c 		t->task = kthread_run(test_func, t, "vmalloc_test/%d", cpu);
cpu               491 lib/test_vmalloc.c 			pr_err("Failed to start kthread for %d CPU\n", cpu);
cpu               509 lib/test_vmalloc.c 	for_each_cpu(cpu, &cpus_run_test_mask) {
cpu               510 lib/test_vmalloc.c 		struct test_driver *t = &per_cpu_test_driver[cpu];
cpu               523 lib/test_vmalloc.c 				per_cpu_test_data[cpu][i].test_passed,
cpu               524 lib/test_vmalloc.c 				per_cpu_test_data[cpu][i].test_failed,
cpu               526 lib/test_vmalloc.c 				per_cpu_test_data[cpu][i].time);
cpu               530 lib/test_vmalloc.c 			cpu, t->stop - t->start);
cpu              2238 mm/compaction.c 			int cpu;
cpu              2243 mm/compaction.c 				cpu = get_cpu();
cpu              2244 mm/compaction.c 				lru_add_drain_cpu(cpu);
cpu              2699 mm/compaction.c static int kcompactd_cpu_online(unsigned int cpu)
cpu                45 mm/kasan/tags.c 	int cpu;
cpu                47 mm/kasan/tags.c 	for_each_possible_cpu(cpu)
cpu                48 mm/kasan/tags.c 		per_cpu(prng_state, cpu) = (u32)get_cycles();
cpu               909 mm/kmemleak.c  	unsigned int cpu;
cpu               918 mm/kmemleak.c  		for_each_possible_cpu(cpu)
cpu               919 mm/kmemleak.c  			create_object((unsigned long)per_cpu_ptr(ptr, cpu),
cpu               992 mm/kmemleak.c  	unsigned int cpu;
cpu               997 mm/kmemleak.c  		for_each_possible_cpu(cpu)
cpu               999 mm/kmemleak.c  								      cpu));
cpu               838 mm/memcontrol.c 	int cpu;
cpu               840 mm/memcontrol.c 	for_each_possible_cpu(cpu)
cpu               841 mm/memcontrol.c 		x += per_cpu(memcg->vmstats_local->events[event], cpu);
cpu              2270 mm/memcontrol.c 	int cpu, curcpu;
cpu              2282 mm/memcontrol.c 	for_each_online_cpu(cpu) {
cpu              2283 mm/memcontrol.c 		struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
cpu              2296 mm/memcontrol.c 			if (cpu == curcpu)
cpu              2299 mm/memcontrol.c 				schedule_work_on(cpu, &stock->work);
cpu              2306 mm/memcontrol.c static int memcg_hotplug_cpu_dead(unsigned int cpu)
cpu              2311 mm/memcontrol.c 	stock = &per_cpu(memcg_stock, cpu);
cpu              3477 mm/memcontrol.c 	int node, cpu, i;
cpu              3479 mm/memcontrol.c 	for_each_online_cpu(cpu)
cpu              3481 mm/memcontrol.c 			stat[i] += per_cpu(memcg->vmstats_percpu->stat[i], cpu);
cpu              3494 mm/memcontrol.c 		for_each_online_cpu(cpu)
cpu              3497 mm/memcontrol.c 					pn->lruvec_stat_cpu->count[i], cpu);
cpu              3509 mm/memcontrol.c 	int cpu, i;
cpu              3514 mm/memcontrol.c 	for_each_online_cpu(cpu)
cpu              3517 mm/memcontrol.c 					     cpu);
cpu              4411 mm/memcontrol.c 	int cpu;
cpu              4413 mm/memcontrol.c 	for_each_online_cpu(cpu)
cpu              4414 mm/memcontrol.c 		x += per_cpu_ptr(memcg->vmstats_percpu, cpu)->stat[idx];
cpu              6970 mm/memcontrol.c 	int cpu, node;
cpu              6986 mm/memcontrol.c 	for_each_possible_cpu(cpu)
cpu              6987 mm/memcontrol.c 		INIT_WORK(&per_cpu_ptr(&memcg_stock, cpu)->work,
cpu              1502 mm/memory-failure.c 	int cpu;
cpu              1504 mm/memory-failure.c 	for_each_possible_cpu(cpu) {
cpu              1505 mm/memory-failure.c 		mf_cpu = &per_cpu(memory_failure_cpu, cpu);
cpu               907 mm/memory_hotplug.c 		int cpu;
cpu               916 mm/memory_hotplug.c 		for_each_online_cpu(cpu) {
cpu               919 mm/memory_hotplug.c 			p = per_cpu_ptr(pgdat->per_cpu_nodestats, cpu);
cpu              1652 mm/memory_hotplug.c 	int cpu;
cpu              1654 mm/memory_hotplug.c 	for_each_present_cpu(cpu) {
cpu              1655 mm/memory_hotplug.c 		if (cpu_to_node(cpu) == pgdat->node_id)
cpu              2057 mm/page-writeback.c static int page_writeback_cpu_online(unsigned int cpu)
cpu              2817 mm/page_alloc.c static void drain_pages_zone(unsigned int cpu, struct zone *zone)
cpu              2824 mm/page_alloc.c 	pset = per_cpu_ptr(zone->pageset, cpu);
cpu              2839 mm/page_alloc.c static void drain_pages(unsigned int cpu)
cpu              2844 mm/page_alloc.c 		drain_pages_zone(cpu, zone);
cpu              2856 mm/page_alloc.c 	int cpu = smp_processor_id();
cpu              2859 mm/page_alloc.c 		drain_pages_zone(cpu, zone);
cpu              2861 mm/page_alloc.c 		drain_pages(cpu);
cpu              2891 mm/page_alloc.c 	int cpu;
cpu              2923 mm/page_alloc.c 	for_each_online_cpu(cpu) {
cpu              2929 mm/page_alloc.c 			pcp = per_cpu_ptr(zone->pageset, cpu);
cpu              2934 mm/page_alloc.c 				pcp = per_cpu_ptr(z->pageset, cpu);
cpu              2943 mm/page_alloc.c 			cpumask_set_cpu(cpu, &cpus_with_pcps);
cpu              2945 mm/page_alloc.c 			cpumask_clear_cpu(cpu, &cpus_with_pcps);
cpu              2948 mm/page_alloc.c 	for_each_cpu(cpu, &cpus_with_pcps) {
cpu              2949 mm/page_alloc.c 		struct pcpu_drain *drain = per_cpu_ptr(&pcpu_drain, cpu);
cpu              2953 mm/page_alloc.c 		queue_work_on(cpu, mm_percpu_wq, &drain->work);
cpu              2955 mm/page_alloc.c 	for_each_cpu(cpu, &cpus_with_pcps)
cpu              2956 mm/page_alloc.c 		flush_work(&per_cpu_ptr(&pcpu_drain, cpu)->work);
cpu              5251 mm/page_alloc.c 	int cpu;
cpu              5259 mm/page_alloc.c 		for_each_online_cpu(cpu)
cpu              5260 mm/page_alloc.c 			free_pcp += per_cpu_ptr(zone->pageset, cpu)->pcp.count;
cpu              5345 mm/page_alloc.c 		for_each_online_cpu(cpu)
cpu              5346 mm/page_alloc.c 			free_pcp += per_cpu_ptr(zone->pageset, cpu)->pcp.count;
cpu              5745 mm/page_alloc.c 	int __maybe_unused cpu;
cpu              5777 mm/page_alloc.c 		for_each_online_cpu(cpu)
cpu              5778 mm/page_alloc.c 			set_cpu_numa_mem(cpu, local_memory_node(cpu_to_node(cpu)));
cpu              5788 mm/page_alloc.c 	int cpu;
cpu              5805 mm/page_alloc.c 	for_each_possible_cpu(cpu)
cpu              5806 mm/page_alloc.c 		setup_pageset(&per_cpu(boot_pageset, cpu), 0);
cpu              6159 mm/page_alloc.c static void __meminit zone_pageset_init(struct zone *zone, int cpu)
cpu              6161 mm/page_alloc.c 	struct per_cpu_pageset *pcp = per_cpu_ptr(zone->pageset, cpu);
cpu              6169 mm/page_alloc.c 	int cpu;
cpu              6171 mm/page_alloc.c 	for_each_possible_cpu(cpu)
cpu              6172 mm/page_alloc.c 		zone_pageset_init(zone, cpu);
cpu              7611 mm/page_alloc.c static int page_alloc_cpu_dead(unsigned int cpu)
cpu              7614 mm/page_alloc.c 	lru_add_drain_cpu(cpu);
cpu              7615 mm/page_alloc.c 	drain_pages(cpu);
cpu              7623 mm/page_alloc.c 	vm_events_fold_cpu(cpu);
cpu              7632 mm/page_alloc.c 	cpu_vm_stats_fold(cpu);
cpu              8031 mm/page_alloc.c 		unsigned int cpu;
cpu              8033 mm/page_alloc.c 		for_each_possible_cpu(cpu)
cpu              8035 mm/page_alloc.c 					per_cpu_ptr(zone->pageset, cpu));
cpu              8534 mm/page_alloc.c 	unsigned cpu;
cpu              8536 mm/page_alloc.c 	for_each_possible_cpu(cpu)
cpu              8538 mm/page_alloc.c 				per_cpu_ptr(zone->pageset, cpu));
cpu              8545 mm/page_alloc.c 	int cpu;
cpu              8551 mm/page_alloc.c 		for_each_online_cpu(cpu) {
cpu              8552 mm/page_alloc.c 			pset = per_cpu_ptr(zone->pageset, cpu);
cpu                13 mm/percpu-vm.c 				    unsigned int cpu, int page_idx)
cpu                18 mm/percpu-vm.c 	return vmalloc_to_page((void *)pcpu_chunk_addr(chunk, cpu, page_idx));
cpu                56 mm/percpu-vm.c 	unsigned int cpu;
cpu                59 mm/percpu-vm.c 	for_each_possible_cpu(cpu) {
cpu                61 mm/percpu-vm.c 			struct page *page = pages[pcpu_page_idx(cpu, i)];
cpu                85 mm/percpu-vm.c 	unsigned int cpu, tcpu;
cpu                90 mm/percpu-vm.c 	for_each_possible_cpu(cpu) {
cpu                92 mm/percpu-vm.c 			struct page **pagep = &pages[pcpu_page_idx(cpu, i)];
cpu                94 mm/percpu-vm.c 			*pagep = alloc_pages_node(cpu_to_node(cpu), gfp, 0);
cpu               103 mm/percpu-vm.c 		__free_page(pages[pcpu_page_idx(cpu, i)]);
cpu               106 mm/percpu-vm.c 		if (tcpu == cpu)
cpu               155 mm/percpu-vm.c 	unsigned int cpu;
cpu               158 mm/percpu-vm.c 	for_each_possible_cpu(cpu) {
cpu               162 mm/percpu-vm.c 			page = pcpu_chunk_page(chunk, cpu, i);
cpu               164 mm/percpu-vm.c 			pages[pcpu_page_idx(cpu, i)] = page;
cpu               166 mm/percpu-vm.c 		__pcpu_unmap_pages(pcpu_chunk_addr(chunk, cpu, page_start),
cpu               216 mm/percpu-vm.c 	unsigned int cpu, tcpu;
cpu               219 mm/percpu-vm.c 	for_each_possible_cpu(cpu) {
cpu               220 mm/percpu-vm.c 		err = __pcpu_map_pages(pcpu_chunk_addr(chunk, cpu, page_start),
cpu               221 mm/percpu-vm.c 				       &pages[pcpu_page_idx(cpu, page_start)],
cpu               227 mm/percpu-vm.c 			pcpu_set_page_chunk(pages[pcpu_page_idx(cpu, i)],
cpu               233 mm/percpu-vm.c 		if (tcpu == cpu)
cpu               256 mm/percpu.c    static int __maybe_unused pcpu_page_idx(unsigned int cpu, int page_idx)
cpu               258 mm/percpu.c    	return pcpu_unit_map[cpu] * pcpu_unit_pages + page_idx;
cpu               261 mm/percpu.c    static unsigned long pcpu_unit_page_offset(unsigned int cpu, int page_idx)
cpu               263 mm/percpu.c    	return pcpu_unit_offsets[cpu] + (page_idx << PAGE_SHIFT);
cpu               267 mm/percpu.c    				     unsigned int cpu, int page_idx)
cpu               270 mm/percpu.c    	       pcpu_unit_page_offset(cpu, page_idx);
cpu              1596 mm/percpu.c    	int slot, off, cpu, ret;
cpu              1733 mm/percpu.c    	for_each_possible_cpu(cpu)
cpu              1734 mm/percpu.c    		memset((void *)pcpu_chunk_addr(chunk, cpu, 0) + off, 0, size);
cpu              2002 mm/percpu.c    	unsigned int cpu;
cpu              2004 mm/percpu.c    	for_each_possible_cpu(cpu) {
cpu              2005 mm/percpu.c    		void *start = per_cpu_ptr(base, cpu);
cpu              2066 mm/percpu.c    	unsigned int cpu;
cpu              2084 mm/percpu.c    		for_each_possible_cpu(cpu) {
cpu              2085 mm/percpu.c    			void *start = per_cpu_ptr(base, cpu);
cpu              2280 mm/percpu.c    	unsigned int cpu;
cpu              2341 mm/percpu.c    	for (cpu = 0; cpu < nr_cpu_ids; cpu++)
cpu              2342 mm/percpu.c    		unit_map[cpu] = UINT_MAX;
cpu              2354 mm/percpu.c    			cpu = gi->cpu_map[i];
cpu              2355 mm/percpu.c    			if (cpu == NR_CPUS)
cpu              2358 mm/percpu.c    			PCPU_SETUP_BUG_ON(cpu >= nr_cpu_ids);
cpu              2359 mm/percpu.c    			PCPU_SETUP_BUG_ON(!cpu_possible(cpu));
cpu              2360 mm/percpu.c    			PCPU_SETUP_BUG_ON(unit_map[cpu] != UINT_MAX);
cpu              2362 mm/percpu.c    			unit_map[cpu] = unit + i;
cpu              2363 mm/percpu.c    			unit_off[cpu] = gi->base_offset + i * ai->unit_size;
cpu              2367 mm/percpu.c    			    unit_off[cpu] < unit_off[pcpu_low_unit_cpu])
cpu              2368 mm/percpu.c    				pcpu_low_unit_cpu = cpu;
cpu              2370 mm/percpu.c    			    unit_off[cpu] > unit_off[pcpu_high_unit_cpu])
cpu              2371 mm/percpu.c    				pcpu_high_unit_cpu = cpu;
cpu              2376 mm/percpu.c    	for_each_possible_cpu(cpu)
cpu              2377 mm/percpu.c    		PCPU_SETUP_BUG_ON(unit_map[cpu] == UINT_MAX);
cpu              2541 mm/percpu.c    	unsigned int cpu, tcpu;
cpu              2570 mm/percpu.c    	for_each_possible_cpu(cpu) {
cpu              2574 mm/percpu.c    			if (cpu == tcpu)
cpu              2577 mm/percpu.c    			    (cpu_distance_fn(cpu, tcpu) > LOCAL_DISTANCE ||
cpu              2578 mm/percpu.c    			     cpu_distance_fn(tcpu, cpu) > LOCAL_DISTANCE)) {
cpu              2584 mm/percpu.c    		group_map[cpu] = group;
cpu              2653 mm/percpu.c    		for_each_possible_cpu(cpu)
cpu              2654 mm/percpu.c    			if (group_map[cpu] == group)
cpu              2655 mm/percpu.c    				gi->cpu_map[gi->nr_units++] = cpu;
cpu              2729 mm/percpu.c    		unsigned int cpu = NR_CPUS;
cpu              2732 mm/percpu.c    		for (i = 0; i < gi->nr_units && cpu == NR_CPUS; i++)
cpu              2733 mm/percpu.c    			cpu = gi->cpu_map[i];
cpu              2734 mm/percpu.c    		BUG_ON(cpu == NR_CPUS);
cpu              2737 mm/percpu.c    		ptr = alloc_fn(cpu, gi->nr_units * ai->unit_size, atom_size);
cpu              2868 mm/percpu.c    		unsigned int cpu = ai->groups[0].cpu_map[unit];
cpu              2872 mm/percpu.c    			ptr = alloc_fn(cpu, PAGE_SIZE, PAGE_SIZE);
cpu              2875 mm/percpu.c    						psize_str, cpu);
cpu              2949 mm/percpu.c    static void * __init pcpu_dfl_fc_alloc(unsigned int cpu, size_t size,
cpu              2963 mm/percpu.c    	unsigned int cpu;
cpu              2977 mm/percpu.c    	for_each_possible_cpu(cpu)
cpu              2978 mm/percpu.c    		__per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu];
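
The mm/percpu.c and mm/percpu-vm.c hits above are the per-CPU allocator itself. Consumers normally see only alloc_percpu()/per_cpu_ptr()/free_percpu(), as in the sketch below; struct demo_stats and the demo_* helpers are hypothetical, the accessors are the real API.

#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/percpu.h>
#include <linux/types.h>

struct demo_stats {
	u64 packets;
	u64 bytes;
};

static struct demo_stats __percpu *demo_stats;

static int demo_stats_alloc(void)
{
	demo_stats = alloc_percpu(struct demo_stats);
	return demo_stats ? 0 : -ENOMEM;
}

/* Fast path: update this CPU's instance with preemption disabled. */
static void demo_stats_account(unsigned int len)
{
	struct demo_stats *s = get_cpu_ptr(demo_stats);

	s->packets++;
	s->bytes += len;
	put_cpu_ptr(demo_stats);
}

/* Slow path: fold all CPUs' instances into one total. */
static u64 demo_stats_total_packets(void)
{
	u64 sum = 0;
	int cpu;

	for_each_possible_cpu(cpu)
		sum += per_cpu_ptr(demo_stats, cpu)->packets;
	return sum;
}

static void demo_stats_free(void)
{
	free_percpu(demo_stats);
}

Iterating over possible rather than online CPUs in the fold keeps counts accumulated on CPUs that have since gone offline, which is the same convention most of the indexed call sites follow.
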
cpu               484 mm/slab.c      static void init_reap_node(int cpu)
cpu               486 mm/slab.c      	per_cpu(slab_reap_node, cpu) = next_node_in(cpu_to_mem(cpu),
cpu               499 mm/slab.c      #define init_reap_node(cpu) do { } while (0)
cpu               510 mm/slab.c      static void start_cpu_timer(int cpu)
cpu               512 mm/slab.c      	struct delayed_work *reap_work = &per_cpu(slab_reap_work, cpu);
cpu               515 mm/slab.c      		init_reap_node(cpu);
cpu               517 mm/slab.c      		schedule_delayed_work_on(cpu, reap_work,
cpu               518 mm/slab.c      					__round_jiffies_relative(HZ, cpu));
cpu               940 mm/slab.c      static void cpuup_canceled(long cpu)
cpu               944 mm/slab.c      	int node = cpu_to_mem(cpu);
cpu               963 mm/slab.c      		nc = per_cpu_ptr(cachep->cpu_cache, cpu);
cpu              1006 mm/slab.c      static int cpuup_prepare(long cpu)
cpu              1009 mm/slab.c      	int node = cpu_to_mem(cpu);
cpu              1034 mm/slab.c      	cpuup_canceled(cpu);
cpu              1038 mm/slab.c      int slab_prepare_cpu(unsigned int cpu)
cpu              1043 mm/slab.c      	err = cpuup_prepare(cpu);
cpu              1058 mm/slab.c      int slab_dead_cpu(unsigned int cpu)
cpu              1061 mm/slab.c      	cpuup_canceled(cpu);
cpu              1067 mm/slab.c      static int slab_online_cpu(unsigned int cpu)
cpu              1069 mm/slab.c      	start_cpu_timer(cpu);
cpu              1073 mm/slab.c      static int slab_offline_cpu(unsigned int cpu)
cpu              1081 mm/slab.c      	cancel_delayed_work_sync(&per_cpu(slab_reap_work, cpu));
cpu              1083 mm/slab.c      	per_cpu(slab_reap_work, cpu).work.func = NULL;
cpu              1724 mm/slab.c      	int cpu;
cpu              1734 mm/slab.c      	for_each_possible_cpu(cpu) {
cpu              1735 mm/slab.c      		init_arraycache(per_cpu_ptr(cpu_cache, cpu),
cpu              3802 mm/slab.c      	int cpu;
cpu              3825 mm/slab.c      	for_each_online_cpu(cpu) {
cpu              3829 mm/slab.c      		struct array_cache *ac = per_cpu_ptr(prev, cpu);
cpu              3831 mm/slab.c      		node = cpu_to_mem(cpu);
cpu               209 mm/slub.c      	int cpu;		/* Was running on cpu */
cpu               566 mm/slub.c      		p->cpu = smp_processor_id();
cpu               589 mm/slub.c      	       s, (void *)t->addr, pr_time - t->when, t->cpu, t->pid);
cpu              1996 mm/slub.c      static inline unsigned int init_tid(int cpu)
cpu              1998 mm/slub.c      	return cpu;
cpu              2027 mm/slub.c      	int cpu;
cpu              2029 mm/slub.c      	for_each_possible_cpu(cpu)
cpu              2030 mm/slub.c      		per_cpu_ptr(s->cpu_slab, cpu)->tid = init_tid(cpu);
cpu              2314 mm/slub.c      static inline void __flush_cpu_slab(struct kmem_cache *s, int cpu)
cpu              2316 mm/slub.c      	struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu);
cpu              2331 mm/slub.c      static bool has_cpu_slab(int cpu, void *info)
cpu              2334 mm/slub.c      	struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu);
cpu              2348 mm/slub.c      static int slub_cpu_dead(unsigned int cpu)
cpu              2356 mm/slub.c      		__flush_cpu_slab(s, cpu);
cpu              4577 mm/slub.c      				cpumask_set_cpu(track->cpu,
cpu              4609 mm/slub.c      	cpumask_set_cpu(track->cpu, to_cpumask(l->cpus));
cpu              4819 mm/slub.c      		int cpu;
cpu              4821 mm/slub.c      		for_each_possible_cpu(cpu) {
cpu              4823 mm/slub.c      							       cpu);
cpu              5075 mm/slub.c      	int cpu;
cpu              5078 mm/slub.c      	for_each_online_cpu(cpu) {
cpu              5081 mm/slub.c      		page = slub_percpu_partial(per_cpu_ptr(s->cpu_slab, cpu));
cpu              5092 mm/slub.c      	for_each_online_cpu(cpu) {
cpu              5095 mm/slub.c      		page = slub_percpu_partial(per_cpu_ptr(s->cpu_slab, cpu));
cpu              5098 mm/slub.c      			len += sprintf(buf + len, " C%d=%d(%d)", cpu,
cpu              5364 mm/slub.c      	int cpu;
cpu              5371 mm/slub.c      	for_each_online_cpu(cpu) {
cpu              5372 mm/slub.c      		unsigned x = per_cpu_ptr(s->cpu_slab, cpu)->stat[si];
cpu              5374 mm/slub.c      		data[cpu] = x;
cpu              5381 mm/slub.c      	for_each_online_cpu(cpu) {
cpu              5382 mm/slub.c      		if (data[cpu] && len < PAGE_SIZE - 20)
cpu              5383 mm/slub.c      			len += sprintf(buf + len, " C%d=%u", cpu, data[cpu]);
cpu              5392 mm/slub.c      	int cpu;
cpu              5394 mm/slub.c      	for_each_online_cpu(cpu)
cpu              5395 mm/slub.c      		per_cpu_ptr(s->cpu_slab, cpu)->stat[si] = 0;
cpu               294 mm/swap.c      static void activate_page_drain(int cpu)
cpu               296 mm/swap.c      	struct pagevec *pvec = &per_cpu(activate_page_pvecs, cpu);
cpu               302 mm/swap.c      static bool need_activate_page_drain(int cpu)
cpu               304 mm/swap.c      	return pagevec_count(&per_cpu(activate_page_pvecs, cpu)) != 0;
cpu               321 mm/swap.c      static inline void activate_page_drain(int cpu)
cpu               589 mm/swap.c      void lru_add_drain_cpu(int cpu)
cpu               591 mm/swap.c      	struct pagevec *pvec = &per_cpu(lru_add_pvec, cpu);
cpu               596 mm/swap.c      	pvec = &per_cpu(lru_rotate_pvecs, cpu);
cpu               606 mm/swap.c      	pvec = &per_cpu(lru_deactivate_file_pvecs, cpu);
cpu               610 mm/swap.c      	pvec = &per_cpu(lru_deactivate_pvecs, cpu);
cpu               614 mm/swap.c      	pvec = &per_cpu(lru_lazyfree_pvecs, cpu);
cpu               618 mm/swap.c      	activate_page_drain(cpu);
cpu               713 mm/swap.c      	int cpu;
cpu               725 mm/swap.c      	for_each_online_cpu(cpu) {
cpu               726 mm/swap.c      		struct work_struct *work = &per_cpu(lru_add_drain_work, cpu);
cpu               728 mm/swap.c      		if (pagevec_count(&per_cpu(lru_add_pvec, cpu)) ||
cpu               729 mm/swap.c      		    pagevec_count(&per_cpu(lru_rotate_pvecs, cpu)) ||
cpu               730 mm/swap.c      		    pagevec_count(&per_cpu(lru_deactivate_file_pvecs, cpu)) ||
cpu               731 mm/swap.c      		    pagevec_count(&per_cpu(lru_deactivate_pvecs, cpu)) ||
cpu               732 mm/swap.c      		    pagevec_count(&per_cpu(lru_lazyfree_pvecs, cpu)) ||
cpu               733 mm/swap.c      		    need_activate_page_drain(cpu)) {
cpu               735 mm/swap.c      			queue_work_on(cpu, mm_percpu_wq, work);
cpu               736 mm/swap.c      			cpumask_set_cpu(cpu, &has_work);
cpu               740 mm/swap.c      	for_each_cpu(cpu, &has_work)
cpu               741 mm/swap.c      		flush_work(&per_cpu(lru_add_drain_work, cpu));
cpu               115 mm/swap_slots.c static int alloc_swap_slot_cache(unsigned int cpu)
cpu               138 mm/swap_slots.c 	cache = &per_cpu(swp_slots, cpu);
cpu               170 mm/swap_slots.c static void drain_slots_cache_cpu(unsigned int cpu, unsigned int type,
cpu               176 mm/swap_slots.c 	cache = &per_cpu(swp_slots, cpu);
cpu               204 mm/swap_slots.c 	unsigned int cpu;
cpu               229 mm/swap_slots.c 	for_each_online_cpu(cpu)
cpu               230 mm/swap_slots.c 		drain_slots_cache_cpu(cpu, type, false);
cpu               233 mm/swap_slots.c static int free_slot_cache(unsigned int cpu)
cpu               236 mm/swap_slots.c 	drain_slots_cache_cpu(cpu, SLOTS_CACHE | SLOTS_CACHE_RET, true);
cpu              3193 mm/swapfile.c  		int cpu;
cpu              3219 mm/swapfile.c  		for_each_possible_cpu(cpu) {
cpu              3221 mm/swapfile.c  			cluster = per_cpu_ptr(p->percpu_cluster, cpu);
cpu              1530 mm/vmalloc.c   static void purge_fragmented_blocks(int cpu)
cpu              1535 mm/vmalloc.c   	struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu);
cpu              1567 mm/vmalloc.c   	int cpu;
cpu              1569 mm/vmalloc.c   	for_each_possible_cpu(cpu)
cpu              1570 mm/vmalloc.c   		purge_fragmented_blocks(cpu);
cpu              1672 mm/vmalloc.c   	int cpu;
cpu              1679 mm/vmalloc.c   	for_each_possible_cpu(cpu) {
cpu              1680 mm/vmalloc.c   		struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu);
cpu              4061 mm/vmscan.c    static int kswapd_cpu_online(unsigned int cpu)
cpu                42 mm/vmstat.c    	int item, cpu;
cpu                46 mm/vmstat.c    		for_each_online_cpu(cpu)
cpu                47 mm/vmstat.c    			per_cpu_ptr(zone->pageset, cpu)->vm_numa_stat_diff[item]
cpu               113 mm/vmstat.c    	int cpu;
cpu               118 mm/vmstat.c    	for_each_online_cpu(cpu) {
cpu               119 mm/vmstat.c    		struct vm_event_state *this = &per_cpu(vm_event_states, cpu);
cpu               145 mm/vmstat.c    void vm_events_fold_cpu(int cpu)
cpu               147 mm/vmstat.c    	struct vm_event_state *fold_state = &per_cpu(vm_event_states, cpu);
cpu               250 mm/vmstat.c    	int cpu;
cpu               255 mm/vmstat.c    		for_each_online_cpu(cpu) {
cpu               256 mm/vmstat.c    			per_cpu_ptr(pgdat->per_cpu_nodestats, cpu)->stat_threshold = 0;
cpu               266 mm/vmstat.c    		for_each_online_cpu(cpu) {
cpu               269 mm/vmstat.c    			per_cpu_ptr(zone->pageset, cpu)->stat_threshold
cpu               273 mm/vmstat.c    			pgdat_threshold = per_cpu_ptr(pgdat->per_cpu_nodestats, cpu)->stat_threshold;
cpu               274 mm/vmstat.c    			per_cpu_ptr(pgdat->per_cpu_nodestats, cpu)->stat_threshold
cpu               295 mm/vmstat.c    	int cpu;
cpu               305 mm/vmstat.c    		for_each_online_cpu(cpu)
cpu               306 mm/vmstat.c    			per_cpu_ptr(zone->pageset, cpu)->stat_threshold
cpu               847 mm/vmstat.c    void cpu_vm_stats_fold(int cpu)
cpu               861 mm/vmstat.c    		p = per_cpu_ptr(zone->pageset, cpu);
cpu               889 mm/vmstat.c    		p = per_cpu_ptr(pgdat->per_cpu_nodestats, cpu);
cpu              1831 mm/vmstat.c    static bool need_update(int cpu)
cpu              1836 mm/vmstat.c    		struct per_cpu_pageset *p = per_cpu_ptr(zone->pageset, cpu);
cpu              1895 mm/vmstat.c    	int cpu;
cpu              1899 mm/vmstat.c    	for_each_online_cpu(cpu) {
cpu              1900 mm/vmstat.c    		struct delayed_work *dw = &per_cpu(vmstat_work, cpu);
cpu              1902 mm/vmstat.c    		if (!delayed_work_pending(dw) && need_update(cpu))
cpu              1903 mm/vmstat.c    			queue_delayed_work_on(cpu, mm_percpu_wq, dw, 0);
cpu              1913 mm/vmstat.c    	int cpu;
cpu              1915 mm/vmstat.c    	for_each_possible_cpu(cpu)
cpu              1916 mm/vmstat.c    		INIT_DEFERRABLE_WORK(per_cpu_ptr(&vmstat_work, cpu),
cpu              1933 mm/vmstat.c    static int vmstat_cpu_online(unsigned int cpu)
cpu              1936 mm/vmstat.c    	node_set_state(cpu_to_node(cpu), N_CPU);
cpu              1940 mm/vmstat.c    static int vmstat_cpu_down_prep(unsigned int cpu)
cpu              1942 mm/vmstat.c    	cancel_delayed_work_sync(&per_cpu(vmstat_work, cpu));
cpu              1946 mm/vmstat.c    static int vmstat_cpu_dead(unsigned int cpu)
cpu              1951 mm/vmstat.c    	node = cpu_to_node(cpu);
cpu               120 mm/z3fold.c    	short cpu;
cpu               324 mm/z3fold.c    	zhdr->cpu = -1;
cpu               548 mm/z3fold.c    		zhdr->cpu = smp_processor_id();
cpu               689 mm/z3fold.c    		zhdr->cpu = -1;
cpu               714 mm/z3fold.c    		int cpu;
cpu               717 mm/z3fold.c    		for_each_online_cpu(cpu) {
cpu               720 mm/z3fold.c    			unbuddied = per_cpu_ptr(pool->unbuddied, cpu);
cpu               733 mm/z3fold.c    			zhdr->cpu = -1;
cpu               769 mm/z3fold.c    	int i, cpu;
cpu               784 mm/z3fold.c    	for_each_possible_cpu(cpu) {
cpu               786 mm/z3fold.c    				per_cpu_ptr(pool->unbuddied, cpu);
cpu              1060 mm/z3fold.c    	if (zhdr->cpu < 0 || !cpu_online(zhdr->cpu)) {
cpu              1064 mm/z3fold.c    		zhdr->cpu = -1;
cpu              1071 mm/z3fold.c    	queue_work_on(zhdr->cpu, pool->compact_wq, &zhdr->work);
cpu              1158 mm/z3fold.c    			zhdr->cpu = -1;
cpu              1423 mm/z3fold.c    	new_zhdr->cpu = smp_processor_id();
cpu              1430 mm/z3fold.c    	queue_work_on(new_zhdr->cpu, pool->compact_wq, &new_zhdr->work);
cpu              1238 mm/zsmalloc.c  static int zs_cpu_prepare(unsigned int cpu)
cpu              1242 mm/zsmalloc.c  	area = &per_cpu(zs_map_area, cpu);
cpu              1246 mm/zsmalloc.c  static int zs_cpu_dead(unsigned int cpu)
cpu              1250 mm/zsmalloc.c  	area = &per_cpu(zs_map_area, cpu);
cpu               375 mm/zswap.c     static int zswap_dstmem_prepare(unsigned int cpu)
cpu               379 mm/zswap.c     	dst = kmalloc_node(PAGE_SIZE * 2, GFP_KERNEL, cpu_to_node(cpu));
cpu               383 mm/zswap.c     	per_cpu(zswap_dstmem, cpu) = dst;
cpu               387 mm/zswap.c     static int zswap_dstmem_dead(unsigned int cpu)
cpu               391 mm/zswap.c     	dst = per_cpu(zswap_dstmem, cpu);
cpu               393 mm/zswap.c     	per_cpu(zswap_dstmem, cpu) = NULL;
cpu               398 mm/zswap.c     static int zswap_cpu_comp_prepare(unsigned int cpu, struct hlist_node *node)
cpu               403 mm/zswap.c     	if (WARN_ON(*per_cpu_ptr(pool->tfm, cpu)))
cpu               412 mm/zswap.c     	*per_cpu_ptr(pool->tfm, cpu) = tfm;
cpu               416 mm/zswap.c     static int zswap_cpu_comp_dead(unsigned int cpu, struct hlist_node *node)
cpu               421 mm/zswap.c     	tfm = *per_cpu_ptr(pool->tfm, cpu);
cpu               424 mm/zswap.c     	*per_cpu_ptr(pool->tfm, cpu) = NULL;
cpu               105 net/batman-adv/soft-interface.c 	int cpu;
cpu               107 net/batman-adv/soft-interface.c 	for_each_possible_cpu(cpu) {
cpu               108 net/batman-adv/soft-interface.c 		counters = per_cpu_ptr(bat_priv->bat_counters, cpu);
cpu               201 net/bridge/br_device.c 	unsigned int cpu;
cpu               203 net/bridge/br_device.c 	for_each_possible_cpu(cpu) {
cpu               206 net/bridge/br_device.c 			= per_cpu_ptr(br->stats, cpu);
cpu                39 net/bridge/netfilter/ebtables.c #define COUNTER_BASE(c, n, cpu) ((struct ebt_counter *)(((char *)c) + \
cpu                40 net/bridge/netfilter/ebtables.c 				 COUNTER_OFFSET(n) * cpu))
cpu               949 net/bridge/netfilter/ebtables.c 	int i, cpu;
cpu               957 net/bridge/netfilter/ebtables.c 	for_each_possible_cpu(cpu) {
cpu               958 net/bridge/netfilter/ebtables.c 		if (cpu == 0)
cpu               960 net/bridge/netfilter/ebtables.c 		counter_base = COUNTER_BASE(oldcounters, nentries, cpu);
cpu              2100 net/core/dev.c 				 int cpu, u16 offset, u16 count)
cpu              2106 net/core/dev.c 	for (tci = cpu * num_tc; num_tc--; tci++) {
cpu              3761 net/core/dev.c 		int cpu = smp_processor_id(); /* ok because BHs are off */
cpu              3763 net/core/dev.c 		if (txq->xmit_lock_owner != cpu) {
cpu              3771 net/core/dev.c 			HARD_TX_LOCK(dev, txq, cpu);
cpu              3938 net/core/dev.c 	rflow->cpu = next_cpu;
cpu              3954 net/core/dev.c 	int cpu = -1;
cpu              4000 net/core/dev.c 		tcpu = rflow->cpu;
cpu              4023 net/core/dev.c 			cpu = tcpu;
cpu              4033 net/core/dev.c 			cpu = tcpu;
cpu              4039 net/core/dev.c 	return cpu;
cpu              4062 net/core/dev.c 	unsigned int cpu;
cpu              4068 net/core/dev.c 		cpu = READ_ONCE(rflow->cpu);
cpu              4069 net/core/dev.c 		if (rflow->filter == filter_id && cpu < nr_cpu_ids &&
cpu              4070 net/core/dev.c 		    ((int)(per_cpu(softnet_data, cpu).input_queue_head -
cpu              4158 net/core/dev.c static int enqueue_to_backlog(struct sk_buff *skb, int cpu,
cpu              4165 net/core/dev.c 	sd = &per_cpu(softnet_data, cpu);
cpu              4349 net/core/dev.c 	int cpu, rc;
cpu              4352 net/core/dev.c 	cpu = smp_processor_id();
cpu              4353 net/core/dev.c 	HARD_TX_LOCK(dev, txq, cpu);
cpu              4410 net/core/dev.c 		int cpu;
cpu              4415 net/core/dev.c 		cpu = get_rps_cpu(skb->dev, skb, &rflow);
cpu              4416 net/core/dev.c 		if (cpu < 0)
cpu              4417 net/core/dev.c 			cpu = smp_processor_id();
cpu              4419 net/core/dev.c 		ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
cpu              5118 net/core/dev.c 		int cpu = get_rps_cpu(skb->dev, skb, &rflow);
cpu              5120 net/core/dev.c 		if (cpu >= 0) {
cpu              5121 net/core/dev.c 			ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
cpu              5151 net/core/dev.c 			int cpu = get_rps_cpu(skb->dev, skb, &rflow);
cpu              5153 net/core/dev.c 			if (cpu >= 0) {
cpu              5156 net/core/dev.c 				enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
cpu              5253 net/core/dev.c 	unsigned int cpu;
cpu              5257 net/core/dev.c 	for_each_online_cpu(cpu)
cpu              5258 net/core/dev.c 		queue_work_on(cpu, system_highpri_wq,
cpu              5259 net/core/dev.c 			      per_cpu_ptr(&flush_works, cpu));
cpu              5261 net/core/dev.c 	for_each_online_cpu(cpu)
cpu              5262 net/core/dev.c 		flush_work(per_cpu_ptr(&flush_works, cpu));
cpu              5812 net/core/dev.c 		if (cpu_online(remsd->cpu))
cpu              5813 net/core/dev.c 			smp_call_function_single_async(remsd->cpu, &remsd->csd);
cpu              9828 net/core/dev.c 	unsigned int cpu;
cpu              9832 net/core/dev.c 	cpu = smp_processor_id();
cpu              9833 net/core/dev.c 	sd = &per_cpu(softnet_data, cpu);
cpu              10206 net/core/dev.c 		sd->cpu = i;
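
The net/core/dev.c entries cover, among other things, RPS steering (get_rps_cpu() picks a target CPU and enqueue_to_backlog() queues the skb there) and the backlog flush path that queues a work item on every online CPU and then waits for each one. A sketch of that queue-on-every-CPU-then-flush idiom, using a hypothetical per-CPU demo_flush_work rather than the flush_works actually used by dev.c:

    #include <linux/workqueue.h>
    #include <linux/percpu.h>
    #include <linux/cpu.h>
    #include <linux/init.h>

    static DEFINE_PER_CPU(struct work_struct, demo_flush_work);

    static void demo_flush_fn(struct work_struct *work)
    {
        /* runs on the CPU the work was queued on; drain per-CPU state here */
    }

    static int __init demo_flush_init(void)
    {
        unsigned int cpu;

        for_each_possible_cpu(cpu)
            INIT_WORK(per_cpu_ptr(&demo_flush_work, cpu), demo_flush_fn);
        return 0;
    }

    static void demo_flush_all_backlogs(void)
    {
        unsigned int cpu;

        get_online_cpus();

        /* fan the work out first, then wait, so the flushes run in parallel */
        for_each_online_cpu(cpu)
            queue_work_on(cpu, system_highpri_wq,
                          per_cpu_ptr(&demo_flush_work, cpu));

        for_each_online_cpu(cpu)
            flush_work(per_cpu_ptr(&demo_flush_work, cpu));

        put_online_cpus();
    }
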
cpu               977 net/core/drop_monitor.c 	int cpu;
cpu               991 net/core/drop_monitor.c 	for_each_possible_cpu(cpu) {
cpu               992 net/core/drop_monitor.c 		struct per_cpu_dm_data *hw_data = &per_cpu(dm_hw_cpu_data, cpu);
cpu              1008 net/core/drop_monitor.c 	int cpu;
cpu              1022 net/core/drop_monitor.c 	for_each_possible_cpu(cpu) {
cpu              1023 net/core/drop_monitor.c 		struct per_cpu_dm_data *hw_data = &per_cpu(dm_hw_cpu_data, cpu);
cpu              1043 net/core/drop_monitor.c 	int cpu, rc;
cpu              1052 net/core/drop_monitor.c 	for_each_possible_cpu(cpu) {
cpu              1053 net/core/drop_monitor.c 		struct per_cpu_dm_data *data = &per_cpu(dm_cpu_data, cpu);
cpu              1091 net/core/drop_monitor.c 	int cpu;
cpu              1103 net/core/drop_monitor.c 	for_each_possible_cpu(cpu) {
cpu              1104 net/core/drop_monitor.c 		struct per_cpu_dm_data *data = &per_cpu(dm_cpu_data, cpu);
cpu              1343 net/core/drop_monitor.c 	int cpu;
cpu              1346 net/core/drop_monitor.c 	for_each_possible_cpu(cpu) {
cpu              1347 net/core/drop_monitor.c 		struct per_cpu_dm_data *data = &per_cpu(dm_cpu_data, cpu);
cpu              1387 net/core/drop_monitor.c 	int cpu;
cpu              1390 net/core/drop_monitor.c 	for_each_possible_cpu(cpu) {
cpu              1391 net/core/drop_monitor.c 		struct per_cpu_dm_data *hw_data = &per_cpu(dm_hw_cpu_data, cpu);
cpu              1596 net/core/drop_monitor.c static void net_dm_cpu_data_init(int cpu)
cpu              1600 net/core/drop_monitor.c 	data = &per_cpu(dm_cpu_data, cpu);
cpu              1604 net/core/drop_monitor.c static void net_dm_cpu_data_fini(int cpu)
cpu              1608 net/core/drop_monitor.c 	data = &per_cpu(dm_cpu_data, cpu);
cpu              1616 net/core/drop_monitor.c static void net_dm_hw_cpu_data_init(int cpu)
cpu              1620 net/core/drop_monitor.c 	hw_data = &per_cpu(dm_hw_cpu_data, cpu);
cpu              1624 net/core/drop_monitor.c static void net_dm_hw_cpu_data_fini(int cpu)
cpu              1628 net/core/drop_monitor.c 	hw_data = &per_cpu(dm_hw_cpu_data, cpu);
cpu              1635 net/core/drop_monitor.c 	int cpu, rc;
cpu              1659 net/core/drop_monitor.c 	for_each_possible_cpu(cpu) {
cpu              1660 net/core/drop_monitor.c 		net_dm_cpu_data_init(cpu);
cpu              1661 net/core/drop_monitor.c 		net_dm_hw_cpu_data_init(cpu);
cpu              1674 net/core/drop_monitor.c 	int cpu;
cpu              1683 net/core/drop_monitor.c 	for_each_possible_cpu(cpu) {
cpu              1684 net/core/drop_monitor.c 		net_dm_hw_cpu_data_fini(cpu);
cpu              1685 net/core/drop_monitor.c 		net_dm_cpu_data_fini(cpu);
cpu               303 net/core/dst.c 	int cpu;
cpu               311 net/core/dst.c 	for_each_possible_cpu(cpu)
cpu               312 net/core/dst.c 		__metadata_dst_init(per_cpu_ptr(md_dst, cpu), type, optslen);
cpu               321 net/core/dst.c 	int cpu;
cpu               323 net/core/dst.c 	for_each_possible_cpu(cpu) {
cpu               324 net/core/dst.c 		struct metadata_dst *one_md_dst = per_cpu_ptr(md_dst, cpu);
cpu              3595 net/core/filter.c 	int cpu;
cpu              3597 net/core/filter.c 	for_each_possible_cpu(cpu) {
cpu              3598 net/core/filter.c 		ri = per_cpu_ptr(&bpf_redirect_info, cpu);
cpu               119 net/core/gen_stats.c 			    struct gnet_stats_basic_cpu __percpu *cpu)
cpu               124 net/core/gen_stats.c 		struct gnet_stats_basic_cpu *bcpu = per_cpu_ptr(cpu, i);
cpu               143 net/core/gen_stats.c 			struct gnet_stats_basic_cpu __percpu *cpu,
cpu               148 net/core/gen_stats.c 	if (cpu) {
cpu               149 net/core/gen_stats.c 		__gnet_stats_copy_basic_cpu(bstats, cpu);
cpu               164 net/core/gen_stats.c 			 struct gnet_stats_basic_cpu __percpu *cpu,
cpu               170 net/core/gen_stats.c 	__gnet_stats_copy_basic(running, &bstats, cpu, b);
cpu               205 net/core/gen_stats.c 		      struct gnet_stats_basic_cpu __percpu *cpu,
cpu               208 net/core/gen_stats.c 	return ___gnet_stats_copy_basic(running, d, cpu, b,
cpu               229 net/core/gen_stats.c 			 struct gnet_stats_basic_cpu __percpu *cpu,
cpu               232 net/core/gen_stats.c 	return ___gnet_stats_copy_basic(running, d, cpu, b,
cpu               299 net/core/gen_stats.c 			     const struct gnet_stats_queue __percpu *cpu,
cpu               303 net/core/gen_stats.c 	if (cpu) {
cpu               304 net/core/gen_stats.c 		__gnet_stats_copy_queue_cpu(qstats, cpu);
cpu              2075 net/core/neighbour.c 		int cpu;
cpu              2080 net/core/neighbour.c 		for_each_possible_cpu(cpu) {
cpu              2083 net/core/neighbour.c 			st = per_cpu_ptr(tbl->stats, cpu);
cpu              3268 net/core/neighbour.c 	int cpu;
cpu              3273 net/core/neighbour.c 	for (cpu = *pos-1; cpu < nr_cpu_ids; ++cpu) {
cpu              3274 net/core/neighbour.c 		if (!cpu_possible(cpu))
cpu              3276 net/core/neighbour.c 		*pos = cpu+1;
cpu              3277 net/core/neighbour.c 		return per_cpu_ptr(tbl->stats, cpu);
cpu              3285 net/core/neighbour.c 	int cpu;
cpu              3287 net/core/neighbour.c 	for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) {
cpu              3288 net/core/neighbour.c 		if (!cpu_possible(cpu))
cpu              3290 net/core/neighbour.c 		*pos = cpu+1;
cpu              3291 net/core/neighbour.c 		return per_cpu_ptr(tbl->stats, cpu);
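
The neigh_stat seq_start/seq_next pair above is the stock way a /proc file walks per-CPU statistics: *pos encodes "last CPU plus one", cpu_possible() skips holes in the CPU numbering, and the next possible CPU's per-CPU pointer is returned. A condensed sketch of that pair for a hypothetical per-CPU demo_stats structure:

    #include <linux/seq_file.h>
    #include <linux/percpu.h>
    #include <linux/cpumask.h>

    struct demo_stat {
        unsigned long hits;
        unsigned long misses;
    };

    static DEFINE_PER_CPU(struct demo_stat, demo_stats);

    static void *demo_stat_seq_start(struct seq_file *seq, loff_t *pos)
    {
        int cpu;

        if (*pos == 0)
            return SEQ_START_TOKEN;  /* lets show() emit a header line first */

        for (cpu = *pos - 1; cpu < nr_cpu_ids; ++cpu) {
            if (!cpu_possible(cpu))
                continue;
            *pos = cpu + 1;
            return &per_cpu(demo_stats, cpu);
        }
        return NULL;
    }

    static void *demo_stat_seq_next(struct seq_file *seq, void *v, loff_t *pos)
    {
        int cpu;

        for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) {
            if (!cpu_possible(cpu))
                continue;
            *pos = cpu + 1;
            return &per_cpu(demo_stats, cpu);
        }
        return NULL;
    }
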
cpu               713 net/core/net-sysfs.c 	int err, cpu, i;
cpu               737 net/core/net-sysfs.c 	for_each_cpu_and(cpu, mask, cpu_online_mask)
cpu               738 net/core/net-sysfs.c 		map->cpus[i++] = cpu;
cpu               830 net/core/net-sysfs.c 			table->flows[count].cpu = RPS_NO_CPU;
cpu              1239 net/core/net-sysfs.c 	int cpu, len, num_tc = 1, tc = 0;
cpu              1269 net/core/net-sysfs.c 		for_each_possible_cpu(cpu) {
cpu              1270 net/core/net-sysfs.c 			int i, tci = cpu * num_tc + tc;
cpu              1279 net/core/net-sysfs.c 					cpumask_set_cpu(cpu, mask);
cpu               162 net/core/netpoll.c 	int cpu = smp_processor_id();
cpu               165 net/core/netpoll.c 		if (cmpxchg(&napi->poll_owner, -1, cpu) == -1) {
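
The netpoll lines claim NAPI poll ownership by cmpxchg()ing the owner field from -1 to the current CPU id. A rough sketch of that ownership handshake with placeholder names; the real netpoll code keeps the owner inside struct napi_struct and manages its own ordering:

    #include <linux/atomic.h>
    #include <linux/smp.h>
    #include <linux/types.h>

    static int demo_poll_owner = -1;  /* -1 means "nobody is polling" */

    /* claim the poller for the current CPU; true if we got it */
    static bool demo_poll_lock(void)
    {
        int cpu = smp_processor_id();  /* caller keeps BHs/preemption off */

        return cmpxchg(&demo_poll_owner, -1, cpu) == -1;
    }

    static void demo_poll_unlock(void)
    {
        smp_store_release(&demo_poll_owner, -1);
    }
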
cpu               449 net/core/pktgen.c 	int cpu;
cpu              3466 net/core/pktgen.c 	int cpu = t->cpu;
cpu              3468 net/core/pktgen.c 	BUG_ON(smp_processor_id() != cpu);
cpu              3473 net/core/pktgen.c 	pr_debug("starting pktgen/%d:  pid=%d\n", cpu, task_pid_nr(current));
cpu              3594 net/core/pktgen.c 	int node = cpu_to_node(t->cpu);
cpu              3677 net/core/pktgen.c static int __net_init pktgen_create_thread(int cpu, struct pktgen_net *pn)
cpu              3684 net/core/pktgen.c 			 cpu_to_node(cpu));
cpu              3691 net/core/pktgen.c 	t->cpu = cpu;
cpu              3700 net/core/pktgen.c 				   cpu_to_node(cpu),
cpu              3701 net/core/pktgen.c 				   "kpktgend_%d", cpu);
cpu              3703 net/core/pktgen.c 		pr_err("kernel_thread() failed for cpu %d\n", t->cpu);
cpu              3708 net/core/pktgen.c 	kthread_bind(p, cpu);
cpu              3787 net/core/pktgen.c 	int cpu, ret = 0;
cpu              3804 net/core/pktgen.c 	for_each_online_cpu(cpu) {
cpu              3807 net/core/pktgen.c 		err = pktgen_create_thread(cpu, pn);
cpu              3810 net/core/pktgen.c 				   cpu, err);
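
pktgen starts one kernel thread per online CPU: the thread is created with its data allocated on that CPU's node, named after the CPU, bound to it, and only then allowed to run. A minimal sketch of the create-on-node plus bind sequence with a placeholder thread function:

    #include <linux/kthread.h>
    #include <linux/cpumask.h>
    #include <linux/topology.h>
    #include <linux/sched.h>
    #include <linux/err.h>

    static int demo_thread_fn(void *arg)
    {
        while (!kthread_should_stop())
            schedule_timeout_interruptible(HZ);
        return 0;
    }

    static int demo_start_per_cpu_threads(void)
    {
        unsigned int cpu;

        for_each_online_cpu(cpu) {
            struct task_struct *p;

            p = kthread_create_on_node(demo_thread_fn, NULL,
                                       cpu_to_node(cpu),
                                       "kdemod_%u", cpu);
            if (IS_ERR(p))
                return PTR_ERR(p);

            /* pin the thread to its CPU before it ever runs */
            kthread_bind(p, cpu);
            wake_up_process(p);
        }
        return 0;
    }
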
cpu              3240 net/core/sock.c 	int cpu, idx = prot->inuse_idx;
cpu              3243 net/core/sock.c 	for_each_possible_cpu(cpu)
cpu              3244 net/core/sock.c 		res += per_cpu_ptr(net->core.prot_inuse, cpu)->val[idx];
cpu              3257 net/core/sock.c 	int cpu, res = 0;
cpu              3259 net/core/sock.c 	for_each_possible_cpu(cpu)
cpu              3260 net/core/sock.c 		res += *per_cpu_ptr(net->core.sock_inuse, cpu);
cpu              1644 net/ipv4/af_inet.c u64 snmp_get_cpu_field(void __percpu *mib, int cpu, int offt)
cpu              1646 net/ipv4/af_inet.c 	return  *(((unsigned long *)per_cpu_ptr(mib, cpu)) + offt);
cpu              1663 net/ipv4/af_inet.c u64 snmp_get_cpu_field64(void __percpu *mib, int cpu, int offt,
cpu              1671 net/ipv4/af_inet.c 	bhptr = per_cpu_ptr(mib, cpu);
cpu              1685 net/ipv4/af_inet.c 	int cpu;
cpu              1687 net/ipv4/af_inet.c 	for_each_possible_cpu(cpu) {
cpu              1688 net/ipv4/af_inet.c 		res += snmp_get_cpu_field64(mib, cpu, offt, syncp_offset);
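
snmp_get_cpu_field() reads one counter out of a CPU's copy of a percpu MIB block by treating the block as an array of unsigned long and indexing it with the field offset; the fold helpers then sum that value across all possible CPUs. A small sketch of the fold, assuming a mib allocated with alloc_percpu():

    #include <linux/percpu.h>
    #include <linux/cpumask.h>

    /* sum one counter (word index 'offt') across every possible CPU's copy */
    static unsigned long demo_snmp_fold(void __percpu *mib, int offt)
    {
        unsigned long res = 0;
        int cpu;

        for_each_possible_cpu(cpu)
            res += *((unsigned long *)per_cpu_ptr(mib, cpu) + offt);

        return res;
    }
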
cpu               192 net/ipv4/fib_semantics.c 	int cpu;
cpu               197 net/ipv4/fib_semantics.c 	for_each_possible_cpu(cpu) {
cpu               200 net/ipv4/fib_semantics.c 		rt = rcu_dereference_protected(*per_cpu_ptr(rtp, cpu), 1);
cpu              2412 net/ipv4/fib_trie.c 	int cpu;
cpu              2415 net/ipv4/fib_trie.c 	for_each_possible_cpu(cpu) {
cpu              2416 net/ipv4/fib_trie.c 		const struct trie_use_stats *pcpu = per_cpu_ptr(stats, cpu);
cpu               193 net/ipv4/netfilter/arp_tables.c 	unsigned int cpu, stackidx = 0;
cpu               207 net/ipv4/netfilter/arp_tables.c 	cpu     = smp_processor_id();
cpu               209 net/ipv4/netfilter/arp_tables.c 	jumpstack  = (struct arpt_entry **)private->jumpstack[cpu];
cpu               603 net/ipv4/netfilter/arp_tables.c 	unsigned int cpu;
cpu               606 net/ipv4/netfilter/arp_tables.c 	for_each_possible_cpu(cpu) {
cpu               607 net/ipv4/netfilter/arp_tables.c 		seqcount_t *s = &per_cpu(xt_recseq, cpu);
cpu               615 net/ipv4/netfilter/arp_tables.c 			tmp = xt_get_per_cpu_counter(&iter->counters, cpu);
cpu               633 net/ipv4/netfilter/arp_tables.c 	unsigned int cpu, i;
cpu               635 net/ipv4/netfilter/arp_tables.c 	for_each_possible_cpu(cpu) {
cpu               640 net/ipv4/netfilter/arp_tables.c 			tmp = xt_get_per_cpu_counter(&iter->counters, cpu);
cpu               237 net/ipv4/netfilter/ip_tables.c 	unsigned int stackidx, cpu;
cpu               262 net/ipv4/netfilter/ip_tables.c 	cpu        = smp_processor_id();
cpu               264 net/ipv4/netfilter/ip_tables.c 	jumpstack  = (struct ipt_entry **)private->jumpstack[cpu];
cpu               744 net/ipv4/netfilter/ip_tables.c 	unsigned int cpu;
cpu               747 net/ipv4/netfilter/ip_tables.c 	for_each_possible_cpu(cpu) {
cpu               748 net/ipv4/netfilter/ip_tables.c 		seqcount_t *s = &per_cpu(xt_recseq, cpu);
cpu               756 net/ipv4/netfilter/ip_tables.c 			tmp = xt_get_per_cpu_counter(&iter->counters, cpu);
cpu               774 net/ipv4/netfilter/ip_tables.c 	unsigned int cpu, i;
cpu               776 net/ipv4/netfilter/ip_tables.c 	for_each_possible_cpu(cpu) {
cpu               781 net/ipv4/netfilter/ip_tables.c 			tmp = xt_get_per_cpu_counter(&iter->counters, cpu);
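
The arp_tables/ip_tables counter collection reads each CPU's packet and byte counters under that CPU's xt_recseq seqcount so a pair is never observed half-updated (the nft_counter lines further down show the matching per-CPU seqcount_init() loop). A generic sketch of the read-retry fold with placeholder counter structures; the in-tree get_counters() differs in detail:

    #include <linux/seqlock.h>
    #include <linux/percpu.h>
    #include <linux/types.h>

    struct demo_counter {
        u64 packets;
        u64 bytes;
    };

    /* each per-CPU seqcount must be seqcount_init()'d once at startup;
     * updaters bump it around their counter writes */
    static DEFINE_PER_CPU(seqcount_t, demo_seq);
    static DEFINE_PER_CPU(struct demo_counter, demo_counter);

    static void demo_get_counters(u64 *packets, u64 *bytes)
    {
        int cpu;

        *packets = 0;
        *bytes = 0;

        for_each_possible_cpu(cpu) {
            seqcount_t *s = &per_cpu(demo_seq, cpu);
            struct demo_counter *c = per_cpu_ptr(&demo_counter, cpu);
            unsigned int start;
            u64 p, b;

            do {
                start = read_seqcount_begin(s);
                p = c->packets;
                b = c->bytes;
            } while (read_seqcount_retry(s, start));

            *packets += p;
            *bytes += b;
        }
    }
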
cpu               250 net/ipv4/route.c 	int cpu;
cpu               255 net/ipv4/route.c 	for (cpu = *pos-1; cpu < nr_cpu_ids; ++cpu) {
cpu               256 net/ipv4/route.c 		if (!cpu_possible(cpu))
cpu               258 net/ipv4/route.c 		*pos = cpu+1;
cpu               259 net/ipv4/route.c 		return &per_cpu(rt_cache_stat, cpu);
cpu               266 net/ipv4/route.c 	int cpu;
cpu               268 net/ipv4/route.c 	for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) {
cpu               269 net/ipv4/route.c 		if (!cpu_possible(cpu))
cpu               271 net/ipv4/route.c 		*pos = cpu+1;
cpu               272 net/ipv4/route.c 		return &per_cpu(rt_cache_stat, cpu);
cpu              1536 net/ipv4/route.c 	int cpu;
cpu              1538 net/ipv4/route.c 	for_each_possible_cpu(cpu) {
cpu              1539 net/ipv4/route.c 		struct uncached_list *ul = &per_cpu(rt_uncached_list, cpu);
cpu              3454 net/ipv4/route.c 	int cpu;
cpu              3467 net/ipv4/route.c 	for_each_possible_cpu(cpu) {
cpu              3468 net/ipv4/route.c 		struct uncached_list *ul = &per_cpu(rt_uncached_list, cpu);
cpu              3730 net/ipv4/tcp.c 	int cpu;
cpu              3736 net/ipv4/tcp.c 	for_each_possible_cpu(cpu) {
cpu              3737 net/ipv4/tcp.c 		void *scratch = per_cpu(tcp_md5sig_pool, cpu).scratch;
cpu              3744 net/ipv4/tcp.c 					       cpu_to_node(cpu));
cpu              3747 net/ipv4/tcp.c 			per_cpu(tcp_md5sig_pool, cpu).scratch = scratch;
cpu              3749 net/ipv4/tcp.c 		if (per_cpu(tcp_md5sig_pool, cpu).md5_req)
cpu              3758 net/ipv4/tcp.c 		per_cpu(tcp_md5sig_pool, cpu).md5_req = req;
cpu              2622 net/ipv4/tcp_ipv4.c 	int cpu;
cpu              2627 net/ipv4/tcp_ipv4.c 	for_each_possible_cpu(cpu)
cpu              2628 net/ipv4/tcp_ipv4.c 		inet_ctl_sock_destroy(*per_cpu_ptr(net->ipv4.tcp_sk, cpu));
cpu              2634 net/ipv4/tcp_ipv4.c 	int res, cpu, cnt;
cpu              2640 net/ipv4/tcp_ipv4.c 	for_each_possible_cpu(cpu) {
cpu              2654 net/ipv4/tcp_ipv4.c 		*per_cpu_ptr(net->ipv4.tcp_sk, cpu) = sk;
cpu              6401 net/ipv6/addrconf.c 			int cpu;
cpu              6406 net/ipv6/addrconf.c 				for_each_possible_cpu(cpu) {
cpu              6409 net/ipv6/addrconf.c 					rtp = per_cpu_ptr(nh->rt6i_pcpu, cpu);
cpu               904 net/ipv6/ip6_fib.c 	int cpu;
cpu               912 net/ipv6/ip6_fib.c 	for_each_possible_cpu(cpu) {
cpu               916 net/ipv6/ip6_fib.c 		ppcpu_rt = per_cpu_ptr(fib6_nh->rt6i_pcpu, cpu);
cpu               261 net/ipv6/netfilter/ip6_tables.c 	unsigned int stackidx, cpu;
cpu               284 net/ipv6/netfilter/ip6_tables.c 	cpu        = smp_processor_id();
cpu               286 net/ipv6/netfilter/ip6_tables.c 	jumpstack  = (struct ip6t_entry **)private->jumpstack[cpu];
cpu               761 net/ipv6/netfilter/ip6_tables.c 	unsigned int cpu;
cpu               764 net/ipv6/netfilter/ip6_tables.c 	for_each_possible_cpu(cpu) {
cpu               765 net/ipv6/netfilter/ip6_tables.c 		seqcount_t *s = &per_cpu(xt_recseq, cpu);
cpu               773 net/ipv6/netfilter/ip6_tables.c 			tmp = xt_get_per_cpu_counter(&iter->counters, cpu);
cpu               791 net/ipv6/netfilter/ip6_tables.c 	unsigned int cpu, i;
cpu               793 net/ipv6/netfilter/ip6_tables.c 	for_each_possible_cpu(cpu) {
cpu               798 net/ipv6/netfilter/ip6_tables.c 			tmp = xt_get_per_cpu_counter(&iter->counters, cpu);
cpu               160 net/ipv6/route.c 	int cpu;
cpu               165 net/ipv6/route.c 	for_each_possible_cpu(cpu) {
cpu               166 net/ipv6/route.c 		struct uncached_list *ul = per_cpu_ptr(&rt6_uncached_list, cpu);
cpu              3553 net/ipv6/route.c 		int cpu;
cpu              3555 net/ipv6/route.c 		for_each_possible_cpu(cpu) {
cpu              3559 net/ipv6/route.c 			ppcpu_rt = per_cpu_ptr(fib6_nh->rt6i_pcpu, cpu);
cpu              6362 net/ipv6/route.c 	int cpu;
cpu              6421 net/ipv6/route.c 	for_each_possible_cpu(cpu) {
cpu              6422 net/ipv6/route.c 		struct uncached_list *ul = per_cpu_ptr(&rt6_uncached_list, cpu);
cpu               358 net/ipv6/seg6_hmac.c 	int i, alg_count, cpu;
cpu               371 net/ipv6/seg6_hmac.c 		for_each_possible_cpu(cpu) {
cpu               375 net/ipv6/seg6_hmac.c 			p_tfm = per_cpu_ptr(algo->tfms, cpu);
cpu               388 net/ipv6/seg6_hmac.c 		for_each_possible_cpu(cpu) {
cpu               390 net/ipv6/seg6_hmac.c 					     cpu_to_node(cpu));
cpu               393 net/ipv6/seg6_hmac.c 			*per_cpu_ptr(algo->shashs, cpu) = shash;
cpu               419 net/ipv6/seg6_hmac.c 	int i, alg_count, cpu;
cpu               424 net/ipv6/seg6_hmac.c 		for_each_possible_cpu(cpu) {
cpu               428 net/ipv6/seg6_hmac.c 			shash = *per_cpu_ptr(algo->shashs, cpu);
cpu               430 net/ipv6/seg6_hmac.c 			tfm = *per_cpu_ptr(algo->tfms, cpu);
cpu               384 net/iucv/iucv.c 	int cpu = smp_processor_id();
cpu               396 net/iucv/iucv.c 	parm = iucv_param_irq[cpu];
cpu               414 net/iucv/iucv.c 	cpumask_set_cpu(cpu, &iucv_irq_cpumask);
cpu               425 net/iucv/iucv.c 	int cpu = smp_processor_id();
cpu               429 net/iucv/iucv.c 	parm = iucv_param_irq[cpu];
cpu               434 net/iucv/iucv.c 	cpumask_clear_cpu(cpu, &iucv_irq_cpumask);
cpu               445 net/iucv/iucv.c 	int cpu = smp_processor_id();
cpu               449 net/iucv/iucv.c 	parm = iucv_param_irq[cpu];
cpu               459 net/iucv/iucv.c 	cpumask_clear_cpu(cpu, &iucv_irq_cpumask);
cpu               470 net/iucv/iucv.c 	int cpu = smp_processor_id();
cpu               474 net/iucv/iucv.c 	if (cpumask_test_cpu(cpu, &iucv_buffer_cpumask))
cpu               478 net/iucv/iucv.c 	parm = iucv_param_irq[cpu];
cpu               480 net/iucv/iucv.c 	parm->db.ipbfadr1 = virt_to_phys(iucv_irq_data[cpu]);
cpu               502 net/iucv/iucv.c 			cpu, rc, err);
cpu               507 net/iucv/iucv.c 	cpumask_set_cpu(cpu, &iucv_buffer_cpumask);
cpu               525 net/iucv/iucv.c 	int cpu = smp_processor_id();
cpu               528 net/iucv/iucv.c 	if (!cpumask_test_cpu(cpu, &iucv_buffer_cpumask))
cpu               535 net/iucv/iucv.c 	parm = iucv_param_irq[cpu];
cpu               539 net/iucv/iucv.c 	cpumask_clear_cpu(cpu, &iucv_buffer_cpumask);
cpu               549 net/iucv/iucv.c 	int cpu;
cpu               552 net/iucv/iucv.c 	for_each_online_cpu(cpu)
cpu               554 net/iucv/iucv.c 		if (cpumask_test_cpu(cpu, &iucv_buffer_cpumask) &&
cpu               555 net/iucv/iucv.c 		    !cpumask_test_cpu(cpu, &iucv_irq_cpumask))
cpu               556 net/iucv/iucv.c 			smp_call_function_single(cpu, iucv_allow_cpu,
cpu               569 net/iucv/iucv.c 	int cpu;
cpu               574 net/iucv/iucv.c 	for_each_cpu(cpu, &cpumask)
cpu               575 net/iucv/iucv.c 		smp_call_function_single(cpu, iucv_block_cpu, NULL, 1);
cpu               589 net/iucv/iucv.c 	int cpu, rc;
cpu               599 net/iucv/iucv.c 	for_each_online_cpu(cpu)
cpu               600 net/iucv/iucv.c 		smp_call_function_single(cpu, iucv_declare_cpu, NULL, 1);
cpu               629 net/iucv/iucv.c static int iucv_cpu_dead(unsigned int cpu)
cpu               631 net/iucv/iucv.c 	kfree(iucv_param_irq[cpu]);
cpu               632 net/iucv/iucv.c 	iucv_param_irq[cpu] = NULL;
cpu               633 net/iucv/iucv.c 	kfree(iucv_param[cpu]);
cpu               634 net/iucv/iucv.c 	iucv_param[cpu] = NULL;
cpu               635 net/iucv/iucv.c 	kfree(iucv_irq_data[cpu]);
cpu               636 net/iucv/iucv.c 	iucv_irq_data[cpu] = NULL;
cpu               640 net/iucv/iucv.c static int iucv_cpu_prepare(unsigned int cpu)
cpu               643 net/iucv/iucv.c 	iucv_irq_data[cpu] = kmalloc_node(sizeof(struct iucv_irq_data),
cpu               644 net/iucv/iucv.c 			     GFP_KERNEL|GFP_DMA, cpu_to_node(cpu));
cpu               645 net/iucv/iucv.c 	if (!iucv_irq_data[cpu])
cpu               649 net/iucv/iucv.c 	iucv_param[cpu] = kmalloc_node(sizeof(union iucv_param),
cpu               650 net/iucv/iucv.c 			  GFP_KERNEL|GFP_DMA, cpu_to_node(cpu));
cpu               651 net/iucv/iucv.c 	if (!iucv_param[cpu])
cpu               654 net/iucv/iucv.c 	iucv_param_irq[cpu] = kmalloc_node(sizeof(union iucv_param),
cpu               655 net/iucv/iucv.c 			  GFP_KERNEL|GFP_DMA, cpu_to_node(cpu));
cpu               656 net/iucv/iucv.c 	if (!iucv_param_irq[cpu])
cpu               662 net/iucv/iucv.c 	iucv_cpu_dead(cpu);
cpu               666 net/iucv/iucv.c static int iucv_cpu_online(unsigned int cpu)
cpu               674 net/iucv/iucv.c static int iucv_cpu_down_prep(unsigned int cpu)
cpu               682 net/iucv/iucv.c 	cpumask_clear_cpu(cpu, &cpumask);
cpu              1885 net/iucv/iucv.c 	int cpu;
cpu              1893 net/iucv/iucv.c 		for_each_cpu(cpu, &iucv_irq_cpumask)
cpu              1894 net/iucv/iucv.c 			smp_call_function_single(cpu, iucv_block_cpu_almost,
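
The iucv code enables and disables per-CPU interrupt buffers by running a setup function directly on each target CPU with smp_call_function_single(), using cpumasks to remember which CPUs are already set up. A stripped-down sketch of that call-on-each-CPU pattern with hypothetical names:

    #include <linux/smp.h>
    #include <linux/cpumask.h>
    #include <linux/cpu.h>

    static cpumask_t demo_enabled_mask;

    static void demo_enable_on_this_cpu(void *unused)
    {
        /* runs on the target CPU; do the per-CPU setup here */
        cpumask_set_cpu(smp_processor_id(), &demo_enabled_mask);
    }

    static void demo_enable_everywhere(void)
    {
        unsigned int cpu;

        get_online_cpus();
        for_each_online_cpu(cpu)
            if (!cpumask_test_cpu(cpu, &demo_enabled_mask))
                smp_call_function_single(cpu, demo_enable_on_this_cpu,
                                         NULL, 1);
        put_online_cpus();
    }
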
cpu              2042 net/mac80211/sta_info.c 	int cpu;
cpu              2047 net/mac80211/sta_info.c 	for_each_possible_cpu(cpu) {
cpu              2050 net/mac80211/sta_info.c 		cpustats = per_cpu_ptr(sta->pcpu_rx_stats, cpu);
cpu              2184 net/mac80211/sta_info.c 	int i, ac, cpu;
cpu              2237 net/mac80211/sta_info.c 			for_each_possible_cpu(cpu) {
cpu              2240 net/mac80211/sta_info.c 				cpurxs = per_cpu_ptr(sta->pcpu_rx_stats, cpu);
cpu              2251 net/mac80211/sta_info.c 			for_each_possible_cpu(cpu) {
cpu              2254 net/mac80211/sta_info.c 				cpurxs = per_cpu_ptr(sta->pcpu_rx_stats, cpu);
cpu              2290 net/mac80211/sta_info.c 		for_each_possible_cpu(cpu) {
cpu              2293 net/mac80211/sta_info.c 			cpurxs = per_cpu_ptr(sta->pcpu_rx_stats, cpu);
cpu                45 net/netfilter/nf_conncount.c 	int				cpu;
cpu               102 net/netfilter/nf_conncount.c 	int cpu = raw_smp_processor_id();
cpu               117 net/netfilter/nf_conncount.c 	if (conn->cpu == cpu || age >= 2) {
cpu               189 net/netfilter/nf_conncount.c 	conn->cpu = raw_smp_processor_id();
cpu               500 net/netfilter/nf_conntrack_core.c 	ct->cpu = smp_processor_id();
cpu               501 net/netfilter/nf_conntrack_core.c 	pcpu = per_cpu_ptr(nf_ct_net(ct)->ct.pcpu_lists, ct->cpu);
cpu               515 net/netfilter/nf_conntrack_core.c 	ct->cpu = smp_processor_id();
cpu               516 net/netfilter/nf_conntrack_core.c 	pcpu = per_cpu_ptr(nf_ct_net(ct)->ct.pcpu_lists, ct->cpu);
cpu               530 net/netfilter/nf_conntrack_core.c 	pcpu = per_cpu_ptr(nf_ct_net(ct)->ct.pcpu_lists, ct->cpu);
cpu              2135 net/netfilter/nf_conntrack_core.c 	int cpu;
cpu              2137 net/netfilter/nf_conntrack_core.c 	for_each_possible_cpu(cpu) {
cpu              2142 net/netfilter/nf_conntrack_core.c 		pcpu = per_cpu_ptr(net->ct.pcpu_lists, cpu);
cpu              2574 net/netfilter/nf_conntrack_core.c 	int cpu;
cpu              2584 net/netfilter/nf_conntrack_core.c 	for_each_possible_cpu(cpu) {
cpu              2585 net/netfilter/nf_conntrack_core.c 		struct ct_pcpu *pcpu = per_cpu_ptr(net->ct.pcpu_lists, cpu);
cpu                88 net/netfilter/nf_conntrack_ecache.c 	int cpu, delay = -1;
cpu                93 net/netfilter/nf_conntrack_ecache.c 	for_each_possible_cpu(cpu) {
cpu                96 net/netfilter/nf_conntrack_ecache.c 		pcpu = per_cpu_ptr(ctnet->pcpu_lists, cpu);
cpu              1404 net/netfilter/nf_conntrack_netlink.c 	int cpu;
cpu              1413 net/netfilter/nf_conntrack_netlink.c 	for (cpu = cb->args[0]; cpu < nr_cpu_ids; cpu++) {
cpu              1416 net/netfilter/nf_conntrack_netlink.c 		if (!cpu_possible(cpu))
cpu              1419 net/netfilter/nf_conntrack_netlink.c 		pcpu = per_cpu_ptr(net->ct.pcpu_lists, cpu);
cpu              1441 net/netfilter/nf_conntrack_netlink.c 				cb->args[0] = cpu;
cpu              2200 net/netfilter/nf_conntrack_netlink.c 				__u16 cpu, const struct ip_conntrack_stat *st)
cpu              2215 net/netfilter/nf_conntrack_netlink.c 	nfmsg->res_id	    = htons(cpu);
cpu              2242 net/netfilter/nf_conntrack_netlink.c 	int cpu;
cpu              2248 net/netfilter/nf_conntrack_netlink.c 	for (cpu = cb->args[0]; cpu < nr_cpu_ids; cpu++) {
cpu              2251 net/netfilter/nf_conntrack_netlink.c 		if (!cpu_possible(cpu))
cpu              2254 net/netfilter/nf_conntrack_netlink.c 		st = per_cpu_ptr(net->ct.stat, cpu);
cpu              2258 net/netfilter/nf_conntrack_netlink.c 						    cpu, st) < 0)
cpu              2261 net/netfilter/nf_conntrack_netlink.c 	cb->args[0] = cpu;
cpu              3430 net/netfilter/nf_conntrack_netlink.c ctnetlink_exp_stat_fill_info(struct sk_buff *skb, u32 portid, u32 seq, int cpu,
cpu              3446 net/netfilter/nf_conntrack_netlink.c 	nfmsg->res_id	    = htons(cpu);
cpu              3465 net/netfilter/nf_conntrack_netlink.c 	int cpu;
cpu              3471 net/netfilter/nf_conntrack_netlink.c 	for (cpu = cb->args[0]; cpu < nr_cpu_ids; cpu++) {
cpu              3474 net/netfilter/nf_conntrack_netlink.c 		if (!cpu_possible(cpu))
cpu              3477 net/netfilter/nf_conntrack_netlink.c 		st = per_cpu_ptr(net->ct.stat, cpu);
cpu              3480 net/netfilter/nf_conntrack_netlink.c 						 cpu, st) < 0)
cpu              3483 net/netfilter/nf_conntrack_netlink.c 	cb->args[0] = cpu;
cpu               388 net/netfilter/nf_conntrack_standalone.c 	int cpu;
cpu               393 net/netfilter/nf_conntrack_standalone.c 	for (cpu = *pos-1; cpu < nr_cpu_ids; ++cpu) {
cpu               394 net/netfilter/nf_conntrack_standalone.c 		if (!cpu_possible(cpu))
cpu               396 net/netfilter/nf_conntrack_standalone.c 		*pos = cpu + 1;
cpu               397 net/netfilter/nf_conntrack_standalone.c 		return per_cpu_ptr(net->ct.stat, cpu);
cpu               406 net/netfilter/nf_conntrack_standalone.c 	int cpu;
cpu               408 net/netfilter/nf_conntrack_standalone.c 	for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) {
cpu               409 net/netfilter/nf_conntrack_standalone.c 		if (!cpu_possible(cpu))
cpu               411 net/netfilter/nf_conntrack_standalone.c 		*pos = cpu + 1;
cpu               412 net/netfilter/nf_conntrack_standalone.c 		return per_cpu_ptr(net->ct.stat, cpu);
cpu               244 net/netfilter/nf_synproxy_core.c 	int cpu;
cpu               249 net/netfilter/nf_synproxy_core.c 	for (cpu = *pos - 1; cpu < nr_cpu_ids; cpu++) {
cpu               250 net/netfilter/nf_synproxy_core.c 		if (!cpu_possible(cpu))
cpu               252 net/netfilter/nf_synproxy_core.c 		*pos = cpu + 1;
cpu               253 net/netfilter/nf_synproxy_core.c 		return per_cpu_ptr(snet->stats, cpu);
cpu               262 net/netfilter/nf_synproxy_core.c 	int cpu;
cpu               264 net/netfilter/nf_synproxy_core.c 	for (cpu = *pos; cpu < nr_cpu_ids; cpu++) {
cpu               265 net/netfilter/nf_synproxy_core.c 		if (!cpu_possible(cpu))
cpu               267 net/netfilter/nf_synproxy_core.c 		*pos = cpu + 1;
cpu               268 net/netfilter/nf_synproxy_core.c 		return per_cpu_ptr(snet->stats, cpu);
cpu              1224 net/netfilter/nf_tables_api.c 	int cpu;
cpu              1230 net/netfilter/nf_tables_api.c 	for_each_possible_cpu(cpu) {
cpu              1231 net/netfilter/nf_tables_api.c 		cpu_stats = per_cpu_ptr(stats, cpu);
cpu               123 net/netfilter/nft_counter.c 	int cpu;
cpu               126 net/netfilter/nft_counter.c 	for_each_possible_cpu(cpu) {
cpu               127 net/netfilter/nft_counter.c 		myseq = per_cpu_ptr(&nft_counter_seq, cpu);
cpu               128 net/netfilter/nft_counter.c 		this_cpu = per_cpu_ptr(priv->counter, cpu);
cpu               274 net/netfilter/nft_counter.c 	int cpu, err;
cpu               276 net/netfilter/nft_counter.c 	for_each_possible_cpu(cpu)
cpu               277 net/netfilter/nft_counter.c 		seqcount_init(per_cpu_ptr(&nft_counter_seq, cpu));
cpu               352 net/netfilter/nft_ct.c 	int cpu;
cpu               354 net/netfilter/nft_ct.c 	for_each_possible_cpu(cpu) {
cpu               355 net/netfilter/nft_ct.c 		ct = per_cpu(nft_ct_pcpu_template, cpu);
cpu               359 net/netfilter/nft_ct.c 		per_cpu(nft_ct_pcpu_template, cpu) = NULL;
cpu               367 net/netfilter/nft_ct.c 	int cpu;
cpu               372 net/netfilter/nft_ct.c 	for_each_possible_cpu(cpu) {
cpu               380 net/netfilter/nft_ct.c 		per_cpu(nft_ct_pcpu_template, cpu) = tmp;
cpu                38 net/netfilter/nft_queue.c 			int cpu = raw_smp_processor_id();
cpu                40 net/netfilter/nft_queue.c 			queue = priv->queuenum + cpu % priv->queues_total;
cpu              1188 net/netfilter/x_tables.c 	int cpu;
cpu              1191 net/netfilter/x_tables.c 		for_each_possible_cpu(cpu)
cpu              1192 net/netfilter/x_tables.c 			kvfree(info->jumpstack[cpu]);
cpu              1297 net/netfilter/x_tables.c 	int cpu;
cpu              1322 net/netfilter/x_tables.c 	for_each_possible_cpu(cpu) {
cpu              1323 net/netfilter/x_tables.c 		i->jumpstack[cpu] = kvmalloc_node(size, GFP_KERNEL,
cpu              1324 net/netfilter/x_tables.c 			cpu_to_node(cpu));
cpu              1325 net/netfilter/x_tables.c 		if (i->jumpstack[cpu] == NULL)
cpu              1359 net/netfilter/x_tables.c 	unsigned int cpu;
cpu              1399 net/netfilter/x_tables.c 	for_each_possible_cpu(cpu) {
cpu              1400 net/netfilter/x_tables.c 		seqcount_t *s = &per_cpu(xt_recseq, cpu);
cpu                94 net/netfilter/xt_NFQUEUE.c 			int cpu = smp_processor_id();
cpu                96 net/netfilter/xt_NFQUEUE.c 			queue = info->queuenum + cpu % info->queues_total;
cpu                37 net/netfilter/xt_cpu.c 	return (info->cpu == smp_processor_id()) ^ info->invert;
cpu                63 net/openvswitch/flow.c 	unsigned int cpu = smp_processor_id();
cpu                66 net/openvswitch/flow.c 	stats = rcu_dereference(flow->stats[cpu]);
cpu                72 net/openvswitch/flow.c 		if (cpu == 0 && unlikely(flow->stats_last_writer != cpu))
cpu                73 net/openvswitch/flow.c 			flow->stats_last_writer = cpu;
cpu                81 net/openvswitch/flow.c 		if (unlikely(flow->stats_last_writer != cpu)) {
cpu                88 net/openvswitch/flow.c 			    likely(!rcu_access_pointer(flow->stats[cpu]))) {
cpu               106 net/openvswitch/flow.c 					rcu_assign_pointer(flow->stats[cpu],
cpu               108 net/openvswitch/flow.c 					cpumask_set_cpu(cpu, &flow->cpu_used_mask);
cpu               112 net/openvswitch/flow.c 			flow->stats_last_writer = cpu;
cpu               129 net/openvswitch/flow.c 	int cpu;
cpu               136 net/openvswitch/flow.c 	for (cpu = 0; cpu < nr_cpu_ids; cpu = cpumask_next(cpu, &flow->cpu_used_mask)) {
cpu               137 net/openvswitch/flow.c 		struct sw_flow_stats *stats = rcu_dereference_ovsl(flow->stats[cpu]);
cpu               157 net/openvswitch/flow.c 	int cpu;
cpu               160 net/openvswitch/flow.c 	for (cpu = 0; cpu < nr_cpu_ids; cpu = cpumask_next(cpu, &flow->cpu_used_mask)) {
cpu               161 net/openvswitch/flow.c 		struct sw_flow_stats *stats = ovsl_dereference(flow->stats[cpu]);
cpu               103 net/openvswitch/flow_table.c 	int cpu;
cpu               110 net/openvswitch/flow_table.c 	for (cpu = 0; cpu < nr_cpu_ids; cpu = cpumask_next(cpu, &flow->cpu_used_mask))
cpu               111 net/openvswitch/flow_table.c 		if (flow->stats[cpu])
cpu               113 net/openvswitch/flow_table.c 					(struct sw_flow_stats __force *)flow->stats[cpu]);
cpu               276 net/packet/af_packet.c 	int cpu = raw_smp_processor_id();
cpu               280 net/packet/af_packet.c 	skb->sender_cpu = cpu + 1;
cpu               282 net/packet/af_packet.c 	skb_record_rx_queue(skb, cpu % dev->real_num_tx_queues);
cpu              1169 net/packet/af_packet.c 	int cpu;
cpu              1175 net/packet/af_packet.c 	for_each_possible_cpu(cpu)
cpu              1176 net/packet/af_packet.c 		refcnt += *per_cpu_ptr(rb->pending_refcnt, cpu);
cpu               104 net/rds/ib_recv.c 	int cpu;
cpu               110 net/rds/ib_recv.c 	for_each_possible_cpu(cpu) {
cpu               111 net/rds/ib_recv.c 		head = per_cpu_ptr(cache->percpu, cpu);
cpu               139 net/rds/ib_recv.c 	int cpu;
cpu               141 net/rds/ib_recv.c 	for_each_possible_cpu(cpu) {
cpu               142 net/rds/ib_recv.c 		head = per_cpu_ptr(cache->percpu, cpu);
cpu                91 net/rds/ib_stats.c 	int cpu;
cpu                96 net/rds/ib_stats.c 	for_each_online_cpu(cpu) {
cpu                97 net/rds/ib_stats.c 		src = (uint64_t *)&(per_cpu(rds_ib_stats, cpu));
cpu               155 net/rds/page.c 	unsigned int cpu;
cpu               157 net/rds/page.c 	for_each_possible_cpu(cpu) {
cpu               160 net/rds/page.c 		rem = &per_cpu(rds_page_remainders, cpu);
cpu               161 net/rds/page.c 		rdsdebug("cpu %u\n", cpu);
cpu               119 net/rds/stats.c 	int cpu;
cpu               129 net/rds/stats.c 	for_each_online_cpu(cpu) {
cpu               130 net/rds/stats.c 		src = (uint64_t *)&(per_cpu(rds_stats, cpu));
cpu                58 net/rds/tcp_stats.c 	int cpu;
cpu                63 net/rds/tcp_stats.c 	for_each_online_cpu(cpu) {
cpu                64 net/rds/tcp_stats.c 		src = (uint64_t *)&(per_cpu(rds_tcp_stats, cpu));
cpu               285 net/sched/cls_basic.c 	int cpu;
cpu               300 net/sched/cls_basic.c 	for_each_possible_cpu(cpu) {
cpu               301 net/sched/cls_basic.c 		struct tc_basic_pcnt *pf = per_cpu_ptr(f->pf, cpu);
cpu               350 net/sched/cls_matchall.c 	int cpu;
cpu               371 net/sched/cls_matchall.c 	for_each_possible_cpu(cpu) {
cpu               372 net/sched/cls_matchall.c 		struct tc_matchall_pcnt *pf = per_cpu_ptr(head->pf, cpu);
cpu              1296 net/sched/cls_u32.c 		int cpu;
cpu              1356 net/sched/cls_u32.c 		for_each_possible_cpu(cpu) {
cpu              1358 net/sched/cls_u32.c 			struct tc_u32_pcnt *pf = per_cpu_ptr(n->pf, cpu);
cpu               170 net/sunrpc/svc.c 	unsigned int cpu;
cpu               177 net/sunrpc/svc.c 	for_each_online_cpu(cpu) {
cpu               179 net/sunrpc/svc.c 		m->to_pool[cpu] = pidx;
cpu               180 net/sunrpc/svc.c 		m->pool_to[pidx] = cpu;
cpu               336 net/sunrpc/svc.c svc_pool_for_cpu(struct svc_serv *serv, int cpu)
cpu               349 net/sunrpc/svc.c 			pidx = m->to_pool[cpu];
cpu               352 net/sunrpc/svc.c 			pidx = m->to_pool[cpu_to_node(cpu)];
cpu               401 net/sunrpc/svc_xprt.c 	int cpu;
cpu               414 net/sunrpc/svc_xprt.c 	cpu = get_cpu();
cpu               415 net/sunrpc/svc_xprt.c 	pool = svc_pool_for_cpu(xprt->xpt_server, cpu);
cpu               541 net/xfrm/xfrm_interface.c 	int cpu;
cpu               543 net/xfrm/xfrm_interface.c 	for_each_possible_cpu(cpu) {
cpu               548 net/xfrm/xfrm_interface.c 		stats = per_cpu_ptr(dev->tstats, cpu);
cpu                44 net/xfrm/xfrm_ipcomp.c 	const int cpu = get_cpu();
cpu                45 net/xfrm/xfrm_ipcomp.c 	u8 *scratch = *per_cpu_ptr(ipcomp_scratches, cpu);
cpu                46 net/xfrm/xfrm_ipcomp.c 	struct crypto_comp *tfm = *per_cpu_ptr(ipcd->tfms, cpu);
cpu               246 net/xfrm/xfrm_ipcomp.c 	int cpu;
cpu               264 net/xfrm/xfrm_ipcomp.c 	for_each_possible_cpu(cpu) {
cpu               265 net/xfrm/xfrm_ipcomp.c 		struct crypto_comp *tfm = *per_cpu_ptr(tfms, cpu);
cpu               275 net/xfrm/xfrm_ipcomp.c 	int cpu;
cpu               302 net/xfrm/xfrm_ipcomp.c 	for_each_possible_cpu(cpu) {
cpu               307 net/xfrm/xfrm_ipcomp.c 		*per_cpu_ptr(tfms, cpu) = tfm;
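
xfrm_ipcomp picks up its per-CPU scratch buffer and compression transform under get_cpu(), which both returns the current CPU id and disables preemption so the buffers cannot be handed to another task mid-use; put_cpu() drops the pin. The skeleton of that pattern, with the actual (de)compression work elided and demo_scratches standing in for the real per-CPU pointers:

    #include <linux/smp.h>
    #include <linux/percpu.h>
    #include <linux/types.h>

    static u8 * __percpu *demo_scratches;  /* assumed allocated with alloc_percpu(u8 *) */

    static int demo_use_scratch(void)
    {
        const int cpu = get_cpu();  /* pins us to this CPU */
        u8 *scratch = *per_cpu_ptr(demo_scratches, cpu);
        int err = 0;

        /* ... use scratch without sleeping; no other task can grab it ... */

        put_cpu();
        return err;
    }
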
cpu               107 samples/bpf/cpustat_kern.c 	u32 key, cpu, pstate_idx;
cpu               113 samples/bpf/cpustat_kern.c 	cpu = ctx->cpu_id;
cpu               115 samples/bpf/cpustat_kern.c 	key = cpu * MAP_OFF_NUM + MAP_OFF_CSTATE_TIME;
cpu               120 samples/bpf/cpustat_kern.c 	key = cpu * MAP_OFF_NUM + MAP_OFF_CSTATE_IDX;
cpu               125 samples/bpf/cpustat_kern.c 	key = cpu * MAP_OFF_NUM + MAP_OFF_PSTATE_TIME;
cpu               130 samples/bpf/cpustat_kern.c 	key = cpu * MAP_OFF_NUM + MAP_OFF_PSTATE_IDX;
cpu               175 samples/bpf/cpustat_kern.c 		key = cpu * MAX_PSTATE_ENTRIES + pstate_idx;
cpu               198 samples/bpf/cpustat_kern.c 		key = cpu * MAX_CSTATE_ENTRIES + prev_state;
cpu               215 samples/bpf/cpustat_kern.c 	u32 key, cpu, pstate_idx;
cpu               218 samples/bpf/cpustat_kern.c 	cpu = ctx->cpu_id;
cpu               220 samples/bpf/cpustat_kern.c 	key = cpu * MAP_OFF_NUM + MAP_OFF_PSTATE_TIME;
cpu               225 samples/bpf/cpustat_kern.c 	key = cpu * MAP_OFF_NUM + MAP_OFF_PSTATE_IDX;
cpu               230 samples/bpf/cpustat_kern.c 	key = cpu * MAP_OFF_NUM + MAP_OFF_CSTATE_IDX;
cpu               272 samples/bpf/cpustat_kern.c 	key = cpu * MAX_PSTATE_ENTRIES + pstate_idx;
cpu                31 samples/bpf/lathist_kern.c 	int cpu = bpf_get_smp_processor_id();
cpu                32 samples/bpf/lathist_kern.c 	u64 *ts = bpf_map_lookup_elem(&my_map, &cpu);
cpu                75 samples/bpf/lathist_kern.c 	int key, cpu;
cpu                78 samples/bpf/lathist_kern.c 	cpu = bpf_get_smp_processor_id();
cpu                79 samples/bpf/lathist_kern.c 	ts = bpf_map_lookup_elem(&my_map, &cpu);
cpu                89 samples/bpf/lathist_kern.c 	key = cpu * MAX_ENTRIES + delta;
cpu               202 samples/bpf/map_perf_test_kern.c 		int cpu = bpf_get_smp_processor_id();
cpu               205 samples/bpf/map_perf_test_kern.c 						      &cpu);
cpu                78 samples/bpf/map_perf_test_user.c static void test_hash_prealloc(int cpu)
cpu                87 samples/bpf/map_perf_test_user.c 	       cpu, max_cnt * 1000000000ll / (time_get_ns() - start_time));
cpu               116 samples/bpf/map_perf_test_user.c static void do_test_lru(enum test_type test, int cpu)
cpu               129 samples/bpf/map_perf_test_user.c 		assert(cpu < MAX_NR_CPUS);
cpu               131 samples/bpf/map_perf_test_user.c 		if (cpu) {
cpu               135 samples/bpf/map_perf_test_user.c 			inner_lru_map_fds[cpu] =
cpu               142 samples/bpf/map_perf_test_user.c 			if (inner_lru_map_fds[cpu] == -1) {
cpu               148 samples/bpf/map_perf_test_user.c 			inner_lru_map_fds[cpu] = map_fd[inner_lru_hash_idx];
cpu               151 samples/bpf/map_perf_test_user.c 		ret = bpf_map_update_elem(outer_fd, &cpu,
cpu               152 samples/bpf/map_perf_test_user.c 					  &inner_lru_map_fds[cpu],
cpu               156 samples/bpf/map_perf_test_user.c 			       cpu, strerror(errno), errno);
cpu               192 samples/bpf/map_perf_test_user.c 	       cpu, test_name,
cpu               196 samples/bpf/map_perf_test_user.c static void test_lru_hash_prealloc(int cpu)
cpu               198 samples/bpf/map_perf_test_user.c 	do_test_lru(LRU_HASH_PREALLOC, cpu);
cpu               201 samples/bpf/map_perf_test_user.c static void test_nocommon_lru_hash_prealloc(int cpu)
cpu               203 samples/bpf/map_perf_test_user.c 	do_test_lru(NOCOMMON_LRU_HASH_PREALLOC, cpu);
cpu               206 samples/bpf/map_perf_test_user.c static void test_inner_lru_hash_prealloc(int cpu)
cpu               208 samples/bpf/map_perf_test_user.c 	do_test_lru(INNER_LRU_HASH_PREALLOC, cpu);
cpu               211 samples/bpf/map_perf_test_user.c static void test_lru_hash_lookup(int cpu)
cpu               213 samples/bpf/map_perf_test_user.c 	do_test_lru(LRU_HASH_LOOKUP, cpu);
cpu               216 samples/bpf/map_perf_test_user.c static void test_percpu_hash_prealloc(int cpu)
cpu               225 samples/bpf/map_perf_test_user.c 	       cpu, max_cnt * 1000000000ll / (time_get_ns() - start_time));
cpu               228 samples/bpf/map_perf_test_user.c static void test_hash_kmalloc(int cpu)
cpu               237 samples/bpf/map_perf_test_user.c 	       cpu, max_cnt * 1000000000ll / (time_get_ns() - start_time));
cpu               240 samples/bpf/map_perf_test_user.c static void test_percpu_hash_kmalloc(int cpu)
cpu               249 samples/bpf/map_perf_test_user.c 	       cpu, max_cnt * 1000000000ll / (time_get_ns() - start_time));
cpu               252 samples/bpf/map_perf_test_user.c static void test_lpm_kmalloc(int cpu)
cpu               261 samples/bpf/map_perf_test_user.c 	       cpu, max_cnt * 1000000000ll / (time_get_ns() - start_time));
cpu               264 samples/bpf/map_perf_test_user.c static void test_hash_lookup(int cpu)
cpu               273 samples/bpf/map_perf_test_user.c 	       cpu, max_cnt * 1000000000ll * 64 / (time_get_ns() - start_time));
cpu               276 samples/bpf/map_perf_test_user.c static void test_array_lookup(int cpu)
cpu               285 samples/bpf/map_perf_test_user.c 	       cpu, max_cnt * 1000000000ll * 64 / (time_get_ns() - start_time));
cpu               293 samples/bpf/map_perf_test_user.c typedef void (*test_func)(int cpu);
cpu               324 samples/bpf/map_perf_test_user.c static void loop(int cpu)
cpu               330 samples/bpf/map_perf_test_user.c 	CPU_SET(cpu, &cpuset);
cpu               335 samples/bpf/map_perf_test_user.c 			test_funcs[i](cpu);
cpu                33 samples/bpf/test_overhead_user.c static void test_task_rename(int cpu)
cpu                53 samples/bpf/test_overhead_user.c 	       cpu, MAX_CNT * 1000000000ll / (time_get_ns() - start_time));
cpu                57 samples/bpf/test_overhead_user.c static void test_urandom_read(int cpu)
cpu                77 samples/bpf/test_overhead_user.c 	       cpu, MAX_CNT * 1000000000ll / (time_get_ns() - start_time));
cpu                81 samples/bpf/test_overhead_user.c static void loop(int cpu, int flags)
cpu                86 samples/bpf/test_overhead_user.c 	CPU_SET(cpu, &cpuset);
cpu                90 samples/bpf/test_overhead_user.c 		test_task_rename(cpu);
cpu                92 samples/bpf/test_overhead_user.c 		test_urandom_read(cpu);
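
Both map_perf_test_user.c and test_overhead_user.c pin the benchmark loop to one CPU before timing anything, using CPU_SET() and sched_setaffinity(). A self-contained user-space helper doing the same thing:

    #define _GNU_SOURCE
    #include <sched.h>
    #include <stdio.h>

    /* pin the calling thread to one CPU before timing anything on it */
    static int pin_to_cpu(int cpu)
    {
        cpu_set_t cpuset;

        CPU_ZERO(&cpuset);
        CPU_SET(cpu, &cpuset);
        if (sched_setaffinity(0, sizeof(cpuset), &cpuset)) {
            perror("sched_setaffinity");
            return -1;
        }
        return 0;
    }
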
cpu                44 samples/bpf/trace_event_kern.c 	u32 cpu = bpf_get_smp_processor_id();
cpu                57 samples/bpf/trace_event_kern.c 		bpf_trace_printk(fmt, sizeof(fmt), cpu, ctx->sample_period,
cpu                35 samples/bpf/trace_output_user.c static void print_bpf_output(void *ctx, int cpu, void *data, __u32 size)
cpu                24 samples/bpf/tracex6_user.c static void check_on_cpu(int cpu, struct perf_event_attr *attr)
cpu                33 samples/bpf/tracex6_user.c 	CPU_SET(cpu, &set);
cpu                36 samples/bpf/tracex6_user.c 	pmu_fd = sys_perf_event_open(attr, -1/*pid*/, cpu/*cpu*/, -1/*group_fd*/, 0);
cpu                38 samples/bpf/tracex6_user.c 		fprintf(stderr, "sys_perf_event_open failed on CPU %d\n", cpu);
cpu                42 samples/bpf/tracex6_user.c 	assert(bpf_map_update_elem(map_fd[0], &cpu, &pmu_fd, BPF_ANY) == 0);
cpu                45 samples/bpf/tracex6_user.c 	bpf_map_get_next_key(map_fd[1], &cpu, NULL);
cpu                47 samples/bpf/tracex6_user.c 	if (bpf_map_lookup_elem(map_fd[1], &cpu, &value)) {
cpu                48 samples/bpf/tracex6_user.c 		fprintf(stderr, "Value missing for CPU %d\n", cpu);
cpu                52 samples/bpf/tracex6_user.c 		fprintf(stderr, "CPU %d: %llu\n", cpu, value);
cpu                55 samples/bpf/tracex6_user.c 	if (bpf_map_lookup_elem(map_fd[2], &cpu, &value2)) {
cpu                56 samples/bpf/tracex6_user.c 		fprintf(stderr, "Value2 missing for CPU %d\n", cpu);
cpu                60 samples/bpf/tracex6_user.c 		fprintf(stderr, "CPU %d: counter: %llu, enabled: %llu, running: %llu\n", cpu,
cpu                65 samples/bpf/tracex6_user.c 	assert(bpf_map_delete_elem(map_fd[0], &cpu) == 0 || error);
cpu                68 samples/bpf/tracex6_user.c 	assert(bpf_map_delete_elem(map_fd[1], &cpu) == 0 || error);
cpu               153 samples/bpf/xdp_monitor_kern.c 	int cpu;		//	offset:16; size:4; signed:1;
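
tracex6_user.c opens one perf counter per CPU (pid = -1, cpu = target) and stores the resulting fd in a BPF map keyed by CPU id. A minimal helper showing just the per-CPU perf_event_open() call for a hardware cycles counter; the event type and config here are illustrative:

    #define _GNU_SOURCE
    #include <linux/perf_event.h>
    #include <sys/syscall.h>
    #include <unistd.h>
    #include <string.h>

    /* open a CPU-wide cycles counter on one CPU; returns the fd or -1 */
    static int open_cycles_on_cpu(int cpu)
    {
        struct perf_event_attr attr;

        memset(&attr, 0, sizeof(attr));
        attr.type = PERF_TYPE_HARDWARE;
        attr.size = sizeof(attr);
        attr.config = PERF_COUNT_HW_CPU_CYCLES;

        return syscall(__NR_perf_event_open, &attr, -1 /* pid */,
                       cpu, -1 /* group_fd */, 0 /* flags */);
    }
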
cpu               188 samples/bpf/xdp_monitor_kern.c 	int cpu;		//	offset:16; size:4; signed:1;
cpu               128 samples/bpf/xdp_monitor_user.c 	struct datarec *cpu;
cpu               137 samples/bpf/xdp_monitor_user.c 	struct u64rec *cpu;
cpu               169 samples/bpf/xdp_monitor_user.c 		rec->cpu[i].processed = values[i].processed;
cpu               171 samples/bpf/xdp_monitor_user.c 		rec->cpu[i].dropped = values[i].dropped;
cpu               173 samples/bpf/xdp_monitor_user.c 		rec->cpu[i].info = values[i].info;
cpu               175 samples/bpf/xdp_monitor_user.c 		rec->cpu[i].err = values[i].err;
cpu               203 samples/bpf/xdp_monitor_user.c 		rec->cpu[i].processed = values[i].processed;
cpu               320 samples/bpf/xdp_monitor_user.c 			struct u64rec *r = &rec->cpu[i];
cpu               321 samples/bpf/xdp_monitor_user.c 			struct u64rec *p = &prev->cpu[i];
cpu               345 samples/bpf/xdp_monitor_user.c 			struct u64rec *r = &rec->cpu[i];
cpu               346 samples/bpf/xdp_monitor_user.c 			struct u64rec *p = &prev->cpu[i];
cpu               371 samples/bpf/xdp_monitor_user.c 			struct datarec *r = &rec->cpu[i];
cpu               372 samples/bpf/xdp_monitor_user.c 			struct datarec *p = &prev->cpu[i];
cpu               410 samples/bpf/xdp_monitor_user.c 			struct datarec *r = &rec->cpu[i];
cpu               411 samples/bpf/xdp_monitor_user.c 			struct datarec *p = &prev->cpu[i];
cpu               443 samples/bpf/xdp_monitor_user.c 			struct datarec *r = &rec->cpu[i];
cpu               444 samples/bpf/xdp_monitor_user.c 			struct datarec *p = &prev->cpu[i];
cpu               541 samples/bpf/xdp_monitor_user.c 		rec->xdp_redirect[i].cpu = alloc_rec_per_cpu(rec_sz);
cpu               544 samples/bpf/xdp_monitor_user.c 		rec->xdp_exception[i].cpu = alloc_rec_per_cpu(rec_sz);
cpu               547 samples/bpf/xdp_monitor_user.c 	rec->xdp_cpumap_kthread.cpu = alloc_rec_per_cpu(rec_sz);
cpu               548 samples/bpf/xdp_monitor_user.c 	rec->xdp_devmap_xmit.cpu    = alloc_rec_per_cpu(rec_sz);
cpu               551 samples/bpf/xdp_monitor_user.c 		rec->xdp_cpumap_enqueue[i].cpu = alloc_rec_per_cpu(rec_sz);
cpu               561 samples/bpf/xdp_monitor_user.c 		free(r->xdp_redirect[i].cpu);
cpu               564 samples/bpf/xdp_monitor_user.c 		free(r->xdp_exception[i].cpu);
cpu               566 samples/bpf/xdp_monitor_user.c 	free(r->xdp_cpumap_kthread.cpu);
cpu               567 samples/bpf/xdp_monitor_user.c 	free(r->xdp_devmap_xmit.cpu);
cpu               570 samples/bpf/xdp_monitor_user.c 		free(r->xdp_cpumap_enqueue[i].cpu);
cpu               658 samples/bpf/xdp_redirect_cpu_kern.c 	int cpu;		//	offset:16; size:4; signed:1;
cpu               698 samples/bpf/xdp_redirect_cpu_kern.c 	int cpu;		//	offset:16; size:4; signed:1;
cpu               166 samples/bpf/xdp_redirect_cpu_user.c 	struct datarec *cpu;
cpu               196 samples/bpf/xdp_redirect_cpu_user.c 		rec->cpu[i].processed = values[i].processed;
cpu               198 samples/bpf/xdp_redirect_cpu_user.c 		rec->cpu[i].dropped = values[i].dropped;
cpu               200 samples/bpf/xdp_redirect_cpu_user.c 		rec->cpu[i].issue = values[i].issue;
cpu               236 samples/bpf/xdp_redirect_cpu_user.c 	rec->rx_cnt.cpu    = alloc_record_per_cpu();
cpu               237 samples/bpf/xdp_redirect_cpu_user.c 	rec->redir_err.cpu = alloc_record_per_cpu();
cpu               238 samples/bpf/xdp_redirect_cpu_user.c 	rec->kthread.cpu   = alloc_record_per_cpu();
cpu               239 samples/bpf/xdp_redirect_cpu_user.c 	rec->exception.cpu = alloc_record_per_cpu();
cpu               241 samples/bpf/xdp_redirect_cpu_user.c 		rec->enq[i].cpu = alloc_record_per_cpu();
cpu               251 samples/bpf/xdp_redirect_cpu_user.c 		free(r->enq[i].cpu);
cpu               252 samples/bpf/xdp_redirect_cpu_user.c 	free(r->exception.cpu);
cpu               253 samples/bpf/xdp_redirect_cpu_user.c 	free(r->kthread.cpu);
cpu               254 samples/bpf/xdp_redirect_cpu_user.c 	free(r->redir_err.cpu);
cpu               255 samples/bpf/xdp_redirect_cpu_user.c 	free(r->rx_cnt.cpu);
cpu               334 samples/bpf/xdp_redirect_cpu_user.c 			struct datarec *r = &rec->cpu[i];
cpu               335 samples/bpf/xdp_redirect_cpu_user.c 			struct datarec *p = &prev->cpu[i];
cpu               362 samples/bpf/xdp_redirect_cpu_user.c 			struct datarec *r = &rec->cpu[i];
cpu               363 samples/bpf/xdp_redirect_cpu_user.c 			struct datarec *p = &prev->cpu[i];
cpu               399 samples/bpf/xdp_redirect_cpu_user.c 			struct datarec *r = &rec->cpu[i];
cpu               400 samples/bpf/xdp_redirect_cpu_user.c 			struct datarec *p = &prev->cpu[i];
cpu               428 samples/bpf/xdp_redirect_cpu_user.c 			struct datarec *r = &rec->cpu[i];
cpu               429 samples/bpf/xdp_redirect_cpu_user.c 			struct datarec *p = &prev->cpu[i];
cpu               450 samples/bpf/xdp_redirect_cpu_user.c 			struct datarec *r = &rec->cpu[i];
cpu               451 samples/bpf/xdp_redirect_cpu_user.c 			struct datarec *p = &prev->cpu[i];
cpu               499 samples/bpf/xdp_redirect_cpu_user.c static int create_cpu_entry(__u32 cpu, __u32 queue_size,
cpu               509 samples/bpf/xdp_redirect_cpu_user.c 	ret = bpf_map_update_elem(cpu_map_fd, &cpu, &queue_size, 0);
cpu               518 samples/bpf/xdp_redirect_cpu_user.c 	ret = bpf_map_update_elem(cpus_available_map_fd, &avail_idx, &cpu, 0);
cpu               541 samples/bpf/xdp_redirect_cpu_user.c 	       new ? "Add-new":"Replace", cpu, avail_idx,
cpu               190 samples/bpf/xdp_rxq_info_user.c 	struct datarec *cpu;
cpu               243 samples/bpf/xdp_rxq_info_user.c 		rec->rxq[i].cpu = alloc_record_per_cpu();
cpu               245 samples/bpf/xdp_rxq_info_user.c 	rec->stats.cpu = alloc_record_per_cpu();
cpu               255 samples/bpf/xdp_rxq_info_user.c 		free(r->rxq[i].cpu);
cpu               258 samples/bpf/xdp_rxq_info_user.c 	free(r->stats.cpu);
cpu               281 samples/bpf/xdp_rxq_info_user.c 		rec->cpu[i].processed = values[i].processed;
cpu               283 samples/bpf/xdp_rxq_info_user.c 		rec->cpu[i].issue = values[i].issue;
cpu               370 samples/bpf/xdp_rxq_info_user.c 			struct datarec *r = &rec->cpu[i];
cpu               371 samples/bpf/xdp_rxq_info_user.c 			struct datarec *p = &prev->cpu[i];
cpu               404 samples/bpf/xdp_rxq_info_user.c 			struct datarec *r = &rec->cpu[i];
cpu               405 samples/bpf/xdp_rxq_info_user.c 			struct datarec *p = &prev->cpu[i];
cpu                75 samples/bpf/xdp_sample_pkts_user.c static void print_bpf_output(void *ctx, int cpu, void *data, __u32 size)
cpu              1503 security/selinux/selinuxfs.c 	int cpu;
cpu              1505 security/selinux/selinuxfs.c 	for (cpu = *idx; cpu < nr_cpu_ids; ++cpu) {
cpu              1506 security/selinux/selinuxfs.c 		if (!cpu_possible(cpu))
cpu              1508 security/selinux/selinuxfs.c 		*idx = cpu + 1;
cpu              1509 security/selinux/selinuxfs.c 		return &per_cpu(avc_cache_stats, cpu);
cpu               211 sound/soc/generic/audio-graph-card.c 	struct device_node *ep = li->cpu ? cpu_ep : codec_ep;
cpu               221 sound/soc/generic/audio-graph-card.c 	if (!li->cpu && dup_codec)
cpu               232 sound/soc/generic/audio-graph-card.c 	if (li->cpu) {
cpu               346 sound/soc/generic/audio-graph-card.c 	if (!li->cpu)
cpu               492 sound/soc/generic/audio-graph-card.c 	for (li.cpu = 1; li.cpu >= 0; li.cpu--) {
cpu                96 sound/soc/generic/simple-card.c 				 struct device_node *cpu,
cpu               101 sound/soc/generic/simple-card.c 	struct device_node *node = of_get_parent(cpu);
cpu               109 sound/soc/generic/simple-card.c 	of_property_read_u32(cpu,	prop, &props->mclk_fs);
cpu               138 sound/soc/generic/simple-card.c 	if (li->cpu == (np == codec))
cpu               149 sound/soc/generic/simple-card.c 	if (li->cpu) {
cpu               256 sound/soc/generic/simple-card.c 	struct device_node *cpu = NULL;
cpu               269 sound/soc/generic/simple-card.c 	if (!li->cpu || np == codec)
cpu               272 sound/soc/generic/simple-card.c 	cpu  = np;
cpu               295 sound/soc/generic/simple-card.c 	simple_parse_mclk_fs(top, cpu, codec, dai_props, prefix);
cpu               297 sound/soc/generic/simple-card.c 	ret = asoc_simple_parse_cpu(cpu, dai_link, &single_cpu);
cpu               309 sound/soc/generic/simple-card.c 	ret = asoc_simple_parse_tdm(cpu, cpu_dai);
cpu               317 sound/soc/generic/simple-card.c 	ret = asoc_simple_parse_clk_cpu(dev, cpu, dai_link, cpu_dai);
cpu               477 sound/soc/generic/simple-card.c 	for (li.cpu = 1; li.cpu >= 0; li.cpu--) {
cpu               562 sound/soc/meson/axg-card.c 	struct snd_soc_dai_link_component *cpu;
cpu               565 sound/soc/meson/axg-card.c 	cpu = devm_kzalloc(card->dev, sizeof(*cpu), GFP_KERNEL);
cpu               566 sound/soc/meson/axg-card.c 	if (!cpu)
cpu               569 sound/soc/meson/axg-card.c 	dai_link->cpus = cpu;
cpu               118 sound/soc/qcom/apq8016_sbc.c 	struct device_node *np, *codec, *cpu, *node  = dev->of_node;
cpu               164 sound/soc/qcom/apq8016_sbc.c 		cpu = of_get_child_by_name(np, "cpu");
cpu               167 sound/soc/qcom/apq8016_sbc.c 		if (!cpu || !codec) {
cpu               173 sound/soc/qcom/apq8016_sbc.c 		link->cpus->of_node = of_parse_phandle(cpu, "sound-dai", 0);
cpu               180 sound/soc/qcom/apq8016_sbc.c 		ret = snd_soc_of_get_dai_name(cpu, &link->cpus->dai_name);
cpu               204 sound/soc/qcom/apq8016_sbc.c 		of_node_put(cpu);
cpu               212 sound/soc/qcom/apq8016_sbc.c 	of_node_put(cpu);
cpu                13 sound/soc/qcom/common.c 	struct device_node *cpu = NULL;
cpu                62 sound/soc/qcom/common.c 		cpu = of_get_child_by_name(np, "cpu");
cpu                66 sound/soc/qcom/common.c 		if (!cpu) {
cpu                72 sound/soc/qcom/common.c 		ret = of_parse_phandle_with_args(cpu, "sound-dai",
cpu                81 sound/soc/qcom/common.c 		ret = snd_soc_of_get_dai_name(cpu, &link->cpus->dai_name);
cpu               125 sound/soc/qcom/common.c 		of_node_put(cpu);
cpu               133 sound/soc/qcom/common.c 	of_node_put(cpu);
cpu               145 sound/soc/samsung/littlemill.c SND_SOC_DAILINK_DEFS(cpu,
cpu               162 sound/soc/samsung/littlemill.c 		SND_SOC_DAILINK_REG(cpu),
cpu                85 sound/soc/samsung/lowland.c SND_SOC_DAILINK_DEFS(cpu,
cpu               105 sound/soc/samsung/lowland.c 		SND_SOC_DAILINK_REG(cpu),
cpu               203 sound/soc/samsung/odroid.c 	struct device_node *cpu, *codec;
cpu               244 sound/soc/samsung/odroid.c 	cpu = of_get_child_by_name(dev->of_node, "cpu");
cpu               254 sound/soc/samsung/odroid.c 	num_pcms = of_count_phandle_with_args(cpu, "sound-dai",
cpu               263 sound/soc/samsung/odroid.c 		ret = of_parse_phandle_with_args(cpu, "sound-dai",
cpu               281 sound/soc/samsung/odroid.c 		cpu_dai = of_parse_phandle(cpu, "sound-dai", 0);
cpu               286 sound/soc/samsung/odroid.c 	of_node_put(cpu);
cpu               132 sound/soc/samsung/snow.c 	struct device_node *cpu, *codec;
cpu               161 sound/soc/samsung/snow.c 	cpu = of_get_child_by_name(dev->of_node, "cpu");
cpu               163 sound/soc/samsung/snow.c 	if (cpu) {
cpu               166 sound/soc/samsung/snow.c 		link->cpus->of_node = of_parse_phandle(cpu, "sound-dai", 0);
cpu               167 sound/soc/samsung/snow.c 		of_node_put(cpu);
cpu               112 sound/soc/samsung/tobermory.c SND_SOC_DAILINK_DEFS(cpu,
cpu               124 sound/soc/samsung/tobermory.c 		SND_SOC_DAILINK_REG(cpu),
cpu               205 tools/arch/mips/include/uapi/asm/kvm.h 	__u32 cpu;
cpu                36 tools/bpf/bpftool/map_perf_ring.c 	unsigned int cpu;
cpu                61 tools/bpf/bpftool/map_perf_ring.c 	int cpu;
cpu                66 tools/bpf/bpftool/map_perf_ring.c print_bpf_output(void *private_data, int cpu, struct perf_event_header *event)
cpu                75 tools/bpf/bpftool/map_perf_ring.c 	int idx = ctx->all_cpus ? cpu : ctx->idx;
cpu                82 tools/bpf/bpftool/map_perf_ring.c 		jsonw_uint(json_wtr, cpu);
cpu               104 tools/bpf/bpftool/map_perf_ring.c 			       cpu, idx);
cpu               131 tools/bpf/bpftool/map_perf_ring.c 		.cpu = -1,
cpu               158 tools/bpf/bpftool/map_perf_ring.c 			ctx.cpu = strtoul(*argv, &endptr, 0);
cpu               185 tools/bpf/bpftool/map_perf_ring.c 		if (ctx.idx == -1 || ctx.cpu == -1) {
cpu               190 tools/bpf/bpftool/map_perf_ring.c 		ctx.cpu = 0;
cpu               198 tools/bpf/bpftool/map_perf_ring.c 	opts.cpus = &ctx.cpu;
cpu                25 tools/include/linux/coresight-pmu.h static inline int coresight_get_trace_id(int cpu)
cpu                33 tools/include/linux/coresight-pmu.h 	return (CORESIGHT_ETM_PMU_SEED + (cpu * 2));
cpu                10 tools/lib/api/cpu.c 	int cpu;
cpu                12 tools/lib/api/cpu.c 	if (sysfs__read_int("devices/system/cpu/online", &cpu) < 0)
cpu                16 tools/lib/api/cpu.c 		 "devices/system/cpu/cpu%d/cpufreq/cpuinfo_max_freq", cpu);
cpu              5319 tools/lib/bpf/libbpf.c 	int cpu;
cpu              5345 tools/lib/bpf/libbpf.c 		pr_warning("failed to munmap cpu_buf #%d\n", cpu_buf->cpu);
cpu              5377 tools/lib/bpf/libbpf.c 			  int cpu, int map_key)
cpu              5388 tools/lib/bpf/libbpf.c 	cpu_buf->cpu = cpu;
cpu              5391 tools/lib/bpf/libbpf.c 	cpu_buf->fd = syscall(__NR_perf_event_open, attr, -1 /* pid */, cpu,
cpu              5396 tools/lib/bpf/libbpf.c 			   cpu, libbpf_strerror_r(err, msg, sizeof(msg)));
cpu              5407 tools/lib/bpf/libbpf.c 			   cpu, libbpf_strerror_r(err, msg, sizeof(msg)));
cpu              5414 tools/lib/bpf/libbpf.c 			   cpu, libbpf_strerror_r(err, msg, sizeof(msg)));
cpu              5542 tools/lib/bpf/libbpf.c 		int cpu, map_key;
cpu              5544 tools/lib/bpf/libbpf.c 		cpu = p->cpu_cnt > 0 ? p->cpus[i] : i;
cpu              5547 tools/lib/bpf/libbpf.c 		cpu_buf = perf_buffer__open_cpu_buf(pb, p->attr, cpu, map_key);
cpu              5560 tools/lib/bpf/libbpf.c 				   cpu, map_key, cpu_buf->fd,
cpu              5571 tools/lib/bpf/libbpf.c 				   cpu, cpu_buf->fd,
cpu              5607 tools/lib/bpf/libbpf.c 		return pb->event_cb(pb->ctx, cpu_buf->cpu, e);
cpu              5614 tools/lib/bpf/libbpf.c 			pb->sample_cb(pb->ctx, cpu_buf->cpu, s->data, s->size);
cpu              5621 tools/lib/bpf/libbpf.c 			pb->lost_cb(pb->ctx, cpu_buf->cpu, s->lost);
cpu               364 tools/lib/bpf/libbpf.h typedef void (*perf_buffer_sample_fn)(void *ctx, int cpu,
cpu               366 tools/lib/bpf/libbpf.h typedef void (*perf_buffer_lost_fn)(void *ctx, int cpu, __u64 cnt);
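The perf_buffer typedefs indexed above are truncated by the listing; judging from the dispatch call shown in libbpf.c (pb->sample_cb(pb->ctx, cpu_buf->cpu, s->data, s->size) and pb->lost_cb(pb->ctx, cpu_buf->cpu, s->lost)), a consumer's callbacks would look roughly like the sketch below. The callback bodies, printf usage, and the trailing parameter names are illustrative assumptions, not taken from the listing.

#include <stdio.h>
#include <linux/types.h>
#include <bpf/libbpf.h>

/* assumed full shape of perf_buffer_sample_fn: trailing params inferred from
 * the pb->sample_cb(..., s->data, s->size) dispatch in the excerpt above */
static void on_sample(void *ctx, int cpu, void *data, __u32 size)
{
	(void)ctx;
	(void)data;
	/* "cpu" is the CPU whose per-CPU ring produced this record */
	printf("cpu %d: %u byte sample\n", cpu, size);
}

/* assumed full shape of perf_buffer_lost_fn */
static void on_lost(void *ctx, int cpu, __u64 cnt)
{
	(void)ctx;
	fprintf(stderr, "cpu %d: lost %llu records\n", cpu, (unsigned long long)cnt);
}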
cpu                35 tools/lib/lockdep/include/liblockdep/common.h 	int			cpu;
cpu              5586 tools/lib/traceevent/event-parse.c 		param = record->cpu;
cpu                46 tools/lib/traceevent/event-parse.h 	int			cpu;
cpu                24 tools/lib/traceevent/parse-filter.c static struct tep_format_field cpu = {
cpu               378 tools/lib/traceevent/parse-filter.c 				field = &cpu;
cpu              1558 tools/lib/traceevent/parse-filter.c 	if (field == &cpu)
cpu              1559 tools/lib/traceevent/parse-filter.c 		return record->cpu;
cpu                89 tools/lib/traceevent/plugins/plugin_function.c static int add_and_get_index(const char *parent, const char *child, int cpu)
cpu                93 tools/lib/traceevent/plugins/plugin_function.c 	if (cpu < 0)
cpu                96 tools/lib/traceevent/plugins/plugin_function.c 	if (cpu > cpus) {
cpu                99 tools/lib/traceevent/plugins/plugin_function.c 		ptr = realloc(fstack, sizeof(*fstack) * (cpu + 1));
cpu               108 tools/lib/traceevent/plugins/plugin_function.c 		for (i = cpus + 1; i <= cpu; i++)
cpu               110 tools/lib/traceevent/plugins/plugin_function.c 		cpus = cpu;
cpu               113 tools/lib/traceevent/plugins/plugin_function.c 	for (i = 0; i < fstack[cpu].size && fstack[cpu].stack[i]; i++) {
cpu               114 tools/lib/traceevent/plugins/plugin_function.c 		if (strcmp(parent, fstack[cpu].stack[i]) == 0) {
cpu               115 tools/lib/traceevent/plugins/plugin_function.c 			add_child(&fstack[cpu], child, i+1);
cpu               121 tools/lib/traceevent/plugins/plugin_function.c 	add_child(&fstack[cpu], parent, 0);
cpu               122 tools/lib/traceevent/plugins/plugin_function.c 	add_child(&fstack[cpu], child, 1);
cpu               147 tools/lib/traceevent/plugins/plugin_function.c 		index = add_and_get_index(parent, func, record->cpu);
cpu                59 tools/perf/arch/arm/util/cs-etm.c static bool cs_etm_is_etmv4(struct auxtrace_record *itr, int cpu);
cpu                62 tools/perf/arch/arm/util/cs-etm.c 				 struct evsel *evsel, int cpu)
cpu                73 tools/perf/arch/arm/util/cs-etm.c 	if (!cs_etm_is_etmv4(itr, cpu))
cpu                78 tools/perf/arch/arm/util/cs-etm.c 		 cpu, metadata_etmv4_ro[CS_ETMV4_TRCIDR2]);
cpu               111 tools/perf/arch/arm/util/cs-etm.c 				struct evsel *evsel, int cpu)
cpu               122 tools/perf/arch/arm/util/cs-etm.c 	if (!cs_etm_is_etmv4(itr, cpu))
cpu               127 tools/perf/arch/arm/util/cs-etm.c 		 cpu, metadata_etmv4_ro[CS_ETMV4_TRCIDR0]);
cpu               531 tools/perf/arch/arm/util/cs-etm.c static bool cs_etm_is_etmv4(struct auxtrace_record *itr, int cpu)
cpu               543 tools/perf/arch/arm/util/cs-etm.c 		 cpu, metadata_etmv4_ro[CS_ETMV4_TRCIDR0]);
cpu               553 tools/perf/arch/arm/util/cs-etm.c static int cs_etm_get_ro(struct perf_pmu *pmu, int cpu, const char *path)
cpu               560 tools/perf/arch/arm/util/cs-etm.c 	snprintf(pmu_path, PATH_MAX, "cpu%d/%s", cpu, path);
cpu               569 tools/perf/arch/arm/util/cs-etm.c static void cs_etm_get_metadata(int cpu, u32 *offset,
cpu               580 tools/perf/arch/arm/util/cs-etm.c 	if (cs_etm_is_etmv4(itr, cpu)) {
cpu               587 tools/perf/arch/arm/util/cs-etm.c 						coresight_get_trace_id(cpu);
cpu               590 tools/perf/arch/arm/util/cs-etm.c 			cs_etm_get_ro(cs_etm_pmu, cpu,
cpu               593 tools/perf/arch/arm/util/cs-etm.c 			cs_etm_get_ro(cs_etm_pmu, cpu,
cpu               596 tools/perf/arch/arm/util/cs-etm.c 			cs_etm_get_ro(cs_etm_pmu, cpu,
cpu               599 tools/perf/arch/arm/util/cs-etm.c 			cs_etm_get_ro(cs_etm_pmu, cpu,
cpu               602 tools/perf/arch/arm/util/cs-etm.c 			cs_etm_get_ro(cs_etm_pmu, cpu,
cpu               614 tools/perf/arch/arm/util/cs-etm.c 						coresight_get_trace_id(cpu);
cpu               617 tools/perf/arch/arm/util/cs-etm.c 			cs_etm_get_ro(cs_etm_pmu, cpu,
cpu               620 tools/perf/arch/arm/util/cs-etm.c 			cs_etm_get_ro(cs_etm_pmu, cpu,
cpu               629 tools/perf/arch/arm/util/cs-etm.c 	info->priv[*offset + CS_ETM_CPU] = cpu;
cpu                20 tools/perf/arch/arm64/util/header.c 	int cpu;
cpu                34 tools/perf/arch/arm64/util/header.c 	for (cpu = 0; cpu < perf_cpu_map__nr(cpus); cpu++) {
cpu                36 tools/perf/arch/arm64/util/header.c 				sysfs, cpus->map[cpu]);
cpu               224 tools/perf/bench/epoll-ctl.c static int do_threads(struct worker *worker, struct perf_cpu_map *cpu)
cpu               258 tools/perf/bench/epoll-ctl.c 			CPU_SET(cpu->map[i % cpu->nr], &cpuset);
cpu               306 tools/perf/bench/epoll-ctl.c 	struct perf_cpu_map *cpu;
cpu               320 tools/perf/bench/epoll-ctl.c 	cpu = perf_cpu_map__new(NULL);
cpu               321 tools/perf/bench/epoll-ctl.c 	if (!cpu)
cpu               337 tools/perf/bench/epoll-ctl.c 		nthreads = cpu->nr;
cpu               366 tools/perf/bench/epoll-ctl.c 	do_threads(worker, cpu);
cpu               293 tools/perf/bench/epoll-wait.c static int do_threads(struct worker *worker, struct perf_cpu_map *cpu)
cpu               347 tools/perf/bench/epoll-wait.c 			CPU_SET(cpu->map[i % cpu->nr], &cpuset);
cpu               420 tools/perf/bench/epoll-wait.c 	struct perf_cpu_map *cpu;
cpu               434 tools/perf/bench/epoll-wait.c 	cpu = perf_cpu_map__new(NULL);
cpu               435 tools/perf/bench/epoll-wait.c 	if (!cpu)
cpu               456 tools/perf/bench/epoll-wait.c 		nthreads = cpu->nr - 1;
cpu               484 tools/perf/bench/epoll-wait.c 	do_threads(worker, cpu);
cpu               128 tools/perf/bench/futex-hash.c 	struct perf_cpu_map *cpu;
cpu               136 tools/perf/bench/futex-hash.c 	cpu = perf_cpu_map__new(NULL);
cpu               137 tools/perf/bench/futex-hash.c 	if (!cpu)
cpu               145 tools/perf/bench/futex-hash.c 		nthreads = cpu->nr;
cpu               172 tools/perf/bench/futex-hash.c 		CPU_SET(cpu->map[i % cpu->nr], &cpuset);
cpu               225 tools/perf/bench/futex-hash.c 	free(cpu);
cpu               120 tools/perf/bench/futex-lock-pi.c 			   struct perf_cpu_map *cpu)
cpu               138 tools/perf/bench/futex-lock-pi.c 		CPU_SET(cpu->map[i % cpu->nr], &cpuset);
cpu               154 tools/perf/bench/futex-lock-pi.c 	struct perf_cpu_map *cpu;
cpu               160 tools/perf/bench/futex-lock-pi.c 	cpu = perf_cpu_map__new(NULL);
cpu               161 tools/perf/bench/futex-lock-pi.c 	if (!cpu)
cpu               169 tools/perf/bench/futex-lock-pi.c 		nthreads = cpu->nr;
cpu               190 tools/perf/bench/futex-lock-pi.c 	create_threads(worker, thread_attr, cpu);
cpu                88 tools/perf/bench/futex-requeue.c 			  pthread_attr_t thread_attr, struct perf_cpu_map *cpu)
cpu                98 tools/perf/bench/futex-requeue.c 		CPU_SET(cpu->map[i % cpu->nr], &cpuset);
cpu               121 tools/perf/bench/futex-requeue.c 	struct perf_cpu_map *cpu;
cpu               127 tools/perf/bench/futex-requeue.c 	cpu = perf_cpu_map__new(NULL);
cpu               128 tools/perf/bench/futex-requeue.c 	if (!cpu)
cpu               136 tools/perf/bench/futex-requeue.c 		nthreads = cpu->nr;
cpu               164 tools/perf/bench/futex-requeue.c 		block_threads(worker, thread_attr, cpu);
cpu               142 tools/perf/bench/futex-wake-parallel.c 			  struct perf_cpu_map *cpu)
cpu               152 tools/perf/bench/futex-wake-parallel.c 		CPU_SET(cpu->map[i % cpu->nr], &cpuset);
cpu               228 tools/perf/bench/futex-wake-parallel.c 	struct perf_cpu_map *cpu;
cpu               241 tools/perf/bench/futex-wake-parallel.c 	cpu = perf_cpu_map__new(NULL);
cpu               242 tools/perf/bench/futex-wake-parallel.c 	if (!cpu)
cpu               246 tools/perf/bench/futex-wake-parallel.c 		nblocked_threads = cpu->nr;
cpu               286 tools/perf/bench/futex-wake-parallel.c 		block_threads(blocked_worker, thread_attr, cpu);
cpu                94 tools/perf/bench/futex-wake.c 			  pthread_attr_t thread_attr, struct perf_cpu_map *cpu)
cpu               104 tools/perf/bench/futex-wake.c 		CPU_SET(cpu->map[i % cpu->nr], &cpuset);
cpu               127 tools/perf/bench/futex-wake.c 	struct perf_cpu_map *cpu;
cpu               135 tools/perf/bench/futex-wake.c 	cpu = perf_cpu_map__new(NULL);
cpu               136 tools/perf/bench/futex-wake.c 	if (!cpu)
cpu               144 tools/perf/bench/futex-wake.c 		nthreads = cpu->nr;
cpu               169 tools/perf/bench/futex-wake.c 		block_threads(worker, thread_attr, cpu);
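The bench/epoll and bench/futex excerpts above all repeat the same affinity idiom, CPU_SET(cpu->map[i % cpu->nr], &cpuset): worker i is pinned round-robin onto the CPUs of the map built by perf_cpu_map__new(NULL). A minimal sketch of that idiom follows; the helper name and argument list are invented for the example.

#define _GNU_SOURCE
#include <pthread.h>
#include <sched.h>

/* pin worker i round-robin onto one of the nr CPUs listed in map[] */
static int pin_worker(pthread_attr_t *attr, const int *map, int nr, int i)
{
	cpu_set_t cpuset;

	CPU_ZERO(&cpuset);
	CPU_SET(map[i % nr], &cpuset);	/* same wrap-around indexing as the benches */

	return pthread_attr_setaffinity_np(attr, sizeof(cpuset), &cpuset);
}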
cpu               250 tools/perf/bench/numa.c 	struct bitmask *cpu = numa_allocate_cpumask();
cpu               253 tools/perf/bench/numa.c 	if (cpu && !numa_node_to_cpus(node, cpu)) {
cpu               254 tools/perf/bench/numa.c 		for (i = 0; i < cpu->size; i++) {
cpu               255 tools/perf/bench/numa.c 			if (numa_bitmask_isbitset(cpu, i))
cpu               274 tools/perf/bench/numa.c 		int cpu;
cpu               276 tools/perf/bench/numa.c 		for (cpu = 0; cpu < g->p.nr_cpus; cpu++)
cpu               277 tools/perf/bench/numa.c 			CPU_SET(cpu, &mask);
cpu               293 tools/perf/bench/numa.c 	int cpu;
cpu               305 tools/perf/bench/numa.c 		for (cpu = 0; cpu < g->p.nr_cpus; cpu++)
cpu               306 tools/perf/bench/numa.c 			CPU_SET(cpu, &mask);
cpu               313 tools/perf/bench/numa.c 		for (cpu = cpu_start; cpu < cpu_stop; cpu++)
cpu               314 tools/perf/bench/numa.c 			CPU_SET(cpu, &mask);
cpu               574 tools/perf/bench/numa.c 				int cpu;
cpu               591 tools/perf/bench/numa.c 				for (cpu = bind_cpu; cpu < bind_cpu+bind_len; cpu++) {
cpu               592 tools/perf/bench/numa.c 					BUG_ON(cpu < 0 || cpu >= g->p.nr_cpus);
cpu               593 tools/perf/bench/numa.c 					CPU_SET(cpu, &td->bind_cpumask);
cpu               857 tools/perf/bench/numa.c 	unsigned int cpu;
cpu               859 tools/perf/bench/numa.c 	cpu = sched_getcpu();
cpu               861 tools/perf/bench/numa.c 	g->threads[task_nr].curr_cpu = cpu;
cpu               977 tools/perf/bench/numa.c 	int cpu;
cpu               993 tools/perf/bench/numa.c 		cpu = td->curr_cpu;
cpu               996 tools/perf/bench/numa.c 		if (cpu < 0)
cpu               999 tools/perf/bench/numa.c 		node = numa_node_of_cpu(cpu);
cpu              1368 tools/perf/bench/numa.c 		int cpu;
cpu              1375 tools/perf/bench/numa.c 		for (cpu = 0; cpu < g->p.nr_cpus; cpu++)
cpu              1376 tools/perf/bench/numa.c 			CPU_SET(cpu, &td->bind_cpumask);
cpu               277 tools/perf/builtin-annotate.c 	if (ann->cpu_list && !test_bit(sample->cpu, ann->cpu_bitmap))
cpu               213 tools/perf/builtin-c2c.c 	if (WARN_ONCE(sample->cpu == (unsigned int) -1,
cpu               217 tools/perf/builtin-c2c.c 	set_bit(sample->cpu, c2c_he->cpuset);
cpu               319 tools/perf/builtin-c2c.c 		int cpu = sample->cpu == (unsigned int) -1 ? 0 : sample->cpu;
cpu               320 tools/perf/builtin-c2c.c 		int node = c2c.cpu2node[cpu];
cpu              2033 tools/perf/builtin-c2c.c 	int node, cpu;
cpu              2056 tools/perf/builtin-c2c.c 	for (cpu = 0; cpu < c2c.cpus_cnt; cpu++)
cpu              2057 tools/perf/builtin-c2c.c 		cpu2node[cpu] = -1;
cpu              2075 tools/perf/builtin-c2c.c 		for (cpu = 0; cpu < map->nr; cpu++) {
cpu              2076 tools/perf/builtin-c2c.c 			set_bit(map->map[cpu], set);
cpu              2078 tools/perf/builtin-c2c.c 			if (WARN_ONCE(cpu2node[map->map[cpu]] != -1, "node/cpu topology bug"))
cpu              2081 tools/perf/builtin-c2c.c 			cpu2node[map->map[cpu]] = node;
cpu               403 tools/perf/builtin-diff.c 	if (cpu_list && !test_bit(sample->cpu, cpu_bitmap)) {
cpu                86 tools/perf/builtin-kmem.c 			     int bytes_req, int bytes_alloc, int cpu)
cpu               124 tools/perf/builtin-kmem.c 	data->alloc_cpu = cpu;
cpu               180 tools/perf/builtin-kmem.c 	if (insert_alloc_stat(call_site, ptr, bytes_req, bytes_alloc, sample->cpu) ||
cpu               197 tools/perf/builtin-kmem.c 		int node1 = cpu__get_node(sample->cpu),
cpu               247 tools/perf/builtin-kmem.c 	if ((short)sample->cpu != s_alloc->alloc_cpu) {
cpu               739 tools/perf/builtin-kmem.c 		.cpu = sample->cpu,
cpu               268 tools/perf/builtin-report.c 	if (rep->cpu_list && !test_bit(sample->cpu, rep->cpu_bitmap))
cpu               840 tools/perf/builtin-sched.c 	int cpu = sample->cpu;
cpu               846 tools/perf/builtin-sched.c 	if (cpu >= MAX_CPUS || cpu < 0)
cpu               849 tools/perf/builtin-sched.c 	timestamp0 = sched->cpu_last_switched[cpu];
cpu               866 tools/perf/builtin-sched.c 	sched->cpu_last_switched[cpu] = timestamp;
cpu              1112 tools/perf/builtin-sched.c 	int cpu = sample->cpu, err = -1;
cpu              1115 tools/perf/builtin-sched.c 	BUG_ON(cpu >= MAX_CPUS || cpu < 0);
cpu              1117 tools/perf/builtin-sched.c 	timestamp0 = sched->cpu_last_switched[cpu];
cpu              1118 tools/perf/builtin-sched.c 	sched->cpu_last_switched[cpu] = timestamp;
cpu              1181 tools/perf/builtin-sched.c 	int cpu = sample->cpu, err = -1;
cpu              1186 tools/perf/builtin-sched.c 	BUG_ON(cpu >= MAX_CPUS || cpu < 0);
cpu              1532 tools/perf/builtin-sched.c 	int i, this_cpu = sample->cpu;
cpu              1605 tools/perf/builtin-sched.c 		int cpu = sched->map.comp ? sched->map.comp_cpus[i] : i;
cpu              1606 tools/perf/builtin-sched.c 		struct thread *curr_thread = sched->curr_thread[cpu];
cpu              1614 tools/perf/builtin-sched.c 		if (sched->map.cpus && !cpu_map__has(sched->map.cpus, cpu))
cpu              1617 tools/perf/builtin-sched.c 		if (sched->map.color_cpus && cpu_map__has(sched->map.color_cpus, cpu))
cpu              1620 tools/perf/builtin-sched.c 		if (cpu != this_cpu)
cpu              1625 tools/perf/builtin-sched.c 		if (sched->curr_thread[cpu]) {
cpu              1626 tools/perf/builtin-sched.c 			curr_tr = thread__get_runtime(sched->curr_thread[cpu]);
cpu              1669 tools/perf/builtin-sched.c 	int this_cpu = sample->cpu, err = 0;
cpu              1864 tools/perf/builtin-sched.c 				  u64 timestamp, u32 cpu)
cpu              1871 tools/perf/builtin-sched.c 	if ((cpu >= r->ncpu) || (r->last_time == NULL)) {
cpu              1872 tools/perf/builtin-sched.c 		int i, n = __roundup_pow_of_two(cpu+1);
cpu              1886 tools/perf/builtin-sched.c 	r->last_time[cpu] = timestamp;
cpu              1890 tools/perf/builtin-sched.c static u64 perf_evsel__get_time(struct evsel *evsel, u32 cpu)
cpu              1894 tools/perf/builtin-sched.c 	if ((r == NULL) || (r->last_time == NULL) || (cpu >= r->ncpu))
cpu              1897 tools/perf/builtin-sched.c 	return r->last_time[cpu];
cpu              2012 tools/perf/builtin-sched.c 	printf("%15s [%04d] ", tstr, sample->cpu);
cpu              2021 tools/perf/builtin-sched.c 			if (i == sample->cpu)
cpu              2248 tools/perf/builtin-sched.c static struct thread *get_idle_thread(int cpu)
cpu              2254 tools/perf/builtin-sched.c 	if ((cpu >= idle_max_cpu) || (idle_threads == NULL)) {
cpu              2255 tools/perf/builtin-sched.c 		int i, j = __roundup_pow_of_two(cpu+1);
cpu              2270 tools/perf/builtin-sched.c 	if (idle_threads[cpu] == NULL) {
cpu              2271 tools/perf/builtin-sched.c 		idle_threads[cpu] = thread__new(0, 0);
cpu              2273 tools/perf/builtin-sched.c 		if (idle_threads[cpu]) {
cpu              2274 tools/perf/builtin-sched.c 			if (init_idle_thread(idle_threads[cpu]) < 0)
cpu              2279 tools/perf/builtin-sched.c 	return idle_threads[cpu];
cpu              2300 tools/perf/builtin-sched.c 		thread = get_idle_thread(sample->cpu);
cpu              2302 tools/perf/builtin-sched.c 			pr_err("Failed to get idle thread for cpu %d.\n", sample->cpu);
cpu              2318 tools/perf/builtin-sched.c 			idle = get_idle_thread(sample->cpu);
cpu              2320 tools/perf/builtin-sched.c 				pr_err("Failed to get idle thread for cpu %d.\n", sample->cpu);
cpu              2382 tools/perf/builtin-sched.c 	printf("%15s [%04d] ", tstr, sample->cpu);
cpu              2455 tools/perf/builtin-sched.c 	printf("%15s [%04d] ", tstr, sample->cpu);
cpu              2463 tools/perf/builtin-sched.c 			c = (i == sample->cpu) ? 'm' : ' ';
cpu              2546 tools/perf/builtin-sched.c 	tprev = perf_evsel__get_time(evsel, sample->cpu);
cpu              2629 tools/perf/builtin-sched.c 	perf_evsel__save_time(evsel, sample->time, sample->cpu);
cpu              2652 tools/perf/builtin-sched.c 	printf("lost %" PRI_lu64 " events on cpu %d\n", event->lost.lost, sample->cpu);
cpu              2918 tools/perf/builtin-sched.c 	int this_cpu = sample->cpu;
cpu               652 tools/perf/builtin-script.c 			printed += fprintf(fp, "%3d ", sample->cpu);
cpu               654 tools/perf/builtin-script.c 			printed += fprintf(fp, "[%03d] ", sample->cpu);
cpu               971 tools/perf/builtin-script.c 			   u8 cpumode, int cpu, struct symbol **lastsym,
cpu               984 tools/perf/builtin-script.c 	al.cpu = cpu;
cpu              1029 tools/perf/builtin-script.c 	x.cpu = sample->cpu;
cpu              1039 tools/perf/builtin-script.c 					   x.cpumode, x.cpu, &lastsym, attr, fp);
cpu              1069 tools/perf/builtin-script.c 			printed += ip__fprintf_sym(ip, thread, x.cpumode, x.cpu, &lastsym, attr, fp);
cpu              1115 tools/perf/builtin-script.c 	printed += ip__fprintf_sym(start, thread, x.cpumode, x.cpu, &lastsym, attr, fp);
cpu              1212 tools/perf/builtin-script.c 	size_t depth = thread_stack__depth(thread, sample->cpu);
cpu              1748 tools/perf/builtin-script.c 				       sample->cpu,
cpu              1755 tools/perf/builtin-script.c 						      sample->cpu,
cpu              1769 tools/perf/builtin-script.c 	int depth = thread_stack__depth(thread, sample->cpu);
cpu              1851 tools/perf/builtin-script.c 		event_format__fprintf(evsel->tp_format, sample->cpu,
cpu              1923 tools/perf/builtin-script.c 	int cpu, thread;
cpu              1936 tools/perf/builtin-script.c 		for (cpu = 0; cpu < ncpus; cpu++) {
cpu              1939 tools/perf/builtin-script.c 			counts = perf_counts(counter->counts, cpu, thread);
cpu              1942 tools/perf/builtin-script.c 				counter->core.cpus->map[cpu],
cpu              1988 tools/perf/builtin-script.c 		return !test_bit(sample->cpu, cpu_bitmap);
cpu              2106 tools/perf/builtin-script.c 		sample->cpu = 0;
cpu              2144 tools/perf/builtin-script.c 		sample->cpu = 0;
cpu              2180 tools/perf/builtin-script.c 		sample->cpu = 0;
cpu              2212 tools/perf/builtin-script.c 		sample->cpu = 0;
cpu              2250 tools/perf/builtin-script.c 		sample->cpu = 0;
cpu              2284 tools/perf/builtin-script.c 		sample->cpu = 0;
cpu               241 tools/perf/builtin-stat.c perf_evsel__write_stat_event(struct evsel *counter, u32 cpu, u32 thread,
cpu               244 tools/perf/builtin-stat.c 	struct perf_sample_id *sid = SID(counter, cpu, thread);
cpu               246 tools/perf/builtin-stat.c 	return perf_event__synthesize_stat(NULL, cpu, thread, sid->id, count,
cpu               250 tools/perf/builtin-stat.c static int read_single_counter(struct evsel *counter, int cpu,
cpu               256 tools/perf/builtin-stat.c 			perf_counts(counter->counts, cpu, thread);
cpu               261 tools/perf/builtin-stat.c 	return perf_evsel__read_counter(counter, cpu, thread);
cpu               271 tools/perf/builtin-stat.c 	int ncpus, cpu, thread;
cpu               285 tools/perf/builtin-stat.c 		for (cpu = 0; cpu < ncpus; cpu++) {
cpu               288 tools/perf/builtin-stat.c 			count = perf_counts(counter->counts, cpu, thread);
cpu               294 tools/perf/builtin-stat.c 			if (!perf_counts__is_loaded(counter->counts, cpu, thread) &&
cpu               295 tools/perf/builtin-stat.c 			    read_single_counter(counter, cpu, thread, rs)) {
cpu               297 tools/perf/builtin-stat.c 				perf_counts(counter->counts, cpu, thread)->ena = 0;
cpu               298 tools/perf/builtin-stat.c 				perf_counts(counter->counts, cpu, thread)->run = 0;
cpu               302 tools/perf/builtin-stat.c 			perf_counts__set_loaded(counter->counts, cpu, thread, false);
cpu               305 tools/perf/builtin-stat.c 				if (perf_evsel__write_stat_event(counter, cpu, thread, count)) {
cpu               315 tools/perf/builtin-stat.c 						cpu,
cpu               810 tools/perf/builtin-stat.c 				 struct perf_cpu_map *map, int cpu)
cpu               812 tools/perf/builtin-stat.c 	return cpu_map__get_socket(map, cpu, NULL);
cpu               816 tools/perf/builtin-stat.c 			      struct perf_cpu_map *map, int cpu)
cpu               818 tools/perf/builtin-stat.c 	return cpu_map__get_die(map, cpu, NULL);
cpu               822 tools/perf/builtin-stat.c 			       struct perf_cpu_map *map, int cpu)
cpu               824 tools/perf/builtin-stat.c 	return cpu_map__get_core(map, cpu, NULL);
cpu               830 tools/perf/builtin-stat.c 	int cpu;
cpu               835 tools/perf/builtin-stat.c 	cpu = map->map[idx];
cpu               837 tools/perf/builtin-stat.c 	if (config->cpus_aggr_map->map[cpu] == -1)
cpu               838 tools/perf/builtin-stat.c 		config->cpus_aggr_map->map[cpu] = get_id(config, map, idx);
cpu               840 tools/perf/builtin-stat.c 	return config->cpus_aggr_map->map[cpu];
cpu               936 tools/perf/builtin-stat.c 	int cpu;
cpu               941 tools/perf/builtin-stat.c 	cpu = map->map[idx];
cpu               943 tools/perf/builtin-stat.c 	if (cpu >= env->nr_cpus_avail)
cpu               946 tools/perf/builtin-stat.c 	return cpu;
cpu               952 tools/perf/builtin-stat.c 	int cpu = perf_env__get_cpu(env, map, idx);
cpu               954 tools/perf/builtin-stat.c 	return cpu == -1 ? -1 : env->cpu[cpu].socket_id;
cpu               960 tools/perf/builtin-stat.c 	int die_id = -1, cpu = perf_env__get_cpu(env, map, idx);
cpu               962 tools/perf/builtin-stat.c 	if (cpu != -1) {
cpu               969 tools/perf/builtin-stat.c 		if (WARN_ONCE(env->cpu[cpu].socket_id >> 8, "The socket id number is too big.\n"))
cpu               972 tools/perf/builtin-stat.c 		if (WARN_ONCE(env->cpu[cpu].die_id >> 8, "The die id number is too big.\n"))
cpu               975 tools/perf/builtin-stat.c 		die_id = (env->cpu[cpu].socket_id << 8) | (env->cpu[cpu].die_id & 0xff);
cpu               984 tools/perf/builtin-stat.c 	int core = -1, cpu = perf_env__get_cpu(env, map, idx);
cpu               986 tools/perf/builtin-stat.c 	if (cpu != -1) {
cpu               994 tools/perf/builtin-stat.c 		if (WARN_ONCE(env->cpu[cpu].socket_id >> 8, "The socket id number is too big.\n"))
cpu               997 tools/perf/builtin-stat.c 		if (WARN_ONCE(env->cpu[cpu].die_id >> 8, "The die id number is too big.\n"))
cpu              1000 tools/perf/builtin-stat.c 		if (WARN_ONCE(env->cpu[cpu].core_id >> 16, "The core id number is too big.\n"))
cpu              1003 tools/perf/builtin-stat.c 		core = (env->cpu[cpu].socket_id << 24) |
cpu              1004 tools/perf/builtin-stat.c 		       (env->cpu[cpu].die_id << 16) |
cpu              1005 tools/perf/builtin-stat.c 		       (env->cpu[cpu].core_id & 0xffff);
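The builtin-stat excerpts above fold socket, die and core ids into one aggregate integer (socket over an 8-bit die id, and socket/die over a 16-bit core id), which is exactly what the WARN_ONCE width checks guard against overflowing. A worked sketch of that packing, with hypothetical helper names:

/* same bit layout as the perf_env__get_die/perf_env__get_core excerpts above */
static inline int pack_die_id(int socket_id, int die_id)
{
	return (socket_id << 8) | (die_id & 0xff);
}

static inline int pack_core_id(int socket_id, int die_id, int core_id)
{
	return (socket_id << 24) | (die_id << 16) | (core_id & 0xffff);
}

/* e.g. socket 1, die 0, core 3 packs to 0x01000003 as the per-core aggregate id */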
cpu               145 tools/perf/builtin-timechart.c 	int cpu;
cpu               179 tools/perf/builtin-timechart.c 	int cpu;
cpu               268 tools/perf/builtin-timechart.c 			   unsigned int cpu, u64 start, u64 end,
cpu               291 tools/perf/builtin-timechart.c 	sample->cpu = cpu;
cpu               347 tools/perf/builtin-timechart.c static void c_state_start(int cpu, u64 timestamp, int state)
cpu               349 tools/perf/builtin-timechart.c 	cpus_cstate_start_times[cpu] = timestamp;
cpu               350 tools/perf/builtin-timechart.c 	cpus_cstate_state[cpu] = state;
cpu               353 tools/perf/builtin-timechart.c static void c_state_end(struct timechart *tchart, int cpu, u64 timestamp)
cpu               360 tools/perf/builtin-timechart.c 	pwr->state = cpus_cstate_state[cpu];
cpu               361 tools/perf/builtin-timechart.c 	pwr->start_time = cpus_cstate_start_times[cpu];
cpu               363 tools/perf/builtin-timechart.c 	pwr->cpu = cpu;
cpu               370 tools/perf/builtin-timechart.c static void p_state_change(struct timechart *tchart, int cpu, u64 timestamp, u64 new_freq)
cpu               381 tools/perf/builtin-timechart.c 	pwr->state = cpus_pstate_state[cpu];
cpu               382 tools/perf/builtin-timechart.c 	pwr->start_time = cpus_pstate_start_times[cpu];
cpu               384 tools/perf/builtin-timechart.c 	pwr->cpu = cpu;
cpu               393 tools/perf/builtin-timechart.c 	cpus_pstate_state[cpu] = new_freq;
cpu               394 tools/perf/builtin-timechart.c 	cpus_pstate_start_times[cpu] = timestamp;
cpu               406 tools/perf/builtin-timechart.c static void sched_wakeup(struct timechart *tchart, int cpu, u64 timestamp,
cpu               432 tools/perf/builtin-timechart.c 		pid_put_sample(tchart, p->pid, p->current->state, cpu,
cpu               439 tools/perf/builtin-timechart.c static void sched_switch(struct timechart *tchart, int cpu, u64 timestamp,
cpu               450 tools/perf/builtin-timechart.c 		pid_put_sample(tchart, prev_pid, TYPE_RUNNING, cpu,
cpu               455 tools/perf/builtin-timechart.c 			pid_put_sample(tchart, next_pid, p->current->state, cpu,
cpu               615 tools/perf/builtin-timechart.c 	sched_wakeup(tchart, sample->cpu, sample->time, waker, wakee, flags, backtrace);
cpu               629 tools/perf/builtin-timechart.c 	sched_switch(tchart, sample->cpu, sample->time, prev_pid, next_pid,
cpu               654 tools/perf/builtin-timechart.c 	c_state_end(tchart, sample->cpu, sample->time);
cpu               678 tools/perf/builtin-timechart.c 	u64 cpu;
cpu               681 tools/perf/builtin-timechart.c 	for (cpu = 0; cpu <= tchart->numcpus; cpu++) {
cpu               688 tools/perf/builtin-timechart.c 		pwr->state = cpus_cstate_state[cpu];
cpu               689 tools/perf/builtin-timechart.c 		pwr->start_time = cpus_cstate_start_times[cpu];
cpu               691 tools/perf/builtin-timechart.c 		pwr->cpu = cpu;
cpu               703 tools/perf/builtin-timechart.c 		pwr->state = cpus_pstate_state[cpu];
cpu               704 tools/perf/builtin-timechart.c 		pwr->start_time = cpus_pstate_start_times[cpu];
cpu               706 tools/perf/builtin-timechart.c 		pwr->cpu = cpu;
cpu              1020 tools/perf/builtin-timechart.c 			svg_cstate(pwr->cpu, pwr->start_time, pwr->end_time, pwr->state);
cpu              1029 tools/perf/builtin-timechart.c 			svg_pstate(pwr->cpu, pwr->start_time, pwr->end_time, pwr->state);
cpu              1115 tools/perf/builtin-timechart.c 					svg_process(sample->cpu,
cpu              1269 tools/perf/builtin-timechart.c 					svg_running(Y, sample->cpu,
cpu              1274 tools/perf/builtin-timechart.c 					svg_blocked(Y, sample->cpu,
cpu              1279 tools/perf/builtin-timechart.c 					svg_waiting(Y, sample->cpu,
cpu              1912 tools/perf/builtin-trace.c 				   sample->pid, sample->tid, sample->cpu);
cpu              2409 tools/perf/builtin-trace.c 			event_format__fprintf(evsel->tp_format, sample->cpu,
cpu              2704 tools/perf/builtin-trace.c 		       sample->cpu, sample->raw_size);
cpu                91 tools/perf/lib/cpumap.c 	int n, cpu, prev;
cpu                97 tools/perf/lib/cpumap.c 		n = fscanf(file, "%u%c", &cpu, &sep);
cpu               101 tools/perf/lib/cpumap.c 			int new_max = nr_cpus + cpu - prev - 1;
cpu               114 tools/perf/lib/cpumap.c 			while (++prev < cpu)
cpu               125 tools/perf/lib/cpumap.c 		tmp_cpus[nr_cpus++] = cpu;
cpu               127 tools/perf/lib/cpumap.c 			prev = cpu;
cpu               252 tools/perf/lib/cpumap.c int perf_cpu_map__idx(struct perf_cpu_map *cpus, int cpu)
cpu               257 tools/perf/lib/cpumap.c 		if (cpus->map[i] == cpu)
cpu               186 tools/perf/lib/evlist.c 				 int cpu, int thread, u64 id)
cpu               189 tools/perf/lib/evlist.c 	struct perf_sample_id *sid = SID(evsel, cpu, thread);
cpu               199 tools/perf/lib/evlist.c 			 int cpu, int thread, u64 id)
cpu               201 tools/perf/lib/evlist.c 	perf_evlist__id_hash(evlist, evsel, cpu, thread, id);
cpu               207 tools/perf/lib/evlist.c 			   int cpu, int thread, int fd)
cpu               242 tools/perf/lib/evlist.c 	perf_evlist__id_add(evlist, evsel, cpu, thread, id);
cpu                47 tools/perf/lib/evsel.c 		int cpu, thread;
cpu                48 tools/perf/lib/evsel.c 		for (cpu = 0; cpu < ncpus; cpu++) {
cpu                50 tools/perf/lib/evsel.c 				FD(evsel, cpu, thread) = -1;
cpu                60 tools/perf/lib/evsel.c 		    pid_t pid, int cpu, int group_fd,
cpu                63 tools/perf/lib/evsel.c 	return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
cpu                69 tools/perf/lib/evsel.c 	int cpu, thread, err = 0;
cpu                99 tools/perf/lib/evsel.c 	for (cpu = 0; cpu < cpus->nr; cpu++) {
cpu               105 tools/perf/lib/evsel.c 						 cpus->map[cpu], -1, 0);
cpu               110 tools/perf/lib/evsel.c 			FD(evsel, cpu, thread) = fd;
cpu               119 tools/perf/lib/evsel.c 	int cpu, thread;
cpu               121 tools/perf/lib/evsel.c 	for (cpu = 0; cpu < xyarray__max_x(evsel->fd); cpu++)
cpu               123 tools/perf/lib/evsel.c 			close(FD(evsel, cpu, thread));
cpu               124 tools/perf/lib/evsel.c 			FD(evsel, cpu, thread) = -1;
cpu               168 tools/perf/lib/evsel.c int perf_evsel__read(struct perf_evsel *evsel, int cpu, int thread,
cpu               175 tools/perf/lib/evsel.c 	if (FD(evsel, cpu, thread) < 0)
cpu               178 tools/perf/lib/evsel.c 	if (readn(FD(evsel, cpu, thread), count->values, size) <= 0)
cpu               187 tools/perf/lib/evsel.c 	int cpu, thread;
cpu               189 tools/perf/lib/evsel.c 	for (cpu = 0; cpu < xyarray__max_x(evsel->fd); cpu++) {
cpu               191 tools/perf/lib/evsel.c 			int fd = FD(evsel, cpu, thread),
cpu                17 tools/perf/lib/include/internal/cpumap.h int perf_cpu_map__idx(struct perf_cpu_map *cpus, int cpu);
cpu                77 tools/perf/lib/include/internal/evlist.h 			 int cpu, int thread, u64 id);
cpu                81 tools/perf/lib/include/internal/evlist.h 			   int cpu, int thread, int fd);
cpu                30 tools/perf/lib/include/internal/evsel.h 	int			 cpu;
cpu                22 tools/perf/lib/include/internal/mmap.h 	int		 cpu;
cpu                21 tools/perf/lib/include/perf/cpumap.h #define perf_cpu_map__for_each_cpu(cpu, idx, cpus)		\
cpu                22 tools/perf/lib/include/perf/cpumap.h 	for ((idx) = 0, (cpu) = perf_cpu_map__cpu(cpus, idx);	\
cpu                24 tools/perf/lib/include/perf/cpumap.h 	     (idx)++, (cpu) = perf_cpu_map__cpu(cpus, idx))
cpu               132 tools/perf/lib/include/perf/event.h 	__u16			 cpu[];
cpu               200 tools/perf/lib/include/perf/event.h 	__u64			 cpu;
cpu               224 tools/perf/lib/include/perf/event.h 	__u32			 cpu;
cpu               234 tools/perf/lib/include/perf/event.h 	__u32			 cpu;
cpu               289 tools/perf/lib/include/perf/event.h 	__u32			 cpu;
cpu                31 tools/perf/lib/include/perf/evsel.h LIBPERF_API int perf_evsel__read(struct perf_evsel *evsel, int cpu, int thread,
cpu                30 tools/perf/lib/tests/test-evlist.c 	int err, cpu, tmp;
cpu                56 tools/perf/lib/tests/test-evlist.c 		perf_cpu_map__for_each_cpu(cpu, tmp, cpus) {
cpu                59 tools/perf/lib/tests/test-evlist.c 			perf_evsel__read(evsel, cpu, 0, &counts);
cpu                24 tools/perf/lib/tests/test-evsel.c 	int err, cpu, tmp;
cpu                35 tools/perf/lib/tests/test-evsel.c 	perf_cpu_map__for_each_cpu(cpu, tmp, cpus) {
cpu                38 tools/perf/lib/tests/test-evsel.c 		perf_evsel__read(evsel, cpu, 0, &counts);
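The libperf test excerpts above (test-evlist.c, test-evsel.c) show the per-CPU read loop: build a map with perf_cpu_map__new(NULL), walk it with perf_cpu_map__for_each_cpu(), and read each counter with perf_evsel__read(). A self-contained sketch along those lines follows; perf_evsel__new()/__open()/__close()/__delete(), perf_cpu_map__put() and the header paths are assumed from the same tools/perf/lib library and should be checked against the actual headers before use.

#include <stdio.h>
#include <linux/perf_event.h>
#include <perf/cpumap.h>
#include <perf/evsel.h>

static int read_cpu_clock(void)
{
	struct perf_event_attr attr = {
		.type   = PERF_TYPE_SOFTWARE,
		.config = PERF_COUNT_SW_CPU_CLOCK,
	};
	struct perf_cpu_map *cpus;
	struct perf_evsel *evsel;
	int cpu, tmp, err = -1;

	cpus = perf_cpu_map__new(NULL);		/* NULL means "all online CPUs" */
	if (!cpus)
		return -1;

	evsel = perf_evsel__new(&attr);
	if (!evsel)
		goto out_cpus;

	err = perf_evsel__open(evsel, cpus, NULL);	/* one fd per CPU, any task */
	if (err)
		goto out_evsel;

	/* mirrors test-evsel.c: for an all-online map the CPU number doubles
	 * as the index passed to perf_evsel__read() */
	perf_cpu_map__for_each_cpu(cpu, tmp, cpus) {
		struct perf_counts_values counts = { .val = 0 };

		perf_evsel__read(evsel, cpu, 0, &counts);
		printf("cpu%d: %llu\n", cpu, (unsigned long long)counts.val);
	}

	perf_evsel__close(evsel);
out_evsel:
	perf_evsel__delete(evsel);
out_cpus:
	perf_cpu_map__put(cpus);
	return err;
}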
cpu                15 tools/perf/perf-sys.h void test_attr__open(struct perf_event_attr *attr, pid_t pid, int cpu,
cpu                24 tools/perf/perf-sys.h 		      pid_t pid, int cpu, int group_fd,
cpu                29 tools/perf/perf-sys.h 	fd = syscall(__NR_perf_event_open, attr, pid, cpu,
cpu                34 tools/perf/perf-sys.h 		test_attr__open(attr, pid, cpu, fd, group_fd, flags);
cpu                67 tools/perf/tests/attr.c static int store_event(struct perf_event_attr *attr, pid_t pid, int cpu,
cpu                95 tools/perf/tests/attr.c 	__WRITE_ASS(cpu,      "d", cpu);
cpu               146 tools/perf/tests/attr.c void test_attr__open(struct perf_event_attr *attr, pid_t pid, int cpu,
cpu               151 tools/perf/tests/attr.c 	if ((fd != -1) && store_event(attr, pid, cpu, fd, group_fd, flags)) {
cpu                65 tools/perf/tests/cpumap.c 	TEST_ASSERT_VAL("wrong cpu",  cpus->cpu[0] == 1);
cpu                66 tools/perf/tests/cpumap.c 	TEST_ASSERT_VAL("wrong cpu",  cpus->cpu[1] == 256);
cpu               155 tools/perf/tests/hists_cumulate.c #define CPU(he)   (he->cpu)
cpu                18 tools/perf/tests/hists_output.c 	u32 cpu;
cpu                29 tools/perf/tests/hists_output.c 	{ .cpu = 0, .pid = FAKE_PID_PERF1, .ip = FAKE_IP_KERNEL_SCHEDULE, },
cpu                31 tools/perf/tests/hists_output.c 	{ .cpu = 1, .pid = FAKE_PID_PERF1, .ip = FAKE_IP_PERF_MAIN, },
cpu                33 tools/perf/tests/hists_output.c 	{ .cpu = 1, .pid = FAKE_PID_PERF1, .ip = FAKE_IP_PERF_CMD_RECORD, },
cpu                35 tools/perf/tests/hists_output.c 	{ .cpu = 1, .pid = FAKE_PID_PERF1, .ip = FAKE_IP_LIBC_MALLOC, },
cpu                37 tools/perf/tests/hists_output.c 	{ .cpu = 2, .pid = FAKE_PID_PERF1, .ip = FAKE_IP_LIBC_FREE, },
cpu                39 tools/perf/tests/hists_output.c 	{ .cpu = 2, .pid = FAKE_PID_PERF2, .ip = FAKE_IP_PERF_MAIN, },
cpu                41 tools/perf/tests/hists_output.c 	{ .cpu = 2, .pid = FAKE_PID_PERF2, .ip = FAKE_IP_KERNEL_PAGE_FAULT, },
cpu                43 tools/perf/tests/hists_output.c 	{ .cpu = 3, .pid = FAKE_PID_BASH,  .ip = FAKE_IP_BASH_MAIN, },
cpu                45 tools/perf/tests/hists_output.c 	{ .cpu = 0, .pid = FAKE_PID_BASH,  .ip = FAKE_IP_BASH_XMALLOC, },
cpu                47 tools/perf/tests/hists_output.c 	{ .cpu = 1, .pid = FAKE_PID_BASH,  .ip = FAKE_IP_KERNEL_PAGE_FAULT, },
cpu                66 tools/perf/tests/hists_output.c 		sample.cpu = fake_samples[i].cpu;
cpu               121 tools/perf/tests/hists_output.c #define CPU(he)   (he->cpu)
cpu                25 tools/perf/tests/openat-syscall-all-cpus.c 	int err = -1, fd, cpu;
cpu                61 tools/perf/tests/openat-syscall-all-cpus.c 	for (cpu = 0; cpu < cpus->nr; ++cpu) {
cpu                62 tools/perf/tests/openat-syscall-all-cpus.c 		unsigned int ncalls = nr_openat_calls + cpu;
cpu                69 tools/perf/tests/openat-syscall-all-cpus.c 		if (cpus->map[cpu] >= CPU_SETSIZE) {
cpu                70 tools/perf/tests/openat-syscall-all-cpus.c 			pr_debug("Ignoring CPU %d\n", cpus->map[cpu]);
cpu                74 tools/perf/tests/openat-syscall-all-cpus.c 		CPU_SET(cpus->map[cpu], &cpu_set);
cpu                77 tools/perf/tests/openat-syscall-all-cpus.c 				 cpus->map[cpu],
cpu                85 tools/perf/tests/openat-syscall-all-cpus.c 		CPU_CLR(cpus->map[cpu], &cpu_set);
cpu               100 tools/perf/tests/openat-syscall-all-cpus.c 	for (cpu = 0; cpu < cpus->nr; ++cpu) {
cpu               103 tools/perf/tests/openat-syscall-all-cpus.c 		if (cpus->map[cpu] >= CPU_SETSIZE)
cpu               106 tools/perf/tests/openat-syscall-all-cpus.c 		if (perf_evsel__read_on_cpu(evsel, cpu, 0) < 0) {
cpu               112 tools/perf/tests/openat-syscall-all-cpus.c 		expected = nr_openat_calls + cpu;
cpu               113 tools/perf/tests/openat-syscall-all-cpus.c 		if (perf_counts(evsel->counts, cpu, 0)->val != expected) {
cpu               115 tools/perf/tests/openat-syscall-all-cpus.c 				 expected, cpus->map[cpu], perf_counts(evsel->counts, cpu, 0)->val);
cpu                18 tools/perf/tests/perf-record.c 	int i, cpu = -1, nrcpus = 1024;
cpu                33 tools/perf/tests/perf-record.c 			if (cpu == -1)
cpu                34 tools/perf/tests/perf-record.c 				cpu = i;
cpu                40 tools/perf/tests/perf-record.c 	return cpu;
cpu                68 tools/perf/tests/perf-record.c 	u32 cpu;
cpu               120 tools/perf/tests/perf-record.c 	cpu = err;
cpu               193 tools/perf/tests/perf-record.c 					pr_info("%" PRIu64" %d ", sample.time, sample.cpu);
cpu               205 tools/perf/tests/perf-record.c 				if (sample.cpu != cpu) {
cpu               207 tools/perf/tests/perf-record.c 						 name, cpu, sample.cpu);
cpu                63 tools/perf/tests/sample-parsing.c 		COMP(cpu);
cpu               195 tools/perf/tests/sample-parsing.c 		.cpu		= 110,
cpu                71 tools/perf/tests/stat.c 	TEST_ASSERT_VAL("wrong cpu",    st->cpu    == 1);
cpu                88 tools/perf/tests/switch-tracking.c static int check_cpu(struct switch_tracking *switch_tracking, int cpu)
cpu                90 tools/perf/tests/switch-tracking.c 	int i, nr = cpu + 1;
cpu                92 tools/perf/tests/switch-tracking.c 	if (cpu < 0)
cpu               105 tools/perf/tests/switch-tracking.c 	if (cpu >= switch_tracking->nr_tids) {
cpu               128 tools/perf/tests/switch-tracking.c 	int cpu, err;
cpu               139 tools/perf/tests/switch-tracking.c 		cpu = sample.cpu;
cpu               141 tools/perf/tests/switch-tracking.c 			  cpu, prev_tid, next_tid);
cpu               142 tools/perf/tests/switch-tracking.c 		err = check_cpu(switch_tracking, cpu);
cpu               149 tools/perf/tests/switch-tracking.c 		if (switch_tracking->tids[cpu] != -1 &&
cpu               150 tools/perf/tests/switch-tracking.c 		    switch_tracking->tids[cpu] != prev_tid) {
cpu               154 tools/perf/tests/switch-tracking.c 		switch_tracking->tids[cpu] = next_tid;
cpu                94 tools/perf/tests/topology.c 	if (!session->header.env.cpu)
cpu               101 tools/perf/tests/topology.c 			 session->header.env.cpu[i].core_id,
cpu               102 tools/perf/tests/topology.c 			 session->header.env.cpu[i].socket_id);
cpu               107 tools/perf/tests/topology.c 			(session->header.env.cpu[map->map[i]].core_id == (cpu_map__get_core(map, i, NULL) & 0xffff)));
cpu               110 tools/perf/tests/topology.c 			(session->header.env.cpu[map->map[i]].socket_id == cpu_map__get_socket(map, i, NULL)));
cpu                52 tools/perf/ui/browsers/res_sample.c 			     res_samples[i].cpu, res_samples[i].tid) < 0) {
cpu                81 tools/perf/ui/browsers/res_sample.c 		     r->cpu >= 0 ? "--cpu " : "",
cpu                82 tools/perf/ui/browsers/res_sample.c 		     r->cpu >= 0 ? (sprintf(cpubuf, "%d", r->cpu), cpubuf) : "",
cpu                46 tools/perf/util/arm-spe.c 	int			cpu;
cpu                80 tools/perf/util/auxtrace.c 	mm->cpu = mp->cpu;
cpu               136 tools/perf/util/auxtrace.c 		mp->cpu = evlist->core.cpus->map[idx];
cpu               142 tools/perf/util/auxtrace.c 		mp->cpu = -1;
cpu               203 tools/perf/util/auxtrace.c 		queue_array[i].cpu = queues->queue_array[i].cpu;
cpu               254 tools/perf/util/auxtrace.c 		queue->cpu = buffer->cpu;
cpu               255 tools/perf/util/auxtrace.c 	} else if (buffer->cpu != queue->cpu || buffer->tid != queue->tid) {
cpu               257 tools/perf/util/auxtrace.c 		       queue->cpu, queue->tid, buffer->cpu, buffer->tid);
cpu               305 tools/perf/util/auxtrace.c static bool filter_cpu(struct perf_session *session, int cpu)
cpu               309 tools/perf/util/auxtrace.c 	return cpu_bitmap && cpu != -1 && !test_bit(cpu, cpu_bitmap);
cpu               320 tools/perf/util/auxtrace.c 	if (filter_cpu(session, buffer->cpu))
cpu               365 tools/perf/util/auxtrace.c 		.cpu = event->auxtrace.cpu,
cpu               865 tools/perf/util/auxtrace.c 			  int code, int cpu, pid_t pid, pid_t tid, u64 ip,
cpu               875 tools/perf/util/auxtrace.c 	auxtrace_error->cpu = cpu;
cpu               951 tools/perf/util/auxtrace.c 			event->auxtrace.tid, event->auxtrace.cpu);
cpu              1195 tools/perf/util/auxtrace.c 		       e->cpu, e->pid, e->tid, e->ip, e->code, msg);
cpu              1321 tools/perf/util/auxtrace.c 	ev.auxtrace.cpu = mm->cpu;
cpu               190 tools/perf/util/auxtrace.h 	int			cpu;
cpu               215 tools/perf/util/auxtrace.h 	int			cpu;
cpu               279 tools/perf/util/auxtrace.h 	int		cpu;
cpu               300 tools/perf/util/auxtrace.h 	int		cpu;
cpu               525 tools/perf/util/auxtrace.h 			  int code, int cpu, pid_t pid, pid_t tid, u64 ip,
cpu                19 tools/perf/util/cloexec.c 	unsigned cpu;
cpu                20 tools/perf/util/cloexec.c 	int err = syscall(__NR_getcpu, &cpu, NULL, NULL);
cpu                22 tools/perf/util/cloexec.c 		return cpu;
cpu                39 tools/perf/util/cloexec.c 	int cpu;
cpu                43 tools/perf/util/cloexec.c 	cpu = sched_getcpu();
cpu                44 tools/perf/util/cloexec.c 	if (cpu < 0)
cpu                45 tools/perf/util/cloexec.c 		cpu = 0;
cpu                53 tools/perf/util/cloexec.c 		fd = sys_perf_event_open(&attr, pid, cpu, -1,
cpu                74 tools/perf/util/cloexec.c 		fd = sys_perf_event_open(&attr, pid, cpu, -1, 0);
cpu                21 tools/perf/util/counts.h perf_counts(struct perf_counts *counts, int cpu, int thread)
cpu                23 tools/perf/util/counts.h 	return xyarray__entry(counts->values, cpu, thread);
cpu                27 tools/perf/util/counts.h perf_counts__is_loaded(struct perf_counts *counts, int cpu, int thread)
cpu                29 tools/perf/util/counts.h 	return *((bool *) xyarray__entry(counts->loaded, cpu, thread));
cpu                33 tools/perf/util/counts.h perf_counts__set_loaded(struct perf_counts *counts, int cpu, int thread, bool loaded)
cpu                35 tools/perf/util/counts.h 	*((bool *) xyarray__entry(counts->loaded, cpu, thread)) = loaded;
cpu                35 tools/perf/util/cpumap.c 			if (cpus->cpu[i] == (u16) -1)
cpu                38 tools/perf/util/cpumap.c 				map->map[i] = (int) cpus->cpu[i];
cpu                54 tools/perf/util/cpumap.c 		int cpu, i = 0;
cpu                56 tools/perf/util/cpumap.c 		for_each_set_bit(cpu, mask->mask, nbits)
cpu                57 tools/perf/util/cpumap.c 			map->map[i++] = cpu;
cpu                98 tools/perf/util/cpumap.c static int cpu__get_topology_int(int cpu, const char *name, int *value)
cpu               103 tools/perf/util/cpumap.c 		"devices/system/cpu/cpu%d/topology/%s", cpu, name);
cpu               108 tools/perf/util/cpumap.c int cpu_map__get_socket_id(int cpu)
cpu               110 tools/perf/util/cpumap.c 	int value, ret = cpu__get_topology_int(cpu, "physical_package_id", &value);
cpu               116 tools/perf/util/cpumap.c 	int cpu;
cpu               121 tools/perf/util/cpumap.c 	cpu = map->map[idx];
cpu               123 tools/perf/util/cpumap.c 	return cpu_map__get_socket_id(cpu);
cpu               132 tools/perf/util/cpumap.c 		       int (*f)(struct perf_cpu_map *map, int cpu, void *data),
cpu               137 tools/perf/util/cpumap.c 	int cpu, s1, s2;
cpu               144 tools/perf/util/cpumap.c 	for (cpu = 0; cpu < nr; cpu++) {
cpu               145 tools/perf/util/cpumap.c 		s1 = f(cpus, cpu, data);
cpu               163 tools/perf/util/cpumap.c int cpu_map__get_die_id(int cpu)
cpu               165 tools/perf/util/cpumap.c 	int value, ret = cpu__get_topology_int(cpu, "die_id", &value);
cpu               172 tools/perf/util/cpumap.c 	int cpu, die_id, s;
cpu               177 tools/perf/util/cpumap.c 	cpu = map->map[idx];
cpu               179 tools/perf/util/cpumap.c 	die_id = cpu_map__get_die_id(cpu);
cpu               203 tools/perf/util/cpumap.c int cpu_map__get_core_id(int cpu)
cpu               205 tools/perf/util/cpumap.c 	int value, ret = cpu__get_topology_int(cpu, "core_id", &value);
cpu               211 tools/perf/util/cpumap.c 	int cpu, s_die;
cpu               216 tools/perf/util/cpumap.c 	cpu = map->map[idx];
cpu               218 tools/perf/util/cpumap.c 	cpu = cpu_map__get_core_id(cpu);
cpu               232 tools/perf/util/cpumap.c 	if (WARN_ONCE(cpu >> 16, "The core id number is too big.\n"))
cpu               235 tools/perf/util/cpumap.c 	return (s_die << 16) | (cpu & 0xffff);
cpu               378 tools/perf/util/cpumap.c int cpu__get_node(int cpu)
cpu               385 tools/perf/util/cpumap.c 	return cpunode_map[cpu];
cpu               411 tools/perf/util/cpumap.c 	unsigned int cpu, mem;
cpu               450 tools/perf/util/cpumap.c 			if (dent2->d_type != DT_LNK || sscanf(dent2->d_name, "cpu%u", &cpu) < 1)
cpu               452 tools/perf/util/cpumap.c 			cpunode_map[cpu] = mem;
cpu               460 tools/perf/util/cpumap.c bool cpu_map__has(struct perf_cpu_map *cpus, int cpu)
cpu               462 tools/perf/util/cpumap.c 	return perf_cpu_map__idx(cpus, cpu) != -1;
cpu               472 tools/perf/util/cpumap.c 	int i, cpu, start = -1;
cpu               481 tools/perf/util/cpumap.c 		cpu = last ? INT_MAX : map->map[i];
cpu               490 tools/perf/util/cpumap.c 		} else if (((i - start) != (cpu - map->map[start])) || last) {
cpu               524 tools/perf/util/cpumap.c 	int i, cpu;
cpu               539 tools/perf/util/cpumap.c 		cpu = cpu_map__cpu(map, i);
cpu               540 tools/perf/util/cpumap.c 		bitmap[cpu / 8] |= 1 << (cpu % 8);
cpu               543 tools/perf/util/cpumap.c 	for (cpu = last_cpu / 4 * 4; cpu >= 0; cpu -= 4) {
cpu               544 tools/perf/util/cpumap.c 		unsigned char bits = bitmap[cpu / 8];
cpu               546 tools/perf/util/cpumap.c 		if (cpu % 8)
cpu               552 tools/perf/util/cpumap.c 		if ((cpu % 32) == 0 && cpu > 0)
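The cpu_map__snprint_mask excerpt above packs each CPU number into a byte array (bitmap[cpu / 8] |= 1 << (cpu % 8)) and then emits the mask as hex nibbles from the highest CPU downwards. A standalone sketch of that packing and printing; everything other than the bit math is invented for the example, and the 32-bit comma separators of the original are omitted.

#include <stdio.h>
#include <string.h>

static void print_cpu_mask(const int *cpus, int nr, int last_cpu)
{
	unsigned char bitmap[(last_cpu / 8) + 1];
	int i, cpu;

	memset(bitmap, 0, sizeof(bitmap));
	for (i = 0; i < nr; i++) {
		cpu = cpus[i];
		bitmap[cpu / 8] |= 1 << (cpu % 8);	/* same packing as the listing */
	}

	/* walk down in 4-bit steps so the mask prints MSB-first,
	 * e.g. CPUs {1, 5} print as "22" */
	for (cpu = last_cpu / 4 * 4; cpu >= 0; cpu -= 4) {
		unsigned char bits = bitmap[cpu / 8];

		if (cpu % 8)
			bits >>= 4;
		printf("%x", bits & 0xf);
	}
	printf("\n");
}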
cpu                17 tools/perf/util/cpumap.h int cpu_map__get_socket_id(int cpu);
cpu                19 tools/perf/util/cpumap.h int cpu_map__get_die_id(int cpu);
cpu                21 tools/perf/util/cpumap.h int cpu_map__get_core_id(int cpu);
cpu                55 tools/perf/util/cpumap.h int cpu__get_node(int cpu);
cpu                58 tools/perf/util/cpumap.h 		       int (*f)(struct perf_cpu_map *map, int cpu, void *data),
cpu                62 tools/perf/util/cpumap.h bool cpu_map__has(struct perf_cpu_map *cpus, int cpu);
cpu                31 tools/perf/util/cputopo.c static int build_cpu_topology(struct cpu_topology *tp, int cpu)
cpu                42 tools/perf/util/cputopo.c 		  sysfs__mountpoint(), cpu);
cpu                73 tools/perf/util/cputopo.c 		  sysfs__mountpoint(), cpu);
cpu               101 tools/perf/util/cputopo.c 		  sysfs__mountpoint(), cpu);
cpu               104 tools/perf/util/cputopo.c 			  sysfs__mountpoint(), cpu);
cpu               347 tools/perf/util/cs-etm-decoder/cs-etm-decoder.c 	int cpu;
cpu               352 tools/perf/util/cs-etm-decoder/cs-etm-decoder.c 	if (cs_etm__get_cpu(trace_chan_id, &cpu) < 0)
cpu               362 tools/perf/util/cs-etm-decoder/cs-etm-decoder.c 	packet_queue->packet_buffer[et].cpu = cpu;
cpu               142 tools/perf/util/cs-etm.c int cs_etm__get_cpu(u8 trace_chan_id, int *cpu)
cpu               152 tools/perf/util/cs-etm.c 	*cpu = (int)metadata[CS_ETM_CPU];
cpu               212 tools/perf/util/cs-etm.c 		queue->packet_buffer[i].cpu = INT_MIN;
cpu              1056 tools/perf/util/cs-etm.c 	int cpu, err = -EINVAL;
cpu              1064 tools/perf/util/cs-etm.c 	if (cs_etm__get_cpu(trace_chan_id, &cpu) < 0)
cpu              1067 tools/perf/util/cs-etm.c 	err = machine__set_current_tid(etm->machine, cpu, tid, tid);
cpu              1131 tools/perf/util/cs-etm.c 	sample.cpu = tidq->packet->cpu;
cpu              1192 tools/perf/util/cs-etm.c 	sample.cpu = tidq->packet->cpu;
cpu              2379 tools/perf/util/cs-etm.c 	int i, j, cpu = 0;
cpu              2384 tools/perf/util/cs-etm.c 	for (i = CS_HEADER_VERSION_0_MAX; cpu < num; cpu++) {
cpu               136 tools/perf/util/cs-etm.h 	int cpu;
cpu               178 tools/perf/util/cs-etm.h int cs_etm__get_cpu(u8 trace_chan_id, int *cpu);
cpu               195 tools/perf/util/cs-etm.h 				  int *cpu __maybe_unused)
cpu                51 tools/perf/util/data-convert-bt.c 	int cpu;
cpu               670 tools/perf/util/data-convert-bt.c 			pr_err("CTF stream %d flush failed\n", cs->cpu);
cpu               673 tools/perf/util/data-convert-bt.c 		   cs->cpu, cs->count);
cpu               681 tools/perf/util/data-convert-bt.c static struct ctf_stream *ctf_stream__create(struct ctf_writer *cw, int cpu)
cpu               714 tools/perf/util/data-convert-bt.c 	ret = bt_ctf_field_unsigned_integer_set_value(cpu_field, (u32) cpu);
cpu               722 tools/perf/util/data-convert-bt.c 	cs->cpu    = cpu;
cpu               744 tools/perf/util/data-convert-bt.c static struct ctf_stream *ctf_stream(struct ctf_writer *cw, int cpu)
cpu               746 tools/perf/util/data-convert-bt.c 	struct ctf_stream *cs = cw->stream[cpu];
cpu               749 tools/perf/util/data-convert-bt.c 		cs = ctf_stream__create(cw, cpu);
cpu               750 tools/perf/util/data-convert-bt.c 		cw->stream[cpu] = cs;
cpu               759 tools/perf/util/data-convert-bt.c 	int cpu = 0;
cpu               762 tools/perf/util/data-convert-bt.c 		cpu = sample->cpu;
cpu               764 tools/perf/util/data-convert-bt.c 	if (cpu > cw->stream_cnt) {
cpu               766 tools/perf/util/data-convert-bt.c 			cpu, cw->stream_cnt);
cpu               767 tools/perf/util/data-convert-bt.c 		cpu = 0;
cpu               770 tools/perf/util/data-convert-bt.c 	return cpu;
cpu              1352 tools/perf/util/data-convert-bt.c 	int cpu;
cpu              1354 tools/perf/util/data-convert-bt.c 	for (cpu = 0; cpu < cw->stream_cnt; cpu++)
cpu              1355 tools/perf/util/data-convert-bt.c 		ctf_stream__delete(cw->stream[cpu]);
cpu              1558 tools/perf/util/data-convert-bt.c 	int cpu, ret = 0;
cpu              1560 tools/perf/util/data-convert-bt.c 	for (cpu = 0; cpu < cw->stream_cnt && !ret; cpu++)
cpu              1561 tools/perf/util/data-convert-bt.c 		ret = ctf_stream__flush(cw->stream[cpu]);
cpu                16 tools/perf/util/dump-insn.h 	int	      cpu;
cpu               181 tools/perf/util/env.c 	zfree(&env->cpu);
cpu               233 tools/perf/util/env.c 	int cpu, nr_cpus;
cpu               235 tools/perf/util/env.c 	if (env->cpu != NULL)
cpu               245 tools/perf/util/env.c 	env->cpu = calloc(nr_cpus, sizeof(env->cpu[0]));
cpu               246 tools/perf/util/env.c 	if (env->cpu == NULL)
cpu               249 tools/perf/util/env.c 	for (cpu = 0; cpu < nr_cpus; ++cpu) {
cpu               250 tools/perf/util/env.c 		env->cpu[cpu].core_id	= cpu_map__get_core_id(cpu);
cpu               251 tools/perf/util/env.c 		env->cpu[cpu].socket_id	= cpu_map__get_socket_id(cpu);
cpu               252 tools/perf/util/env.c 		env->cpu[cpu].die_id	= cpu_map__get_die_id(cpu);
cpu                66 tools/perf/util/env.h 	struct cpu_topology_map	*cpu;
cpu               576 tools/perf/util/event.c 	al->cpu = sample->cpu;
cpu               580 tools/perf/util/event.c 	if (al->cpu >= 0) {
cpu               583 tools/perf/util/event.c 		if (env && env->cpu)
cpu               584 tools/perf/util/event.c 			al->socket = env->cpu[al->cpu].socket_id;
cpu               648 tools/perf/util/event.c 	al->cpu = sample->cpu;
cpu               129 tools/perf/util/event.h 	u32 cpu;
cpu               356 tools/perf/util/evlist.c 					 struct evsel *evsel, int cpu)
cpu               365 tools/perf/util/evlist.c 		int err = ioctl(FD(evsel, cpu, thread), PERF_EVENT_IOC_ENABLE, 0);
cpu               376 tools/perf/util/evlist.c 	int cpu;
cpu               382 tools/perf/util/evlist.c 	for (cpu = 0; cpu < nr_cpus; cpu++) {
cpu               383 tools/perf/util/evlist.c 		int err = ioctl(FD(evsel, cpu, thread), PERF_EVENT_IOC_ENABLE, 0);
cpu               427 tools/perf/util/evlist.c 				     struct evsel *evsel, int idx, int cpu,
cpu               430 tools/perf/util/evlist.c 	struct perf_sample_id *sid = SID(evsel, cpu, thread);
cpu               432 tools/perf/util/evlist.c 	if (evlist->core.cpus && cpu >= 0)
cpu               433 tools/perf/util/evlist.c 		sid->cpu = evlist->core.cpus->map[cpu];
cpu               435 tools/perf/util/evlist.c 		sid->cpu = -1;
cpu               645 tools/perf/util/evlist.c 		int cpu;
cpu               666 tools/perf/util/evlist.c 		cpu = perf_cpu_map__idx(evsel->core.cpus, evlist_cpu);
cpu               667 tools/perf/util/evlist.c 		if (cpu == -1)
cpu               670 tools/perf/util/evlist.c 		fd = FD(evsel, cpu, thread);
cpu               700 tools/perf/util/evlist.c 			if (perf_evlist__id_add_fd(&evlist->core, &evsel->core, cpu, thread,
cpu               703 tools/perf/util/evlist.c 			perf_evlist__set_sid_idx(evlist, evsel, idx, cpu,
cpu               714 tools/perf/util/evlist.c 	int cpu, thread;
cpu               719 tools/perf/util/evlist.c 	for (cpu = 0; cpu < nr_cpus; cpu++) {
cpu               723 tools/perf/util/evlist.c 		auxtrace_mmap_params__set_idx(&mp->auxtrace_mp, evlist, cpu,
cpu               727 tools/perf/util/evlist.c 			if (evlist__mmap_per_evsel(evlist, cpu, mp, cpu,
cpu              1169 tools/perf/util/evlist.c 		size += sizeof(data->cpu) * 2;
cpu              1266 tools/perf/util/evsel.c void perf_evsel__compute_deltas(struct evsel *evsel, int cpu, int thread,
cpu              1274 tools/perf/util/evsel.c 	if (cpu == -1) {
cpu              1278 tools/perf/util/evsel.c 		tmp = *perf_counts(evsel->prev_raw_counts, cpu, thread);
cpu              1279 tools/perf/util/evsel.c 		*perf_counts(evsel->prev_raw_counts, cpu, thread) = *count;
cpu              1307 tools/perf/util/evsel.c perf_evsel__read_one(struct evsel *evsel, int cpu, int thread)
cpu              1309 tools/perf/util/evsel.c 	struct perf_counts_values *count = perf_counts(evsel->counts, cpu, thread);
cpu              1311 tools/perf/util/evsel.c 	return perf_evsel__read(&evsel->core, cpu, thread, count);
cpu              1315 tools/perf/util/evsel.c perf_evsel__set_count(struct evsel *counter, int cpu, int thread,
cpu              1320 tools/perf/util/evsel.c 	count = perf_counts(counter->counts, cpu, thread);
cpu              1326 tools/perf/util/evsel.c 	perf_counts__set_loaded(counter->counts, cpu, thread, true);
cpu              1331 tools/perf/util/evsel.c 			       int cpu, int thread, u64 *data)
cpu              1350 tools/perf/util/evsel.c 	perf_evsel__set_count(leader, cpu, thread,
cpu              1360 tools/perf/util/evsel.c 		perf_evsel__set_count(counter, cpu, thread,
cpu              1368 tools/perf/util/evsel.c perf_evsel__read_group(struct evsel *leader, int cpu, int thread)
cpu              1389 tools/perf/util/evsel.c 	if (FD(leader, cpu, thread) < 0)
cpu              1392 tools/perf/util/evsel.c 	if (readn(FD(leader, cpu, thread), data, size) <= 0)
cpu              1395 tools/perf/util/evsel.c 	return perf_evsel__process_group_data(leader, cpu, thread, data);
cpu              1398 tools/perf/util/evsel.c int perf_evsel__read_counter(struct evsel *evsel, int cpu, int thread)
cpu              1403 tools/perf/util/evsel.c 		return perf_evsel__read_group(evsel, cpu, thread);
cpu              1405 tools/perf/util/evsel.c 		return perf_evsel__read_one(evsel, cpu, thread);
cpu              1409 tools/perf/util/evsel.c 			      int cpu, int thread, bool scale)
cpu              1414 tools/perf/util/evsel.c 	if (FD(evsel, cpu, thread) < 0)
cpu              1417 tools/perf/util/evsel.c 	if (evsel->counts == NULL && perf_evsel__alloc_counts(evsel, cpu + 1, thread + 1) < 0)
cpu              1420 tools/perf/util/evsel.c 	if (readn(FD(evsel, cpu, thread), &count, nv * sizeof(u64)) <= 0)
cpu              1423 tools/perf/util/evsel.c 	perf_evsel__compute_deltas(evsel, cpu, thread, &count);
cpu              1425 tools/perf/util/evsel.c 	*perf_counts(evsel->counts, cpu, thread) = count;
cpu              1429 tools/perf/util/evsel.c static int get_group_fd(struct evsel *evsel, int cpu, int thread)
cpu              1443 tools/perf/util/evsel.c 	fd = FD(leader, cpu, thread);
cpu              1453 tools/perf/util/evsel.c 	for (int cpu = 0; cpu < nr_cpus; cpu++)
cpu              1455 tools/perf/util/evsel.c 			FD(pos, cpu, thread) = FD(pos, cpu, thread + 1);
cpu              1483 tools/perf/util/evsel.c 				  int nr_cpus, int cpu,
cpu              1508 tools/perf/util/evsel.c 	if (update_fds(evsel, nr_cpus, cpu, threads->nr, thread))
cpu              1536 tools/perf/util/evsel.c 			   pid_t pid, int cpu, int group_fd,
cpu              1544 tools/perf/util/evsel.c 			  pid, cpu, group_fd, flags);
cpu              1546 tools/perf/util/evsel.c 		fd = sys_perf_event_open(&evsel->core.attr, pid, cpu, group_fd, flags);
cpu              1575 tools/perf/util/evsel.c 	int cpu, thread, nthreads;
cpu              1650 tools/perf/util/evsel.c 	for (cpu = 0; cpu < cpus->nr; cpu++) {
cpu              1658 tools/perf/util/evsel.c 			group_fd = get_group_fd(evsel, cpu, thread);
cpu              1662 tools/perf/util/evsel.c 			fd = perf_event_open(evsel, pid, cpus->map[cpu],
cpu              1665 tools/perf/util/evsel.c 			FD(evsel, cpu, thread) = fd;
cpu              1670 tools/perf/util/evsel.c 				if (ignore_missing_thread(evsel, cpus->nr, cpu, threads, thread, err)) {
cpu              1748 tools/perf/util/evsel.c 	if (err != -EINVAL || cpu > 0 || thread > 0)
cpu              1817 tools/perf/util/evsel.c 			close(FD(evsel, cpu, thread));
cpu              1818 tools/perf/util/evsel.c 			FD(evsel, cpu, thread) = -1;
cpu              1821 tools/perf/util/evsel.c 	} while (--cpu >= 0);
cpu              1868 tools/perf/util/evsel.c 		sample->cpu = u.val32[0];
cpu              1950 tools/perf/util/evsel.c 	data->cpu = data->pid = data->tid = -1;
cpu              2022 tools/perf/util/evsel.c 		data->cpu = u.val32[0];
cpu              2521 tools/perf/util/evsel.c 	int cpu, thread;
cpu              2523 tools/perf/util/evsel.c 	for (cpu = 0; cpu < xyarray__max_x(evsel->core.fd); cpu++) {
cpu              2526 tools/perf/util/evsel.c 			int fd = FD(evsel, cpu, thread);
cpu              2529 tools/perf/util/evsel.c 						   cpu, thread, fd) < 0)
cpu               140 tools/perf/util/evsel.h void perf_evsel__compute_deltas(struct evsel *evsel, int cpu, int thread,
cpu               269 tools/perf/util/evsel.h int perf_evsel__read_counter(struct evsel *evsel, int cpu, int thread);
cpu               272 tools/perf/util/evsel.h 			      int cpu, int thread, bool scale);
cpu               282 tools/perf/util/evsel.h 					  int cpu, int thread)
cpu               284 tools/perf/util/evsel.h 	return __perf_evsel__read_on_cpu(evsel, cpu, thread, false);
cpu               295 tools/perf/util/evsel.h 						 int cpu, int thread)
cpu               297 tools/perf/util/evsel.h 	return __perf_evsel__read_on_cpu(evsel, cpu, thread, true);
cpu               605 tools/perf/util/header.c 		ret = do_write(ff, &perf_env.cpu[j].core_id,
cpu               606 tools/perf/util/header.c 			       sizeof(perf_env.cpu[j].core_id));
cpu               609 tools/perf/util/header.c 		ret = do_write(ff, &perf_env.cpu[j].socket_id,
cpu               610 tools/perf/util/header.c 			       sizeof(perf_env.cpu[j].socket_id));
cpu               629 tools/perf/util/header.c 		ret = do_write(ff, &perf_env.cpu[j].die_id,
cpu               630 tools/perf/util/header.c 			       sizeof(perf_env.cpu[j].die_id));
cpu              1031 tools/perf/util/header.c static int cpu_cache_level__read(struct cpu_cache_level *cache, u32 cpu, u16 level)
cpu              1037 tools/perf/util/header.c 	scnprintf(path, PATH_MAX, "devices/system/cpu/cpu%d/cache/index%d/", cpu, level);
cpu              1097 tools/perf/util/header.c 	u32 nr, cpu;
cpu              1102 tools/perf/util/header.c 	for (cpu = 0; cpu < nr; cpu++) {
cpu              1107 tools/perf/util/header.c 			err = cpu_cache_level__read(&c, cpu, level);
cpu              1492 tools/perf/util/header.c 		if (ph->env.cpu != NULL) {
cpu              1496 tools/perf/util/header.c 					    i, ph->env.cpu[i].core_id,
cpu              1497 tools/perf/util/header.c 					    ph->env.cpu[i].die_id,
cpu              1498 tools/perf/util/header.c 					    ph->env.cpu[i].socket_id);
cpu              1503 tools/perf/util/header.c 		if (ph->env.cpu != NULL) {
cpu              1507 tools/perf/util/header.c 					    i, ph->env.cpu[i].core_id,
cpu              1508 tools/perf/util/header.c 					    ph->env.cpu[i].socket_id);
cpu              2205 tools/perf/util/header.c 	ph->env.cpu = calloc(cpu_nr, sizeof(*ph->env.cpu));
cpu              2206 tools/perf/util/header.c 	if (!ph->env.cpu)
cpu              2254 tools/perf/util/header.c 		zfree(&ph->env.cpu);
cpu              2271 tools/perf/util/header.c 		ph->env.cpu[i].core_id = nr;
cpu              2283 tools/perf/util/header.c 		ph->env.cpu[i].socket_id = nr;
cpu              2317 tools/perf/util/header.c 		ph->env.cpu[i].die_id = nr;
cpu              2325 tools/perf/util/header.c 	zfree(&ph->env.cpu);
cpu               668 tools/perf/util/hist.c 	r->cpu = sample->cpu;
cpu               697 tools/perf/util/hist.c 		.cpu	 = al->cpu,
cpu              1060 tools/perf/util/hist.c 		.cpu = al->cpu,
cpu                74 tools/perf/util/intel-bts.c 	int			cpu;
cpu               137 tools/perf/util/intel-bts.c 			     INTEL_BTS_ERR_LOST, sample->cpu, sample->pid,
cpu               161 tools/perf/util/intel-bts.c 	btsq->cpu = -1;
cpu               181 tools/perf/util/intel-bts.c 		if (queue->cpu != -1)
cpu               182 tools/perf/util/intel-bts.c 			btsq->cpu = queue->cpu;
cpu               292 tools/perf/util/intel-bts.c 	sample.cpu = btsq->cpu;
cpu               344 tools/perf/util/intel-bts.c static int intel_bts_synth_error(struct intel_bts *bts, int cpu, pid_t pid,
cpu               351 tools/perf/util/intel-bts.c 			     INTEL_BTS_ERR_NOINSN, cpu, pid, tid, ip,
cpu               385 tools/perf/util/intel-bts.c 			err = intel_bts_synth_error(btsq->bts, btsq->cpu,
cpu               431 tools/perf/util/intel-bts.c 			thread_stack__event(thread, btsq->cpu, btsq->sample_flags,
cpu               503 tools/perf/util/intel-bts.c 		thread_stack__set_trace_nr(thread, btsq->cpu, buffer->buffer_nr + 1);
cpu               155 tools/perf/util/intel-pt.c 	int cpu;
cpu               892 tools/perf/util/intel-pt.c 	ptq->cpu = -1;
cpu               976 tools/perf/util/intel-pt.c 		ptq->tid = machine__get_current_tid(pt->machine, ptq->cpu);
cpu               985 tools/perf/util/intel-pt.c 		if (queue->cpu == -1)
cpu               986 tools/perf/util/intel-pt.c 			ptq->cpu = ptq->thread->cpu;
cpu              1053 tools/perf/util/intel-pt.c 		if (queue->cpu != -1)
cpu              1054 tools/perf/util/intel-pt.c 			ptq->cpu = queue->cpu;
cpu              1079 tools/perf/util/intel-pt.c 			     queue_nr, ptq->cpu, ptq->pid, ptq->tid);
cpu              1211 tools/perf/util/intel-pt.c 	sample->cpu = ptq->cpu;
cpu              1324 tools/perf/util/intel-pt.c 		thread_stack__sample(ptq->thread, ptq->cpu, ptq->chain,
cpu              1752 tools/perf/util/intel-pt.c 		thread_stack__sample(ptq->thread, ptq->cpu, ptq->chain,
cpu              1824 tools/perf/util/intel-pt.c static int intel_pt_synth_error(struct intel_pt *pt, int code, int cpu,
cpu              1834 tools/perf/util/intel-pt.c 			     code, cpu, pid, tid, ip, msg, timestamp);
cpu              1852 tools/perf/util/intel-pt.c 	return intel_pt_synth_error(pt, state->err, ptq->cpu, ptq->pid,
cpu              1865 tools/perf/util/intel-pt.c 	intel_pt_log("switch: cpu %d tid %d\n", ptq->cpu, tid);
cpu              1867 tools/perf/util/intel-pt.c 	err = machine__set_current_tid(pt->machine, ptq->cpu, -1, tid);
cpu              1972 tools/perf/util/intel-pt.c 		thread_stack__event(ptq->thread, ptq->cpu, ptq->flags, state->from_ip,
cpu              1976 tools/perf/util/intel-pt.c 		thread_stack__set_trace_nr(ptq->thread, ptq->cpu, state->trace_nr);
cpu              2179 tools/perf/util/intel-pt.c 		     ptq->queue_nr, ptq->cpu, ptq->pid, ptq->tid);
cpu              2325 tools/perf/util/intel-pt.c 	return intel_pt_synth_error(pt, INTEL_PT_ERR_LOST, sample->cpu,
cpu              2329 tools/perf/util/intel-pt.c static struct intel_pt_queue *intel_pt_cpu_to_ptq(struct intel_pt *pt, int cpu)
cpu              2333 tools/perf/util/intel-pt.c 	if (cpu < 0 || !pt->queues.nr_queues)
cpu              2336 tools/perf/util/intel-pt.c 	if ((unsigned)cpu >= pt->queues.nr_queues)
cpu              2339 tools/perf/util/intel-pt.c 		i = cpu;
cpu              2341 tools/perf/util/intel-pt.c 	if (pt->queues.queue_array[i].cpu == cpu)
cpu              2345 tools/perf/util/intel-pt.c 		if (pt->queues.queue_array[--i].cpu == cpu)
cpu              2350 tools/perf/util/intel-pt.c 		if (pt->queues.queue_array[j].cpu == cpu)
cpu              2357 tools/perf/util/intel-pt.c static int intel_pt_sync_switch(struct intel_pt *pt, int cpu, pid_t tid,
cpu              2366 tools/perf/util/intel-pt.c 	ptq = intel_pt_cpu_to_ptq(pt, cpu);
cpu              2391 tools/perf/util/intel-pt.c 		intel_pt_log("ERROR: cpu %d expecting switch ip\n", cpu);
cpu              2407 tools/perf/util/intel-pt.c 	int cpu, ret;
cpu              2414 tools/perf/util/intel-pt.c 	cpu = sample->cpu;
cpu              2417 tools/perf/util/intel-pt.c 		     cpu, tid, sample->time, perf_time_to_tsc(sample->time,
cpu              2420 tools/perf/util/intel-pt.c 	ret = intel_pt_sync_switch(pt, cpu, tid, sample->time);
cpu              2424 tools/perf/util/intel-pt.c 	return machine__set_current_tid(pt->machine, cpu, -1, tid);
cpu              2432 tools/perf/util/intel-pt.c 	int cpu = sample->cpu;
cpu              2437 tools/perf/util/intel-pt.c 		ptq = intel_pt_cpu_to_ptq(pt, cpu);
cpu              2459 tools/perf/util/intel-pt.c 	if (machine__get_current_tid(pt->machine, cpu) == tid)
cpu              2462 tools/perf/util/intel-pt.c 	return machine__set_current_tid(pt->machine, cpu, pid, tid);
cpu              2470 tools/perf/util/intel-pt.c 	int cpu, ret;
cpu              2472 tools/perf/util/intel-pt.c 	cpu = sample->cpu;
cpu              2496 tools/perf/util/intel-pt.c 		     cpu, pid, tid, sample->time, perf_time_to_tsc(sample->time,
cpu              2499 tools/perf/util/intel-pt.c 	ret = intel_pt_sync_switch(pt, cpu, tid, sample->time);
cpu              2503 tools/perf/util/intel-pt.c 	return machine__set_current_tid(pt->machine, cpu, pid, tid);
cpu              2514 tools/perf/util/intel-pt.c 		     sample->cpu, event->itrace_start.pid,
cpu              2518 tools/perf/util/intel-pt.c 	return machine__set_current_tid(pt->machine, sample->cpu,
cpu              2581 tools/perf/util/intel-pt.c 		     event->header.type, sample->cpu, sample->time, timestamp);
cpu              1026 tools/perf/util/machine.c 	int nr_cpus_avail, cpu;
cpu              1057 tools/perf/util/machine.c 	for (cpu = 0; cpu < nr_cpus_avail; cpu++) {
cpu              1059 tools/perf/util/machine.c 			 cpu * X86_64_CPU_ENTRY_AREA_SIZE +
cpu              2588 tools/perf/util/machine.c pid_t machine__get_current_tid(struct machine *machine, int cpu)
cpu              2592 tools/perf/util/machine.c 	if (cpu < 0 || cpu >= nr_cpus || !machine->current_tid)
cpu              2595 tools/perf/util/machine.c 	return machine->current_tid[cpu];
cpu              2598 tools/perf/util/machine.c int machine__set_current_tid(struct machine *machine, int cpu, pid_t pid,
cpu              2604 tools/perf/util/machine.c 	if (cpu < 0)
cpu              2617 tools/perf/util/machine.c 	if (cpu >= nr_cpus) {
cpu              2618 tools/perf/util/machine.c 		pr_err("Requested CPU %d too large. ", cpu);
cpu              2623 tools/perf/util/machine.c 	machine->current_tid[cpu] = tid;
cpu              2629 tools/perf/util/machine.c 	thread->cpu = cpu;
cpu               254 tools/perf/util/machine.h pid_t machine__get_current_tid(struct machine *machine, int cpu);
cpu               255 tools/perf/util/machine.h int machine__set_current_tid(struct machine *machine, int cpu, pid_t pid,
cpu               191 tools/perf/util/mmap.c static int perf_mmap__aio_bind(struct mmap *map, int idx, int cpu, int affinity)
cpu               200 tools/perf/util/mmap.c 		node_mask = 1UL << cpu__get_node(cpu);
cpu               203 tools/perf/util/mmap.c 				data, data + mmap_len, cpu__get_node(cpu));
cpu               226 tools/perf/util/mmap.c 		int cpu __maybe_unused, int affinity __maybe_unused)
cpu               260 tools/perf/util/mmap.c 			ret = perf_mmap__aio_bind(map, i, map->core.cpu, mp->affinity);
cpu               332 tools/perf/util/mmap.c 	int c, cpu, nr_cpus;
cpu               341 tools/perf/util/mmap.c 		cpu = cpu_map->map[c]; /* map c index to online cpu index */
cpu               342 tools/perf/util/mmap.c 		if (cpu__get_node(cpu) == node)
cpu               343 tools/perf/util/mmap.c 			CPU_SET(cpu, mask);
cpu               351 tools/perf/util/mmap.c 		build_node_mask(cpu__get_node(map->core.cpu), &map->affinity_mask);
cpu               353 tools/perf/util/mmap.c 		CPU_SET(map->core.cpu, &map->affinity_mask);
cpu               356 tools/perf/util/mmap.c int perf_mmap__mmap(struct mmap *map, struct mmap_params *mp, int fd, int cpu)
cpu               383 tools/perf/util/mmap.c 	map->core.cpu = cpu;
cpu                44 tools/perf/util/mmap.h int perf_mmap__mmap(struct mmap *map, struct mmap_params *mp, int fd, int cpu);
cpu               121 tools/perf/util/python.c 	sample_member_def(sample_cpu, cpu, T_UINT, "event cpu"),
cpu               993 tools/perf/util/python.c static struct mmap *get_md(struct evlist *evlist, int cpu)
cpu              1000 tools/perf/util/python.c 		if (md->core.cpu == cpu)
cpu              1012 tools/perf/util/python.c 	int sample_id_all = 1, cpu;
cpu              1018 tools/perf/util/python.c 					 &cpu, &sample_id_all))
cpu              1021 tools/perf/util/python.c 	md = get_md(evlist, cpu);
cpu              1386 tools/perf/util/python.c void test_attr__open(struct perf_event_attr *attr, pid_t pid, int cpu,
cpu                18 tools/perf/util/record.c static int perf_do_probe_api(setup_probe_fn_t fn, int cpu, const char *str)
cpu                36 tools/perf/util/record.c 		fd = sys_perf_event_open(&evsel->core.attr, pid, cpu, -1, flags);
cpu                50 tools/perf/util/record.c 	fd = sys_perf_event_open(&evsel->core.attr, pid, cpu, -1, flags);
cpu                68 tools/perf/util/record.c 	int cpu, ret, i = 0;
cpu                73 tools/perf/util/record.c 	cpu = cpus->map[0];
cpu                77 tools/perf/util/record.c 		ret = perf_do_probe_api(fn, cpu, try[i++]);
cpu               123 tools/perf/util/record.c 	int cpu, fd;
cpu               128 tools/perf/util/record.c 	cpu = cpus->map[0];
cpu               131 tools/perf/util/record.c 	fd = sys_perf_event_open(&attr, -1, cpu, -1, 0);
cpu               267 tools/perf/util/record.c 	int err, fd, cpu;
cpu               284 tools/perf/util/record.c 		cpu =  cpus ? cpus->map[0] : 0;
cpu               287 tools/perf/util/record.c 		cpu = evlist->core.cpus->map[0];
cpu               291 tools/perf/util/record.c 		fd = sys_perf_event_open(&evsel->core.attr, pid, cpu, -1,
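The record.c lines above probe kernel support by opening a throwaway event on the first mapped CPU; sys_perf_event_open() there is a thin wrapper around the perf_event_open(2) system call. Below is a minimal, hedged sketch of that raw call bound to one CPU; the attribute choice and the CPU number are illustrative only and not taken from record.c.

/* Sketch: open a hardware-cycles counter bound to one CPU, counting all
 * tasks on it. Needs a perf-capable kernel and sufficient privileges;
 * error handling is deliberately minimal. */
#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <string.h>
#include <stdio.h>
#include <unistd.h>

static int open_cycles_on_cpu(int cpu)
{
	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.type = PERF_TYPE_HARDWARE;
	attr.size = sizeof(attr);
	attr.config = PERF_COUNT_HW_CPU_CYCLES;

	/* pid == -1 with cpu >= 0 means: every task, but only on that CPU */
	return syscall(__NR_perf_event_open, &attr, -1, cpu, -1, 0);
}

int main(void)
{
	int fd = open_cycles_on_cpu(0);

	if (fd < 0)
		perror("perf_event_open");
	else
		close(fd);
	return 0;
}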
cpu               185 tools/perf/util/s390-cpumsf.c 	int			cpu;
cpu               202 tools/perf/util/s390-cpumsf.c 	if (!sf->use_logfile || sf->queues.nr_queues <= sample->cpu)
cpu               205 tools/perf/util/s390-cpumsf.c 	q = &sf->queues.queue_array[sample->cpu];
cpu               215 tools/perf/util/s390-cpumsf.c 				 sf->logdir, sample->cpu)
cpu               216 tools/perf/util/s390-cpumsf.c 			: asprintf(&name, "aux.ctr.%02x", sample->cpu);
cpu               511 tools/perf/util/s390-cpumsf.c 				.cpu = sfq->cpu,
cpu               537 tools/perf/util/s390-cpumsf.c 		 sample.tid, sample.cpumode, sample.cpu);
cpu               777 tools/perf/util/s390-cpumsf.c 	sfq->cpu = -1;
cpu               813 tools/perf/util/s390-cpumsf.c 		if (queue->cpu != -1)
cpu               814 tools/perf/util/s390-cpumsf.c 			sfq->cpu = queue->cpu;
cpu               885 tools/perf/util/s390-cpumsf.c static int s390_cpumsf_synth_error(struct s390_cpumsf *sf, int code, int cpu,
cpu               894 tools/perf/util/s390-cpumsf.c 			     code, cpu, pid, tid, ip, msg, timestamp);
cpu               905 tools/perf/util/s390-cpumsf.c 	return s390_cpumsf_synth_error(sf, 1, sample->cpu,
cpu               349 tools/perf/util/scripting-engines/trace-event-perl.c 	int cpu = sample->cpu;
cpu               383 tools/perf/util/scripting-engines/trace-event-perl.c 	XPUSHs(sv_2mortal(newSVuv(cpu)));
cpu               419 tools/perf/util/scripting-engines/trace-event-perl.c 		XPUSHs(sv_2mortal(newSVuv(cpu)));
cpu               747 tools/perf/util/scripting-engines/trace-event-python.c 			_PyLong_FromLong(sample->cpu));
cpu               804 tools/perf/util/scripting-engines/trace-event-python.c 	int cpu = sample->cpu;
cpu               855 tools/perf/util/scripting-engines/trace-event-python.c 		PyTuple_SetItem(t, n++, _PyLong_FromLong(cpu));
cpu               862 tools/perf/util/scripting-engines/trace-event-python.c 		pydict_set_item_string_decref(dict, "common_cpu", _PyLong_FromLong(cpu));
cpu              1138 tools/perf/util/scripting-engines/trace-event-python.c 	tuple_set_s32(t, 10, es->sample->cpu);
cpu              1255 tools/perf/util/scripting-engines/trace-event-python.c 	tuple_set_s32(t, 3, sample->cpu);
cpu              1356 tools/perf/util/scripting-engines/trace-event-python.c process_stat(struct evsel *counter, int cpu, int thread, u64 tstamp,
cpu              1376 tools/perf/util/scripting-engines/trace-event-python.c 	PyTuple_SetItem(t, n++, _PyLong_FromLong(cpu));
cpu              1397 tools/perf/util/scripting-engines/trace-event-python.c 	int cpu, thread;
cpu              1406 tools/perf/util/scripting-engines/trace-event-python.c 		for (cpu = 0; cpu < cpus->nr; cpu++) {
cpu              1407 tools/perf/util/scripting-engines/trace-event-python.c 			process_stat(counter, cpus->map[cpu],
cpu              1409 tools/perf/util/scripting-engines/trace-event-python.c 				     perf_counts(counter->counts, cpu, thread));
cpu               817 tools/perf/util/session.c 	event->auxtrace.cpu       = bswap_32(event->auxtrace.cpu);
cpu               825 tools/perf/util/session.c 	event->auxtrace_error.cpu  = bswap_32(event->auxtrace_error.cpu);
cpu               862 tools/perf/util/session.c 			cpus->cpu[i] = bswap_16(cpus->cpu[i]);
cpu               896 tools/perf/util/session.c 	event->stat.cpu    = bswap_32(event->stat.cpu);
cpu              1164 tools/perf/util/session.c 		printf("%u ", sample->cpu);
cpu              2339 tools/perf/util/session.c 		int cpu = map->map[i];
cpu              2341 tools/perf/util/session.c 		if (cpu >= nr_cpus) {
cpu              2343 tools/perf/util/session.c 			       "Consider raising MAX_NR_CPUS\n", cpu);
cpu              2347 tools/perf/util/session.c 		set_bit(cpu, cpu_bitmap);
cpu              2420 tools/perf/util/session.c 			fprintf(stdout,	"  cpu: %"PRI_ld64, e->cpu);
cpu              2428 tools/perf/util/session.c 		sid->cpu = e->cpu;
cpu                12 tools/perf/util/smt.c 	int cpu;
cpu                19 tools/perf/util/smt.c 	for (cpu = 0; cpu < ncpu; cpu++) {
cpu                26 tools/perf/util/smt.c 			"devices/system/cpu/cpu%d/topology/core_cpus", cpu);
cpu                30 tools/perf/util/smt.c 				cpu);
cpu               580 tools/perf/util/sort.c 	return right->cpu - left->cpu;
cpu               586 tools/perf/util/sort.c 	return repsep_snprintf(bf, size, "%*.*d", width, width, he->cpu);
cpu                40 tools/perf/util/sort.h 	int cpu;
cpu               103 tools/perf/util/sort.h 	s32			cpu;
cpu               495 tools/perf/util/stat-display.c 	int cpu, s2, id, s;
cpu               503 tools/perf/util/stat-display.c 			for (cpu = 0; cpu < perf_evsel__nr_cpus(counter); cpu++) {
cpu               504 tools/perf/util/stat-display.c 				s2 = config->aggr_get_id(config, evlist->core.cpus, cpu);
cpu               507 tools/perf/util/stat-display.c 				val += perf_counts(counter->counts, cpu, 0)->val;
cpu               585 tools/perf/util/stat-display.c 	int cpu;
cpu               592 tools/perf/util/stat-display.c 	int cpu, s2;
cpu               594 tools/perf/util/stat-display.c 	for (cpu = 0; cpu < perf_evsel__nr_cpus(counter); cpu++) {
cpu               597 tools/perf/util/stat-display.c 		s2 = config->aggr_get_id(config, evsel__cpus(counter), cpu);
cpu               602 tools/perf/util/stat-display.c 		counts = perf_counts(counter->counts, cpu, 0);
cpu               700 tools/perf/util/stat-display.c 	int cpu, thread, i = 0;
cpu               711 tools/perf/util/stat-display.c 		for (cpu = 0; cpu < ncpus; cpu++) {
cpu               712 tools/perf/util/stat-display.c 			val += perf_counts(counter->counts, cpu, thread)->val;
cpu               713 tools/perf/util/stat-display.c 			ena += perf_counts(counter->counts, cpu, thread)->ena;
cpu               714 tools/perf/util/stat-display.c 			run += perf_counts(counter->counts, cpu, thread)->run;
cpu               825 tools/perf/util/stat-display.c 	ad->val += perf_counts(counter->counts, ad->cpu, 0)->val;
cpu               826 tools/perf/util/stat-display.c 	ad->ena += perf_counts(counter->counts, ad->cpu, 0)->ena;
cpu               827 tools/perf/util/stat-display.c 	ad->run += perf_counts(counter->counts, ad->cpu, 0)->run;
cpu               840 tools/perf/util/stat-display.c 	int cpu;
cpu               842 tools/perf/util/stat-display.c 	for (cpu = 0; cpu < perf_evsel__nr_cpus(counter); cpu++) {
cpu               843 tools/perf/util/stat-display.c 		struct aggr_data ad = { .cpu = cpu };
cpu               855 tools/perf/util/stat-display.c 		printout(config, cpu, 0, counter, uval, prefix, run, ena, 1.0,
cpu               866 tools/perf/util/stat-display.c 	int cpu;
cpu               873 tools/perf/util/stat-display.c 	for (cpu = 0; cpu < nrcpus; cpu++) {
cpu               880 tools/perf/util/stat-display.c 				aggr_printout(config, counter, cpu, 0);
cpu               883 tools/perf/util/stat-display.c 			val = perf_counts(counter->counts, cpu, 0)->val;
cpu               884 tools/perf/util/stat-display.c 			ena = perf_counts(counter->counts, cpu, 0)->ena;
cpu               885 tools/perf/util/stat-display.c 			run = perf_counts(counter->counts, cpu, 0)->run;
cpu               888 tools/perf/util/stat-display.c 			printout(config, cpu, 0, counter, uval, prefix, run, ena, 1.0,
cpu                30 tools/perf/util/stat-shadow.c 	int cpu;
cpu                44 tools/perf/util/stat-shadow.c 	if (a->cpu != b->cpu)
cpu                45 tools/perf/util/stat-shadow.c 		return a->cpu - b->cpu;
cpu                99 tools/perf/util/stat-shadow.c 					      int cpu,
cpu               108 tools/perf/util/stat-shadow.c 		.cpu = cpu,
cpu               196 tools/perf/util/stat-shadow.c 				int ctx, int cpu, u64 count)
cpu               198 tools/perf/util/stat-shadow.c 	struct saved_value *v = saved_value_lookup(NULL, cpu, true,
cpu               211 tools/perf/util/stat-shadow.c 				    int cpu, struct runtime_stat *st)
cpu               220 tools/perf/util/stat-shadow.c 		update_runtime_stat(st, STAT_NSECS, 0, cpu, count_ns);
cpu               222 tools/perf/util/stat-shadow.c 		update_runtime_stat(st, STAT_CYCLES, ctx, cpu, count);
cpu               224 tools/perf/util/stat-shadow.c 		update_runtime_stat(st, STAT_CYCLES_IN_TX, ctx, cpu, count);
cpu               226 tools/perf/util/stat-shadow.c 		update_runtime_stat(st, STAT_TRANSACTION, ctx, cpu, count);
cpu               228 tools/perf/util/stat-shadow.c 		update_runtime_stat(st, STAT_ELISION, ctx, cpu, count);
cpu               231 tools/perf/util/stat-shadow.c 				    ctx, cpu, count);
cpu               234 tools/perf/util/stat-shadow.c 				    ctx, cpu, count);
cpu               237 tools/perf/util/stat-shadow.c 				    ctx, cpu, count);
cpu               240 tools/perf/util/stat-shadow.c 				    ctx, cpu, count);
cpu               243 tools/perf/util/stat-shadow.c 				    ctx, cpu, count);
cpu               246 tools/perf/util/stat-shadow.c 				    ctx, cpu, count);
cpu               249 tools/perf/util/stat-shadow.c 				    ctx, cpu, count);
cpu               251 tools/perf/util/stat-shadow.c 		update_runtime_stat(st, STAT_BRANCHES, ctx, cpu, count);
cpu               253 tools/perf/util/stat-shadow.c 		update_runtime_stat(st, STAT_CACHEREFS, ctx, cpu, count);
cpu               255 tools/perf/util/stat-shadow.c 		update_runtime_stat(st, STAT_L1_DCACHE, ctx, cpu, count);
cpu               257 tools/perf/util/stat-shadow.c 		update_runtime_stat(st, STAT_L1_ICACHE, ctx, cpu, count);
cpu               259 tools/perf/util/stat-shadow.c 		update_runtime_stat(st, STAT_LL_CACHE, ctx, cpu, count);
cpu               261 tools/perf/util/stat-shadow.c 		update_runtime_stat(st, STAT_DTLB_CACHE, ctx, cpu, count);
cpu               263 tools/perf/util/stat-shadow.c 		update_runtime_stat(st, STAT_ITLB_CACHE, ctx, cpu, count);
cpu               265 tools/perf/util/stat-shadow.c 		update_runtime_stat(st, STAT_SMI_NUM, ctx, cpu, count);
cpu               267 tools/perf/util/stat-shadow.c 		update_runtime_stat(st, STAT_APERF, ctx, cpu, count);
cpu               270 tools/perf/util/stat-shadow.c 		v = saved_value_lookup(counter, cpu, true, STAT_NONE, 0, st);
cpu               276 tools/perf/util/stat-shadow.c 				       cpu, true, STAT_NONE, 0, st);
cpu               400 tools/perf/util/stat-shadow.c 			       enum stat_type type, int ctx, int cpu)
cpu               404 tools/perf/util/stat-shadow.c 	v = saved_value_lookup(NULL, cpu, false, type, ctx, st);
cpu               412 tools/perf/util/stat-shadow.c 			     enum stat_type type, int ctx, int cpu)
cpu               416 tools/perf/util/stat-shadow.c 	v = saved_value_lookup(NULL, cpu, false, type, ctx, st);
cpu               424 tools/perf/util/stat-shadow.c 					  int cpu,
cpu               433 tools/perf/util/stat-shadow.c 	total = runtime_stat_avg(st, STAT_CYCLES, ctx, cpu);
cpu               448 tools/perf/util/stat-shadow.c 					 int cpu,
cpu               457 tools/perf/util/stat-shadow.c 	total = runtime_stat_avg(st, STAT_CYCLES, ctx, cpu);
cpu               468 tools/perf/util/stat-shadow.c 				int cpu,
cpu               478 tools/perf/util/stat-shadow.c 	total = runtime_stat_avg(st, STAT_BRANCHES, ctx, cpu);
cpu               489 tools/perf/util/stat-shadow.c 				   int cpu,
cpu               500 tools/perf/util/stat-shadow.c 	total = runtime_stat_avg(st, STAT_L1_DCACHE, ctx, cpu);
cpu               511 tools/perf/util/stat-shadow.c 				   int cpu,
cpu               522 tools/perf/util/stat-shadow.c 	total = runtime_stat_avg(st, STAT_L1_ICACHE, ctx, cpu);
cpu               532 tools/perf/util/stat-shadow.c 				    int cpu,
cpu               542 tools/perf/util/stat-shadow.c 	total = runtime_stat_avg(st, STAT_DTLB_CACHE, ctx, cpu);
cpu               552 tools/perf/util/stat-shadow.c 				    int cpu,
cpu               562 tools/perf/util/stat-shadow.c 	total = runtime_stat_avg(st, STAT_ITLB_CACHE, ctx, cpu);
cpu               572 tools/perf/util/stat-shadow.c 				  int cpu,
cpu               582 tools/perf/util/stat-shadow.c 	total = runtime_stat_avg(st, STAT_LL_CACHE, ctx, cpu);
cpu               640 tools/perf/util/stat-shadow.c static double td_total_slots(int ctx, int cpu, struct runtime_stat *st)
cpu               642 tools/perf/util/stat-shadow.c 	return runtime_stat_avg(st, STAT_TOPDOWN_TOTAL_SLOTS, ctx, cpu);
cpu               645 tools/perf/util/stat-shadow.c static double td_bad_spec(int ctx, int cpu, struct runtime_stat *st)
cpu               651 tools/perf/util/stat-shadow.c 	total = runtime_stat_avg(st, STAT_TOPDOWN_SLOTS_ISSUED, ctx, cpu) -
cpu               652 tools/perf/util/stat-shadow.c 		runtime_stat_avg(st, STAT_TOPDOWN_SLOTS_RETIRED, ctx, cpu) +
cpu               653 tools/perf/util/stat-shadow.c 		runtime_stat_avg(st, STAT_TOPDOWN_RECOVERY_BUBBLES, ctx, cpu);
cpu               655 tools/perf/util/stat-shadow.c 	total_slots = td_total_slots(ctx, cpu, st);
cpu               661 tools/perf/util/stat-shadow.c static double td_retiring(int ctx, int cpu, struct runtime_stat *st)
cpu               664 tools/perf/util/stat-shadow.c 	double total_slots = td_total_slots(ctx, cpu, st);
cpu               666 tools/perf/util/stat-shadow.c 					    ctx, cpu);
cpu               673 tools/perf/util/stat-shadow.c static double td_fe_bound(int ctx, int cpu, struct runtime_stat *st)
cpu               676 tools/perf/util/stat-shadow.c 	double total_slots = td_total_slots(ctx, cpu, st);
cpu               678 tools/perf/util/stat-shadow.c 					    ctx, cpu);
cpu               685 tools/perf/util/stat-shadow.c static double td_be_bound(int ctx, int cpu, struct runtime_stat *st)
cpu               687 tools/perf/util/stat-shadow.c 	double sum = (td_fe_bound(ctx, cpu, st) +
cpu               688 tools/perf/util/stat-shadow.c 		      td_bad_spec(ctx, cpu, st) +
cpu               689 tools/perf/util/stat-shadow.c 		      td_retiring(ctx, cpu, st));
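The td_*() helpers above compute the level-1 Top-Down breakdown per CPU: each category is a fraction of the total pipeline-slot budget, and the backend-bound share is whatever the other three leave over. A minimal sketch of the same arithmetic on plain averaged counts follows; the parameter names are illustrative, since stat-shadow pulls these values from runtime_stat_avg() rather than taking them as arguments.

/* Sketch of the level-1 Top-Down fractions mirrored from td_total_slots(),
 * td_bad_spec(), td_retiring(), td_fe_bound() and td_be_bound() above. */
struct topdown_l1 {
	double retiring, bad_spec, fe_bound, be_bound;
};

static struct topdown_l1 topdown_level1(double total_slots,
					double slots_issued,
					double slots_retired,
					double recovery_bubbles,
					double fetch_bubbles)
{
	struct topdown_l1 r = { 0 };

	if (total_slots <= 0)
		return r;

	r.retiring = slots_retired / total_slots;
	r.bad_spec = (slots_issued - slots_retired + recovery_bubbles) /
		     total_slots;
	r.fe_bound = fetch_bubbles / total_slots;
	/* the remainder of the slot budget is treated as backend bound */
	r.be_bound = 1.0 - (r.retiring + r.bad_spec + r.fe_bound);
	if (r.be_bound < 0)
		r.be_bound = 0;
	return r;
}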
cpu               696 tools/perf/util/stat-shadow.c 			   int cpu, struct evsel *evsel,
cpu               704 tools/perf/util/stat-shadow.c 	smi_num = runtime_stat_avg(st, STAT_SMI_NUM, ctx, cpu);
cpu               705 tools/perf/util/stat-shadow.c 	aperf = runtime_stat_avg(st, STAT_APERF, ctx, cpu);
cpu               706 tools/perf/util/stat-shadow.c 	cycles = runtime_stat_avg(st, STAT_CYCLES, ctx, cpu);
cpu               727 tools/perf/util/stat-shadow.c 			   int cpu,
cpu               750 tools/perf/util/stat-shadow.c 			v = saved_value_lookup(metric_events[i], cpu, false,
cpu               817 tools/perf/util/stat-shadow.c 				   double avg, int cpu,
cpu               831 tools/perf/util/stat-shadow.c 		total = runtime_stat_avg(st, STAT_CYCLES, ctx, cpu);
cpu               842 tools/perf/util/stat-shadow.c 					 ctx, cpu);
cpu               846 tools/perf/util/stat-shadow.c 						    ctx, cpu));
cpu               856 tools/perf/util/stat-shadow.c 		if (runtime_stat_n(st, STAT_BRANCHES, ctx, cpu) != 0)
cpu               857 tools/perf/util/stat-shadow.c 			print_branch_misses(config, cpu, evsel, avg, out, st);
cpu               866 tools/perf/util/stat-shadow.c 		if (runtime_stat_n(st, STAT_L1_DCACHE, ctx, cpu) != 0)
cpu               867 tools/perf/util/stat-shadow.c 			print_l1_dcache_misses(config, cpu, evsel, avg, out, st);
cpu               876 tools/perf/util/stat-shadow.c 		if (runtime_stat_n(st, STAT_L1_ICACHE, ctx, cpu) != 0)
cpu               877 tools/perf/util/stat-shadow.c 			print_l1_icache_misses(config, cpu, evsel, avg, out, st);
cpu               886 tools/perf/util/stat-shadow.c 		if (runtime_stat_n(st, STAT_DTLB_CACHE, ctx, cpu) != 0)
cpu               887 tools/perf/util/stat-shadow.c 			print_dtlb_cache_misses(config, cpu, evsel, avg, out, st);
cpu               896 tools/perf/util/stat-shadow.c 		if (runtime_stat_n(st, STAT_ITLB_CACHE, ctx, cpu) != 0)
cpu               897 tools/perf/util/stat-shadow.c 			print_itlb_cache_misses(config, cpu, evsel, avg, out, st);
cpu               906 tools/perf/util/stat-shadow.c 		if (runtime_stat_n(st, STAT_LL_CACHE, ctx, cpu) != 0)
cpu               907 tools/perf/util/stat-shadow.c 			print_ll_cache_misses(config, cpu, evsel, avg, out, st);
cpu               911 tools/perf/util/stat-shadow.c 		total = runtime_stat_avg(st, STAT_CACHEREFS, ctx, cpu);
cpu               916 tools/perf/util/stat-shadow.c 		if (runtime_stat_n(st, STAT_CACHEREFS, ctx, cpu) != 0)
cpu               922 tools/perf/util/stat-shadow.c 		print_stalled_cycles_frontend(config, cpu, evsel, avg, out, st);
cpu               924 tools/perf/util/stat-shadow.c 		print_stalled_cycles_backend(config, cpu, evsel, avg, out, st);
cpu               926 tools/perf/util/stat-shadow.c 		total = runtime_stat_avg(st, STAT_NSECS, 0, cpu);
cpu               935 tools/perf/util/stat-shadow.c 		total = runtime_stat_avg(st, STAT_CYCLES, ctx, cpu);
cpu               945 tools/perf/util/stat-shadow.c 		total = runtime_stat_avg(st, STAT_CYCLES, ctx, cpu);
cpu               946 tools/perf/util/stat-shadow.c 		total2 = runtime_stat_avg(st, STAT_CYCLES_IN_TX, ctx, cpu);
cpu               957 tools/perf/util/stat-shadow.c 					 ctx, cpu);
cpu               962 tools/perf/util/stat-shadow.c 		if (runtime_stat_n(st, STAT_CYCLES_IN_TX, ctx, cpu) != 0)
cpu               970 tools/perf/util/stat-shadow.c 					 ctx, cpu);
cpu               983 tools/perf/util/stat-shadow.c 		double fe_bound = td_fe_bound(ctx, cpu, st);
cpu               990 tools/perf/util/stat-shadow.c 		double retiring = td_retiring(ctx, cpu, st);
cpu               997 tools/perf/util/stat-shadow.c 		double bad_spec = td_bad_spec(ctx, cpu, st);
cpu              1004 tools/perf/util/stat-shadow.c 		double be_bound = td_be_bound(ctx, cpu, st);
cpu              1017 tools/perf/util/stat-shadow.c 		if (td_total_slots(ctx, cpu, st) > 0)
cpu              1024 tools/perf/util/stat-shadow.c 				evsel->metric_name, NULL, avg, cpu, out, st);
cpu              1025 tools/perf/util/stat-shadow.c 	} else if (runtime_stat_n(st, STAT_NSECS, 0, cpu) != 0) {
cpu              1029 tools/perf/util/stat-shadow.c 		total = runtime_stat_avg(st, STAT_NSECS, 0, cpu);
cpu              1040 tools/perf/util/stat-shadow.c 		print_smi_cost(config, cpu, evsel, out, st);
cpu              1053 tools/perf/util/stat-shadow.c 					mexp->metric_unit, avg, cpu, out, st);
cpu                89 tools/perf/util/stat.c 	ID(CYCLES_IN_TX,	cpu/cycles-t/),
cpu                90 tools/perf/util/stat.c 	ID(TRANSACTION_START,	cpu/tx-start/),
cpu                91 tools/perf/util/stat.c 	ID(ELISION_START,	cpu/el-start/),
cpu                92 tools/perf/util/stat.c 	ID(CYCLES_IN_TX_CP,	cpu/cycles-ct/),
cpu               239 tools/perf/util/stat.c 			 struct perf_counts_values *vals, int cpu, bool *skip)
cpu               272 tools/perf/util/stat.c 	s = cpu_map__get_socket(cpus, cpu, NULL);
cpu               282 tools/perf/util/stat.c 		       int cpu, int thread,
cpu               289 tools/perf/util/stat.c 	if (check_per_pkg(evsel, count, cpu, &skip)) {
cpu               304 tools/perf/util/stat.c 			perf_evsel__compute_deltas(evsel, cpu, thread, count);
cpu               308 tools/perf/util/stat.c 						       cpu, &rt_stat);
cpu               337 tools/perf/util/stat.c 	int cpu, thread;
cpu               343 tools/perf/util/stat.c 		for (cpu = 0; cpu < ncpus; cpu++) {
cpu               344 tools/perf/util/stat.c 			if (process_counter_values(config, counter, cpu, thread,
cpu               345 tools/perf/util/stat.c 						   perf_counts(counter->counts, cpu, thread)))
cpu               420 tools/perf/util/stat.c 	*perf_counts(counter->counts, st->cpu, st->thread) = count;
cpu               431 tools/perf/util/stat.c 		       st->id, st->cpu, st->thread);
cpu                94 tools/perf/util/stat.h 			     struct perf_cpu_map *m, int cpu);
cpu               177 tools/perf/util/stat.h 				    int cpu, struct runtime_stat *st);
cpu               187 tools/perf/util/stat.h 				   double avg, int cpu,
cpu                43 tools/perf/util/svghelper.c static double cpu2slot(int cpu)
cpu                45 tools/perf/util/svghelper.c 	return 2 * cpu + 1;
cpu                50 tools/perf/util/svghelper.c static double cpu2y(int cpu)
cpu                53 tools/perf/util/svghelper.c 		return cpu2slot(topology_map[cpu]) * SLOT_MULT;
cpu                55 tools/perf/util/svghelper.c 		return cpu2slot(cpu) * SLOT_MULT;
cpu               220 tools/perf/util/svghelper.c void svg_blocked(int Yslot, int cpu, u64 start, u64 end, const char *backtrace)
cpu               226 tools/perf/util/svghelper.c 	fprintf(svgfile, "<title>#%d blocked %s</title>\n", cpu,
cpu               234 tools/perf/util/svghelper.c void svg_running(int Yslot, int cpu, u64 start, u64 end, const char *backtrace)
cpu               249 tools/perf/util/svghelper.c 		cpu, time_to_string(end - start));
cpu               257 tools/perf/util/svghelper.c 	if (cpu > 9)
cpu               265 tools/perf/util/svghelper.c 			time2pixels(start), Yslot *  SLOT_MULT + SLOT_HEIGHT - 1, text_size,  cpu + 1);
cpu               288 tools/perf/util/svghelper.c void svg_waiting(int Yslot, int cpu, u64 start, u64 end, const char *backtrace)
cpu               312 tools/perf/util/svghelper.c 	fprintf(svgfile, "<title>#%d waiting %s</title>\n", cpu, time_to_string(end - start));
cpu               356 tools/perf/util/svghelper.c void svg_cpu_box(int cpu, u64 __max_freq, u64 __turbo_freq)
cpu               370 tools/perf/util/svghelper.c 		cpu2y(cpu), SLOT_MULT+SLOT_HEIGHT);
cpu               372 tools/perf/util/svghelper.c 	sprintf(cpu_string, "CPU %i", (int)cpu);
cpu               374 tools/perf/util/svghelper.c 		10+time2pixels(first_time), cpu2y(cpu) + SLOT_HEIGHT/2, cpu_string);
cpu               377 tools/perf/util/svghelper.c 		10+time2pixels(first_time), cpu2y(cpu) + SLOT_MULT + SLOT_HEIGHT - 4, cpu_model());
cpu               382 tools/perf/util/svghelper.c void svg_process(int cpu, u64 start, u64 end, int pid, const char *name, const char *backtrace)
cpu               397 tools/perf/util/svghelper.c 	fprintf(svgfile, "<g transform=\"translate(%.8f,%.8f)\">\n", time2pixels(start), cpu2y(cpu));
cpu               416 tools/perf/util/svghelper.c void svg_cstate(int cpu, u64 start, u64 end, int type)
cpu               434 tools/perf/util/svghelper.c 		cpu2y(cpu), SLOT_MULT+SLOT_HEIGHT);
cpu               444 tools/perf/util/svghelper.c 			time2pixels(start), cpu2y(cpu)+width, width, type);
cpu               473 tools/perf/util/svghelper.c void svg_pstate(int cpu, u64 start, u64 end, u64 freq)
cpu               484 tools/perf/util/svghelper.c 	height = 1 + cpu2y(cpu) + SLOT_MULT + SLOT_HEIGHT - height;
cpu               699 tools/perf/util/svghelper.c static void scan_thread_topology(int *map, struct topology *t, int cpu,
cpu               706 tools/perf/util/svghelper.c 		if (!test_bit(cpu, cpumask_bits(&t->sib_thr[i])))
cpu               719 tools/perf/util/svghelper.c 	int cpu;
cpu               722 tools/perf/util/svghelper.c 		for_each_set_bit(cpu, cpumask_bits(&t->sib_core[i]), nr_cpus)
cpu               723 tools/perf/util/svghelper.c 			scan_thread_topology(map, t, cpu, &pos, nr_cpus);
cpu                14 tools/perf/util/svghelper.h void svg_blocked(int Yslot, int cpu, u64 start, u64 end, const char *backtrace);
cpu                15 tools/perf/util/svghelper.h void svg_running(int Yslot, int cpu, u64 start, u64 end, const char *backtrace);
cpu                16 tools/perf/util/svghelper.h void svg_waiting(int Yslot, int cpu, u64 start, u64 end, const char *backtrace);
cpu                17 tools/perf/util/svghelper.h void svg_cpu_box(int cpu, u64 max_frequency, u64 turbo_frequency);
cpu                20 tools/perf/util/svghelper.h void svg_process(int cpu, u64 start, u64 end, int pid, const char *name, const char *backtrace);
cpu                21 tools/perf/util/svghelper.h void svg_cstate(int cpu, u64 start, u64 end, int type);
cpu                22 tools/perf/util/svghelper.h void svg_pstate(int cpu, u64 start, u64 end, u64 freq);
cpu               129 tools/perf/util/symbol.h 	s32	      cpu;
cpu                58 tools/perf/util/synthetic-events.c 		.cpu	   = -1,
cpu               926 tools/perf/util/synthetic-events.c 		cpus->cpu[i] = map->map[i];
cpu              1089 tools/perf/util/synthetic-events.c 				u32 cpu, u32 thread, u64 id,
cpu              1101 tools/perf/util/synthetic-events.c 	event.cpu       = cpu;
cpu              1285 tools/perf/util/synthetic-events.c 		u.val32[0] = sample->cpu;
cpu              1454 tools/perf/util/synthetic-events.c 			e->cpu = sid->cpu;
cpu                52 tools/perf/util/synthetic-events.h int perf_event__synthesize_stat(struct perf_tool *tool, u32 cpu, u32 thread, u64 id, struct perf_counts_values *count, perf_event__handler_t process, struct machine *machine);
cpu               152 tools/perf/util/thread-stack.c static struct thread_stack *thread_stack__new(struct thread *thread, int cpu,
cpu               159 tools/perf/util/thread-stack.c 	if (thread_stack__per_cpu(thread) && cpu > 0)
cpu               160 tools/perf/util/thread-stack.c 		new_sz = roundup_pow_of_two(cpu + 1);
cpu               174 tools/perf/util/thread-stack.c 	if (thread_stack__per_cpu(thread) && cpu > 0 &&
cpu               175 tools/perf/util/thread-stack.c 	    (unsigned int)cpu < ts->arr_sz)
cpu               176 tools/perf/util/thread-stack.c 		ts += cpu;
cpu               185 tools/perf/util/thread-stack.c static struct thread_stack *thread__cpu_stack(struct thread *thread, int cpu)
cpu               189 tools/perf/util/thread-stack.c 	if (cpu < 0)
cpu               190 tools/perf/util/thread-stack.c 		cpu = 0;
cpu               192 tools/perf/util/thread-stack.c 	if (!ts || (unsigned int)cpu >= ts->arr_sz)
cpu               195 tools/perf/util/thread-stack.c 	ts += cpu;
cpu               204 tools/perf/util/thread-stack.c 						    int cpu)
cpu               210 tools/perf/util/thread-stack.c 		return thread__cpu_stack(thread, cpu);
cpu               356 tools/perf/util/thread-stack.c int thread_stack__event(struct thread *thread, int cpu, u32 flags, u64 from_ip,
cpu               359 tools/perf/util/thread-stack.c 	struct thread_stack *ts = thread__stack(thread, cpu);
cpu               365 tools/perf/util/thread-stack.c 		ts = thread_stack__new(thread, cpu, NULL);
cpu               415 tools/perf/util/thread-stack.c void thread_stack__set_trace_nr(struct thread *thread, int cpu, u64 trace_nr)
cpu               417 tools/perf/util/thread-stack.c 	struct thread_stack *ts = thread__stack(thread, cpu);
cpu               461 tools/perf/util/thread-stack.c void thread_stack__sample(struct thread *thread, int cpu,
cpu               465 tools/perf/util/thread-stack.c 	struct thread_stack *ts = thread__stack(thread, cpu);
cpu               856 tools/perf/util/thread-stack.c 	struct thread_stack *ts = thread__stack(thread, sample->cpu);
cpu               867 tools/perf/util/thread-stack.c 		ts = thread_stack__new(thread, sample->cpu, crp);
cpu               981 tools/perf/util/thread-stack.c size_t thread_stack__depth(struct thread *thread, int cpu)
cpu               983 tools/perf/util/thread-stack.c 	struct thread_stack *ts = thread__stack(thread, cpu);
cpu                83 tools/perf/util/thread-stack.h int thread_stack__event(struct thread *thread, int cpu, u32 flags, u64 from_ip,
cpu                85 tools/perf/util/thread-stack.h void thread_stack__set_trace_nr(struct thread *thread, int cpu, u64 trace_nr);
cpu                86 tools/perf/util/thread-stack.h void thread_stack__sample(struct thread *thread, int cpu, struct ip_callchain *chain,
cpu                90 tools/perf/util/thread-stack.h size_t thread_stack__depth(struct thread *thread, int cpu);
cpu                49 tools/perf/util/thread.c 		thread->cpu = -1;
cpu                32 tools/perf/util/thread.h 	int			cpu;
cpu               101 tools/perf/util/trace-event-parse.c 			   int cpu, void *data, int size, FILE *fp)
cpu               107 tools/perf/util/trace-event-parse.c 	record.cpu = cpu;
cpu               118 tools/perf/util/trace-event-parse.c 			 int cpu, void *data, int size)
cpu               120 tools/perf/util/trace-event-parse.c 	return event_format__fprintf(event, cpu, data, size, stdout);
cpu                32 tools/perf/util/trace-event.h 			   int cpu, void *data, int size, FILE *fp);
cpu                35 tools/perf/util/trace-event.h 			 int cpu, void *data, int size);
cpu               103 tools/power/cpupower/bench/benchmark.c 		if (set_cpufreq_governor("performance", config->cpu) != 0)
cpu               146 tools/power/cpupower/bench/benchmark.c 		if (set_cpufreq_governor(config->governor, config->cpu) != 0)
cpu               101 tools/power/cpupower/bench/main.c 			sscanf(optarg, "%u", &config->cpu);
cpu               172 tools/power/cpupower/bench/main.c 		       config->cpu,
cpu               132 tools/power/cpupower/bench/parse.c 	config->cpu = 0;
cpu               208 tools/power/cpupower/bench/parse.c 			sscanf(val, "%u", &config->cpu);
cpu                18 tools/power/cpupower/bench/parse.h 	unsigned int cpu;	/* cpu for which the affinity is set */
cpu                46 tools/power/cpupower/bench/system.c int set_cpufreq_governor(char *governor, unsigned int cpu)
cpu                51 tools/power/cpupower/bench/system.c 	if (cpupower_is_cpu_online(cpu) != 1) {
cpu                53 tools/power/cpupower/bench/system.c 		fprintf(stderr, "error: cpu %u does not exist\n", cpu);
cpu                57 tools/power/cpupower/bench/system.c 	if (cpufreq_modify_policy_governor(cpu, governor) != 0) {
cpu                75 tools/power/cpupower/bench/system.c int set_cpu_affinity(unsigned int cpu)
cpu                80 tools/power/cpupower/bench/system.c 	CPU_SET(cpu, &cpuset);
cpu                82 tools/power/cpupower/bench/system.c 	dprintf("set affinity to cpu #%u\n", cpu);
cpu               155 tools/power/cpupower/bench/system.c 		printf("set cpu affinity to cpu #%u\n", config->cpu);
cpu               157 tools/power/cpupower/bench/system.c 	set_cpu_affinity(config->cpu);
cpu                11 tools/power/cpupower/bench/system.h int set_cpufreq_governor(char *governor, unsigned int cpu);
cpu                12 tools/power/cpupower/bench/system.h int set_cpu_affinity(unsigned int cpu);
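The cpupower benchmark above prepares a measurement run by binding itself to config->cpu and forcing that CPU's governor (benchmark.c switches to "performance", system.c does the sysfs work). A minimal sketch of those two steps using the helpers declared in bench/system.h; it assumes the file is built inside the bench directory so "system.h" resolves.

/* Sketch: pin the calling process to one CPU and fix its governor so that
 * benchmark results are comparable. Both helpers need root and an online CPU. */
#include <stdio.h>
#include "system.h"

static int prepare_cpu(unsigned int cpu)
{
	/* keep the benchmark on this CPU only */
	if (set_cpu_affinity(cpu) != 0) {
		fprintf(stderr, "cannot bind to cpu %u\n", cpu);
		return -1;
	}

	/* pin the frequency governor for the duration of the run */
	if (set_cpufreq_governor("performance", cpu) != 0) {
		fprintf(stderr, "cannot set governor on cpu %u\n", cpu);
		return -1;
	}
	return 0;
}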
cpu                28 tools/power/cpupower/debug/i386/centrino-decode.c static int rdmsr(unsigned int cpu, unsigned int msr,
cpu                38 tools/power/cpupower/debug/i386/centrino-decode.c 	if (cpu > MCPU)
cpu                41 tools/power/cpupower/debug/i386/centrino-decode.c 	sprintf(file, "/dev/cpu/%d/msr", cpu);
cpu                75 tools/power/cpupower/debug/i386/centrino-decode.c static int decode_live(unsigned int cpu)
cpu                80 tools/power/cpupower/debug/i386/centrino-decode.c 	err = rdmsr(cpu, MSR_IA32_PERF_STATUS, &lo, &hi);
cpu                83 tools/power/cpupower/debug/i386/centrino-decode.c 		printf("can't get MSR_IA32_PERF_STATUS for cpu %d\n", cpu);
cpu                96 tools/power/cpupower/debug/i386/centrino-decode.c 	unsigned int cpu, mode = 0;
cpu                99 tools/power/cpupower/debug/i386/centrino-decode.c 		cpu = 0;
cpu               101 tools/power/cpupower/debug/i386/centrino-decode.c 		cpu = strtoul(argv[1], NULL, 0);
cpu               102 tools/power/cpupower/debug/i386/centrino-decode.c 		if (cpu >= MCPU)
cpu               107 tools/power/cpupower/debug/i386/centrino-decode.c 		decode(cpu);
cpu               109 tools/power/cpupower/debug/i386/centrino-decode.c 		decode_live(cpu);
cpu                27 tools/power/cpupower/debug/i386/powernow-k8-decode.c static int get_fidvid(uint32_t cpu, uint32_t *fid, uint32_t *vid)
cpu                34 tools/power/cpupower/debug/i386/powernow-k8-decode.c 	if (cpu > MCPU)
cpu                37 tools/power/cpupower/debug/i386/powernow-k8-decode.c 	sprintf(file, "/dev/cpu/%d/msr", cpu);
cpu                71 tools/power/cpupower/debug/i386/powernow-k8-decode.c 	int cpu;
cpu                75 tools/power/cpupower/debug/i386/powernow-k8-decode.c 		cpu = 0;
cpu                77 tools/power/cpupower/debug/i386/powernow-k8-decode.c 		cpu = strtoul(argv[1], NULL, 0);
cpu                79 tools/power/cpupower/debug/i386/powernow-k8-decode.c 	err = get_fidvid(cpu, &fid, &vid);
cpu                90 tools/power/cpupower/debug/i386/powernow-k8-decode.c 			cpu,
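Both decode tools above read model-specific registers through the per-CPU msr character device. A minimal sketch of that access pattern; it assumes the msr driver is loaded and the caller has the required privileges, and the helper name is illustrative.

/* Sketch: read a 64-bit MSR for one CPU via /dev/cpu/<cpu>/msr.
 * The MSR index doubles as the file offset; each register is 8 bytes. */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

static int read_msr(unsigned int cpu, uint32_t msr, uint64_t *val)
{
	char path[64];
	ssize_t ret;
	int fd;

	snprintf(path, sizeof(path), "/dev/cpu/%u/msr", cpu);
	fd = open(path, O_RDONLY);
	if (fd < 0)
		return -1;

	ret = pread(fd, val, sizeof(*val), msr);
	close(fd);
	return ret == sizeof(*val) ? 0 : -1;
}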
cpu                23 tools/power/cpupower/lib/cpufreq.c static unsigned int sysfs_cpufreq_read_file(unsigned int cpu, const char *fname,
cpu                29 tools/power/cpupower/lib/cpufreq.c 			 cpu, fname);
cpu                35 tools/power/cpupower/lib/cpufreq.c static unsigned int sysfs_cpufreq_write_file(unsigned int cpu,
cpu                44 tools/power/cpupower/lib/cpufreq.c 			 cpu, fname);
cpu                87 tools/power/cpupower/lib/cpufreq.c static unsigned long sysfs_cpufreq_get_one_value(unsigned int cpu,
cpu                98 tools/power/cpupower/lib/cpufreq.c 	len = sysfs_cpufreq_read_file(cpu, cpufreq_value_files[which],
cpu               126 tools/power/cpupower/lib/cpufreq.c static char *sysfs_cpufreq_get_one_string(unsigned int cpu,
cpu               136 tools/power/cpupower/lib/cpufreq.c 	len = sysfs_cpufreq_read_file(cpu, cpufreq_string_files[which],
cpu               168 tools/power/cpupower/lib/cpufreq.c static int sysfs_cpufreq_write_one_value(unsigned int cpu,
cpu               175 tools/power/cpupower/lib/cpufreq.c 	if (sysfs_cpufreq_write_file(cpu, cpufreq_write_files[which],
cpu               182 tools/power/cpupower/lib/cpufreq.c unsigned long cpufreq_get_freq_kernel(unsigned int cpu)
cpu               184 tools/power/cpupower/lib/cpufreq.c 	return sysfs_cpufreq_get_one_value(cpu, SCALING_CUR_FREQ);
cpu               187 tools/power/cpupower/lib/cpufreq.c unsigned long cpufreq_get_freq_hardware(unsigned int cpu)
cpu               189 tools/power/cpupower/lib/cpufreq.c 	return sysfs_cpufreq_get_one_value(cpu, CPUINFO_CUR_FREQ);
cpu               192 tools/power/cpupower/lib/cpufreq.c unsigned long cpufreq_get_transition_latency(unsigned int cpu)
cpu               194 tools/power/cpupower/lib/cpufreq.c 	return sysfs_cpufreq_get_one_value(cpu, CPUINFO_LATENCY);
cpu               197 tools/power/cpupower/lib/cpufreq.c int cpufreq_get_hardware_limits(unsigned int cpu,
cpu               204 tools/power/cpupower/lib/cpufreq.c 	*min = sysfs_cpufreq_get_one_value(cpu, CPUINFO_MIN_FREQ);
cpu               208 tools/power/cpupower/lib/cpufreq.c 	*max = sysfs_cpufreq_get_one_value(cpu, CPUINFO_MAX_FREQ);
cpu               215 tools/power/cpupower/lib/cpufreq.c char *cpufreq_get_driver(unsigned int cpu)
cpu               217 tools/power/cpupower/lib/cpufreq.c 	return sysfs_cpufreq_get_one_string(cpu, SCALING_DRIVER);
cpu               227 tools/power/cpupower/lib/cpufreq.c struct cpufreq_policy *cpufreq_get_policy(unsigned int cpu)
cpu               235 tools/power/cpupower/lib/cpufreq.c 	policy->governor = sysfs_cpufreq_get_one_string(cpu, SCALING_GOVERNOR);
cpu               240 tools/power/cpupower/lib/cpufreq.c 	policy->min = sysfs_cpufreq_get_one_value(cpu, SCALING_MIN_FREQ);
cpu               241 tools/power/cpupower/lib/cpufreq.c 	policy->max = sysfs_cpufreq_get_one_value(cpu, SCALING_MAX_FREQ);
cpu               262 tools/power/cpupower/lib/cpufreq.c 								int cpu)
cpu               270 tools/power/cpupower/lib/cpufreq.c 	len = sysfs_cpufreq_read_file(cpu, "scaling_available_governors",
cpu               336 tools/power/cpupower/lib/cpufreq.c *cpufreq_get_available_frequencies(unsigned int cpu)
cpu               345 tools/power/cpupower/lib/cpufreq.c 	len = sysfs_cpufreq_read_file(cpu, "scaling_available_frequencies",
cpu               392 tools/power/cpupower/lib/cpufreq.c *cpufreq_get_boost_frequencies(unsigned int cpu)
cpu               401 tools/power/cpupower/lib/cpufreq.c 	len = sysfs_cpufreq_read_file(cpu, "scaling_boost_frequencies",
cpu               467 tools/power/cpupower/lib/cpufreq.c static struct cpufreq_affected_cpus *sysfs_get_cpu_list(unsigned int cpu,
cpu               477 tools/power/cpupower/lib/cpufreq.c 	len = sysfs_cpufreq_read_file(cpu, file, linebuf, sizeof(linebuf));
cpu               505 tools/power/cpupower/lib/cpufreq.c 			if (sscanf(one_value, "%u", &current->cpu) != 1)
cpu               523 tools/power/cpupower/lib/cpufreq.c struct cpufreq_affected_cpus *cpufreq_get_affected_cpus(unsigned int cpu)
cpu               525 tools/power/cpupower/lib/cpufreq.c 	return sysfs_get_cpu_list(cpu, "affected_cpus");
cpu               544 tools/power/cpupower/lib/cpufreq.c struct cpufreq_affected_cpus *cpufreq_get_related_cpus(unsigned int cpu)
cpu               546 tools/power/cpupower/lib/cpufreq.c 	return sysfs_get_cpu_list(cpu, "related_cpus");
cpu               589 tools/power/cpupower/lib/cpufreq.c int cpufreq_set_policy(unsigned int cpu, struct cpufreq_policy *policy)
cpu               610 tools/power/cpupower/lib/cpufreq.c 	old_min = sysfs_cpufreq_get_one_value(cpu, SCALING_MIN_FREQ);
cpu               614 tools/power/cpupower/lib/cpufreq.c 		ret = sysfs_cpufreq_write_one_value(cpu, WRITE_SCALING_MAX_FREQ,
cpu               620 tools/power/cpupower/lib/cpufreq.c 	ret = sysfs_cpufreq_write_one_value(cpu, WRITE_SCALING_MIN_FREQ, min,
cpu               626 tools/power/cpupower/lib/cpufreq.c 		ret = sysfs_cpufreq_write_one_value(cpu, WRITE_SCALING_MAX_FREQ,
cpu               632 tools/power/cpupower/lib/cpufreq.c 	return sysfs_cpufreq_write_one_value(cpu, WRITE_SCALING_GOVERNOR,
cpu               637 tools/power/cpupower/lib/cpufreq.c int cpufreq_modify_policy_min(unsigned int cpu, unsigned long min_freq)
cpu               643 tools/power/cpupower/lib/cpufreq.c 	return sysfs_cpufreq_write_one_value(cpu, WRITE_SCALING_MIN_FREQ,
cpu               648 tools/power/cpupower/lib/cpufreq.c int cpufreq_modify_policy_max(unsigned int cpu, unsigned long max_freq)
cpu               654 tools/power/cpupower/lib/cpufreq.c 	return sysfs_cpufreq_write_one_value(cpu, WRITE_SCALING_MAX_FREQ,
cpu               658 tools/power/cpupower/lib/cpufreq.c int cpufreq_modify_policy_governor(unsigned int cpu, char *governor)
cpu               668 tools/power/cpupower/lib/cpufreq.c 	return sysfs_cpufreq_write_one_value(cpu, WRITE_SCALING_GOVERNOR,
cpu               672 tools/power/cpupower/lib/cpufreq.c int cpufreq_set_frequency(unsigned int cpu, unsigned long target_frequency)
cpu               674 tools/power/cpupower/lib/cpufreq.c 	struct cpufreq_policy *pol = cpufreq_get_policy(cpu);
cpu               683 tools/power/cpupower/lib/cpufreq.c 		ret = cpufreq_modify_policy_governor(cpu, userspace_gov);
cpu               694 tools/power/cpupower/lib/cpufreq.c 	return sysfs_cpufreq_write_one_value(cpu, WRITE_SCALING_SET_SPEED,
cpu               698 tools/power/cpupower/lib/cpufreq.c struct cpufreq_stats *cpufreq_get_stats(unsigned int cpu,
cpu               708 tools/power/cpupower/lib/cpufreq.c 	len = sysfs_cpufreq_read_file(cpu, "stats/time_in_state",
cpu               773 tools/power/cpupower/lib/cpufreq.c unsigned long cpufreq_get_transitions(unsigned int cpu)
cpu               775 tools/power/cpupower/lib/cpufreq.c 	return sysfs_cpufreq_get_one_value(cpu, STATS_NUM_TRANSITIONS);
cpu                31 tools/power/cpupower/lib/cpufreq.h 	unsigned int cpu;
cpu                57 tools/power/cpupower/lib/cpufreq.h unsigned long cpufreq_get_freq_kernel(unsigned int cpu);
cpu                59 tools/power/cpupower/lib/cpufreq.h unsigned long cpufreq_get_freq_hardware(unsigned int cpu);
cpu                61 tools/power/cpupower/lib/cpufreq.h #define cpufreq_get(cpu) cpufreq_get_freq_kernel(cpu);
cpu                68 tools/power/cpupower/lib/cpufreq.h unsigned long cpufreq_get_transition_latency(unsigned int cpu);
cpu                77 tools/power/cpupower/lib/cpufreq.h int cpufreq_get_hardware_limits(unsigned int cpu,
cpu                88 tools/power/cpupower/lib/cpufreq.h char *cpufreq_get_driver(unsigned int cpu);
cpu               100 tools/power/cpupower/lib/cpufreq.h struct cpufreq_policy *cpufreq_get_policy(unsigned int cpu);
cpu               114 tools/power/cpupower/lib/cpufreq.h *cpufreq_get_available_governors(unsigned int cpu);
cpu               128 tools/power/cpupower/lib/cpufreq.h *cpufreq_get_available_frequencies(unsigned int cpu);
cpu               134 tools/power/cpupower/lib/cpufreq.h *cpufreq_get_boost_frequencies(unsigned int cpu);
cpu               147 tools/power/cpupower/lib/cpufreq.h 							int cpu);
cpu               159 tools/power/cpupower/lib/cpufreq.h 							int cpu);
cpu               169 tools/power/cpupower/lib/cpufreq.h struct cpufreq_stats *cpufreq_get_stats(unsigned int cpu,
cpu               174 tools/power/cpupower/lib/cpufreq.h unsigned long cpufreq_get_transitions(unsigned int cpu);
cpu               183 tools/power/cpupower/lib/cpufreq.h int cpufreq_set_policy(unsigned int cpu, struct cpufreq_policy *policy);
cpu               191 tools/power/cpupower/lib/cpufreq.h int cpufreq_modify_policy_min(unsigned int cpu, unsigned long min_freq);
cpu               192 tools/power/cpupower/lib/cpufreq.h int cpufreq_modify_policy_max(unsigned int cpu, unsigned long max_freq);
cpu               193 tools/power/cpupower/lib/cpufreq.h int cpufreq_modify_policy_governor(unsigned int cpu, char *governor);
cpu               203 tools/power/cpupower/lib/cpufreq.h int cpufreq_set_frequency(unsigned int cpu,
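The lib/cpufreq.c and lib/cpufreq.h entries above form the query side of the cpupower cpufreq API (policy, hardware limits, current frequency). A minimal sketch of calling it from a program linked against libcpupower follows; the header include path and the cpufreq_put_policy() release helper are assumptions based on the library's usual layout, not lines from this index.

/* Sketch: query policy and current frequency for CPU 0 through the
 * cpufreq helpers listed above. Assumes the cpupower header is on the
 * include path; cpufreq_put_policy() is assumed to be the matching
 * release call for cpufreq_get_policy(). */
#include <stdio.h>
#include "cpufreq.h"	/* tools/power/cpupower/lib/cpufreq.h; install path may differ */

int main(void)
{
	unsigned int cpu = 0;
	unsigned long min, max;
	struct cpufreq_policy *policy;

	policy = cpufreq_get_policy(cpu);
	if (!policy)
		return 1;
	printf("governor: %s, policy range: %lu-%lu kHz\n",
	       policy->governor, policy->min, policy->max);
	cpufreq_put_policy(policy);	/* assumed release helper */

	if (!cpufreq_get_hardware_limits(cpu, &min, &max))
		printf("hardware limits: %lu-%lu kHz\n", min, max);

	printf("current (kernel view): %lu kHz\n",
	       cpufreq_get_freq_kernel(cpu));
	return 0;
}

Typically built against the library produced by the cpupower Makefile (something like cc demo.c -lcpupower once headers and library are installed).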
cpu                29 tools/power/cpupower/lib/cpuidle.c unsigned int cpuidle_state_file_exists(unsigned int cpu,
cpu                38 tools/power/cpupower/lib/cpuidle.c 		 cpu, idlestate, fname);
cpu                51 tools/power/cpupower/lib/cpuidle.c unsigned int cpuidle_state_read_file(unsigned int cpu,
cpu                61 tools/power/cpupower/lib/cpuidle.c 		 cpu, idlestate, fname);
cpu                86 tools/power/cpupower/lib/cpuidle.c unsigned int cpuidle_state_write_file(unsigned int cpu,
cpu                96 tools/power/cpupower/lib/cpuidle.c 		 cpu, idlestate, fname);
cpu               133 tools/power/cpupower/lib/cpuidle.c unsigned long long cpuidle_state_get_one_value(unsigned int cpu,
cpu               145 tools/power/cpupower/lib/cpuidle.c 	len = cpuidle_state_read_file(cpu, idlestate,
cpu               173 tools/power/cpupower/lib/cpuidle.c static char *cpuidle_state_get_one_string(unsigned int cpu,
cpu               184 tools/power/cpupower/lib/cpuidle.c 	len = cpuidle_state_read_file(cpu, idlestate,
cpu               207 tools/power/cpupower/lib/cpuidle.c int cpuidle_is_state_disabled(unsigned int cpu,
cpu               210 tools/power/cpupower/lib/cpuidle.c 	if (cpuidle_state_count(cpu) <= idlestate)
cpu               213 tools/power/cpupower/lib/cpuidle.c 	if (!cpuidle_state_file_exists(cpu, idlestate,
cpu               216 tools/power/cpupower/lib/cpuidle.c 	return cpuidle_state_get_one_value(cpu, idlestate, IDLESTATE_DISABLE);
cpu               228 tools/power/cpupower/lib/cpuidle.c int cpuidle_state_disable(unsigned int cpu,
cpu               235 tools/power/cpupower/lib/cpuidle.c 	if (cpuidle_state_count(cpu) <= idlestate)
cpu               238 tools/power/cpupower/lib/cpuidle.c 	if (!cpuidle_state_file_exists(cpu, idlestate,
cpu               244 tools/power/cpupower/lib/cpuidle.c 	bytes_written = cpuidle_state_write_file(cpu, idlestate, "disable",
cpu               251 tools/power/cpupower/lib/cpuidle.c unsigned long cpuidle_state_latency(unsigned int cpu,
cpu               254 tools/power/cpupower/lib/cpuidle.c 	return cpuidle_state_get_one_value(cpu, idlestate, IDLESTATE_LATENCY);
cpu               257 tools/power/cpupower/lib/cpuidle.c unsigned long cpuidle_state_usage(unsigned int cpu,
cpu               260 tools/power/cpupower/lib/cpuidle.c 	return cpuidle_state_get_one_value(cpu, idlestate, IDLESTATE_USAGE);
cpu               263 tools/power/cpupower/lib/cpuidle.c unsigned long long cpuidle_state_time(unsigned int cpu,
cpu               266 tools/power/cpupower/lib/cpuidle.c 	return cpuidle_state_get_one_value(cpu, idlestate, IDLESTATE_TIME);
cpu               269 tools/power/cpupower/lib/cpuidle.c char *cpuidle_state_name(unsigned int cpu, unsigned int idlestate)
cpu               271 tools/power/cpupower/lib/cpuidle.c 	return cpuidle_state_get_one_string(cpu, idlestate, IDLESTATE_NAME);
cpu               274 tools/power/cpupower/lib/cpuidle.c char *cpuidle_state_desc(unsigned int cpu, unsigned int idlestate)
cpu               276 tools/power/cpupower/lib/cpuidle.c 	return cpuidle_state_get_one_string(cpu, idlestate, IDLESTATE_DESC);
cpu               284 tools/power/cpupower/lib/cpuidle.c unsigned int cpuidle_state_count(unsigned int cpu)
cpu               295 tools/power/cpupower/lib/cpuidle.c 	snprintf(file, SYSFS_PATH_MAX, PATH_TO_CPU "cpu%u/cpuidle/state0", cpu);
cpu               301 tools/power/cpupower/lib/cpuidle.c 			 "cpu%u/cpuidle/state%d", cpu, idlestates);
cpu                 5 tools/power/cpupower/lib/cpuidle.h int cpuidle_is_state_disabled(unsigned int cpu,
cpu                 7 tools/power/cpupower/lib/cpuidle.h int cpuidle_state_disable(unsigned int cpu, unsigned int idlestate,
cpu                 9 tools/power/cpupower/lib/cpuidle.h unsigned long cpuidle_state_latency(unsigned int cpu,
cpu                11 tools/power/cpupower/lib/cpuidle.h unsigned long cpuidle_state_usage(unsigned int cpu,
cpu                13 tools/power/cpupower/lib/cpuidle.h unsigned long long cpuidle_state_time(unsigned int cpu,
cpu                15 tools/power/cpupower/lib/cpuidle.h char *cpuidle_state_name(unsigned int cpu,
cpu                17 tools/power/cpupower/lib/cpuidle.h char *cpuidle_state_desc(unsigned int cpu,
cpu                19 tools/power/cpupower/lib/cpuidle.h unsigned int cpuidle_state_count(unsigned int cpu);
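lib/cpuidle.c and lib/cpuidle.h above expose per-state cpuidle attributes (name, description, latency, usage, time, disabled flag) read from sysfs. A hedged sketch that walks every idle state of one CPU with those helpers; releasing the returned strings with free() is an assumption about the library's allocation convention.

/* Sketch: enumerate the idle states of CPU 0 using the cpuidle helpers
 * indexed above. Assumes the cpupower cpuidle.h header is available and
 * that name/desc strings are heap-allocated by the library. */
#include <stdio.h>
#include <stdlib.h>
#include "cpuidle.h"

int main(void)
{
	unsigned int cpu = 0, state;
	unsigned int count = cpuidle_state_count(cpu);

	for (state = 0; state < count; state++) {
		char *name = cpuidle_state_name(cpu, state);
		char *desc = cpuidle_state_desc(cpu, state);

		printf("state %u: %s (%s) latency %lu us, time %llu us, disabled=%d\n",
		       state, name ? name : "?", desc ? desc : "?",
		       cpuidle_state_latency(cpu, state),
		       cpuidle_state_time(cpu, state),
		       cpuidle_is_state_disabled(cpu, state));
		free(name);
		free(desc);
	}
	return 0;
}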
cpu                46 tools/power/cpupower/lib/cpupower.c int cpupower_is_cpu_online(unsigned int cpu)
cpu                56 tools/power/cpupower/lib/cpupower.c 	snprintf(path, sizeof(path), PATH_TO_CPU "cpu%u", cpu);
cpu                65 tools/power/cpupower/lib/cpupower.c 	snprintf(path, sizeof(path), PATH_TO_CPU "cpu%u/online", cpu);
cpu                89 tools/power/cpupower/lib/cpupower.c static int sysfs_topology_read_file(unsigned int cpu, const char *fname, int *result)
cpu                96 tools/power/cpupower/lib/cpupower.c 			 cpu, fname);
cpu               117 tools/power/cpupower/lib/cpupower.c 	else if (top1->cpu < top2->cpu)
cpu               119 tools/power/cpupower/lib/cpupower.c 	else if (top1->cpu > top2->cpu)
cpu               133 tools/power/cpupower/lib/cpupower.c 	int cpu, last_pkg, cpus = sysconf(_SC_NPROCESSORS_CONF);
cpu               139 tools/power/cpupower/lib/cpupower.c 	for (cpu = 0; cpu < cpus; cpu++) {
cpu               140 tools/power/cpupower/lib/cpupower.c 		cpu_top->core_info[cpu].cpu = cpu;
cpu               141 tools/power/cpupower/lib/cpupower.c 		cpu_top->core_info[cpu].is_online = cpupower_is_cpu_online(cpu);
cpu               143 tools/power/cpupower/lib/cpupower.c 			cpu,
cpu               145 tools/power/cpupower/lib/cpupower.c 			&(cpu_top->core_info[cpu].pkg)) < 0) {
cpu               146 tools/power/cpupower/lib/cpupower.c 			cpu_top->core_info[cpu].pkg = -1;
cpu               147 tools/power/cpupower/lib/cpupower.c 			cpu_top->core_info[cpu].core = -1;
cpu               151 tools/power/cpupower/lib/cpupower.c 			cpu,
cpu               153 tools/power/cpupower/lib/cpupower.c 			&(cpu_top->core_info[cpu].core)) < 0) {
cpu               154 tools/power/cpupower/lib/cpupower.c 			cpu_top->core_info[cpu].pkg = -1;
cpu               155 tools/power/cpupower/lib/cpupower.c 			cpu_top->core_info[cpu].core = -1;
cpu               167 tools/power/cpupower/lib/cpupower.c 	for(cpu = 1; cpu < cpus; cpu++) {
cpu               168 tools/power/cpupower/lib/cpupower.c 		if (cpu_top->core_info[cpu].pkg != last_pkg &&
cpu               169 tools/power/cpupower/lib/cpupower.c 				cpu_top->core_info[cpu].pkg != -1) {
cpu               171 tools/power/cpupower/lib/cpupower.c 			last_pkg = cpu_top->core_info[cpu].pkg;
cpu                18 tools/power/cpupower/lib/cpupower.h 	int cpu;
cpu                30 tools/power/cpupower/lib/cpupower.h int cpupower_is_cpu_online(unsigned int cpu);
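cpupower_is_cpu_online() in lib/cpupower.c (indexed above) decides online status from /sys/devices/system/cpu/cpu%u and cpu%u/online. A simplified standalone sketch of the same check; the real helper additionally verifies that the cpu%u directory exists before reading the online file.

/* Sketch: online check in the style of cpupower_is_cpu_online(),
 * reading /sys/devices/system/cpu/cpu<N>/online. A CPU without an
 * "online" file (commonly cpu0) is treated as online here. */
#include <stdio.h>

static int cpu_is_online(unsigned int cpu)
{
	char path[128];
	FILE *f;
	int online = 1;	/* no "online" file -> cannot be offlined */

	snprintf(path, sizeof(path),
		 "/sys/devices/system/cpu/cpu%u/online", cpu);
	f = fopen(path, "r");
	if (!f)
		return online;
	if (fscanf(f, "%d", &online) != 1)
		online = 0;
	fclose(f);
	return online;
}

int main(void)
{
	printf("cpu1 online: %d\n", cpu_is_online(1));
	return 0;
}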
cpu                58 tools/power/cpupower/utils/cpufreq-info.c 	unsigned int cpu, nr_cpus;
cpu                67 tools/power/cpupower/utils/cpufreq-info.c 	for (cpu = 0; cpu < nr_cpus; cpu++) {
cpu                68 tools/power/cpupower/utils/cpufreq-info.c 		policy = cpufreq_get_policy(cpu);
cpu                72 tools/power/cpupower/utils/cpufreq-info.c 		if (cpufreq_get_hardware_limits(cpu, &min, &max)) {
cpu                79 tools/power/cpupower/utils/cpufreq-info.c 			cpu , policy->min, max ? min_pctg : 0, policy->max,
cpu               163 tools/power/cpupower/utils/cpufreq-info.c static int get_boost_mode_x86(unsigned int cpu)
cpu               169 tools/power/cpupower/utils/cpufreq-info.c 	ret = cpufreq_has_boost_support(cpu, &support, &active, &b_states);
cpu               172 tools/power/cpupower/utils/cpufreq-info.c 				" on CPU %d -- are you root?\n"), cpu);
cpu               189 tools/power/cpupower/utils/cpufreq-info.c 		ret = decode_pstates(cpu, cpupower_cpu_info.family, b_states,
cpu               216 tools/power/cpupower/utils/cpufreq-info.c 		intel_turbo_ratio = msr_intel_get_turbo_ratio(cpu);
cpu               245 tools/power/cpupower/utils/cpufreq-info.c static int get_boost_mode(unsigned int cpu)
cpu               252 tools/power/cpupower/utils/cpufreq-info.c 		return get_boost_mode_x86(cpu);
cpu               254 tools/power/cpupower/utils/cpufreq-info.c 	freqs = cpufreq_get_boost_frequencies(cpu);
cpu               272 tools/power/cpupower/utils/cpufreq-info.c static int get_freq_kernel(unsigned int cpu, unsigned int human)
cpu               274 tools/power/cpupower/utils/cpufreq-info.c 	unsigned long freq = cpufreq_get_freq_kernel(cpu);
cpu               291 tools/power/cpupower/utils/cpufreq-info.c static int get_freq_hardware(unsigned int cpu, unsigned int human)
cpu               293 tools/power/cpupower/utils/cpufreq-info.c 	unsigned long freq = cpufreq_get_freq_hardware(cpu);
cpu               309 tools/power/cpupower/utils/cpufreq-info.c static int get_hardware_limits(unsigned int cpu, unsigned int human)
cpu               313 tools/power/cpupower/utils/cpufreq-info.c 	if (cpufreq_get_hardware_limits(cpu, &min, &max)) {
cpu               332 tools/power/cpupower/utils/cpufreq-info.c static int get_driver(unsigned int cpu)
cpu               334 tools/power/cpupower/utils/cpufreq-info.c 	char *driver = cpufreq_get_driver(cpu);
cpu               346 tools/power/cpupower/utils/cpufreq-info.c static int get_policy(unsigned int cpu)
cpu               348 tools/power/cpupower/utils/cpufreq-info.c 	struct cpufreq_policy *policy = cpufreq_get_policy(cpu);
cpu               368 tools/power/cpupower/utils/cpufreq-info.c static int get_available_governors(unsigned int cpu)
cpu               371 tools/power/cpupower/utils/cpufreq-info.c 		cpufreq_get_available_governors(cpu);
cpu               391 tools/power/cpupower/utils/cpufreq-info.c static int get_affected_cpus(unsigned int cpu)
cpu               393 tools/power/cpupower/utils/cpufreq-info.c 	struct cpufreq_affected_cpus *cpus = cpufreq_get_affected_cpus(cpu);
cpu               402 tools/power/cpupower/utils/cpufreq-info.c 		printf("%d ", cpus->cpu);
cpu               405 tools/power/cpupower/utils/cpufreq-info.c 	printf("%d\n", cpus->cpu);
cpu               412 tools/power/cpupower/utils/cpufreq-info.c static int get_related_cpus(unsigned int cpu)
cpu               414 tools/power/cpupower/utils/cpufreq-info.c 	struct cpufreq_affected_cpus *cpus = cpufreq_get_related_cpus(cpu);
cpu               423 tools/power/cpupower/utils/cpufreq-info.c 		printf("%d ", cpus->cpu);
cpu               426 tools/power/cpupower/utils/cpufreq-info.c 	printf("%d\n", cpus->cpu);
cpu               433 tools/power/cpupower/utils/cpufreq-info.c static int get_freq_stats(unsigned int cpu, unsigned int human)
cpu               435 tools/power/cpupower/utils/cpufreq-info.c 	unsigned long total_trans = cpufreq_get_transitions(cpu);
cpu               437 tools/power/cpupower/utils/cpufreq-info.c 	struct cpufreq_stats *stats = cpufreq_get_stats(cpu, &total_time);
cpu               458 tools/power/cpupower/utils/cpufreq-info.c static int get_latency(unsigned int cpu, unsigned int human)
cpu               460 tools/power/cpupower/utils/cpufreq-info.c 	unsigned long latency = cpufreq_get_transition_latency(cpu);
cpu               476 tools/power/cpupower/utils/cpufreq-info.c static void debug_output_one(unsigned int cpu)
cpu               480 tools/power/cpupower/utils/cpufreq-info.c 	get_driver(cpu);
cpu               481 tools/power/cpupower/utils/cpufreq-info.c 	get_related_cpus(cpu);
cpu               482 tools/power/cpupower/utils/cpufreq-info.c 	get_affected_cpus(cpu);
cpu               483 tools/power/cpupower/utils/cpufreq-info.c 	get_latency(cpu, 1);
cpu               484 tools/power/cpupower/utils/cpufreq-info.c 	get_hardware_limits(cpu, 1);
cpu               486 tools/power/cpupower/utils/cpufreq-info.c 	freqs = cpufreq_get_available_frequencies(cpu);
cpu               499 tools/power/cpupower/utils/cpufreq-info.c 	get_available_governors(cpu);
cpu               500 tools/power/cpupower/utils/cpufreq-info.c 	get_policy(cpu);
cpu               501 tools/power/cpupower/utils/cpufreq-info.c 	if (get_freq_hardware(cpu, 1) < 0)
cpu               502 tools/power/cpupower/utils/cpufreq-info.c 		get_freq_kernel(cpu, 1);
cpu               503 tools/power/cpupower/utils/cpufreq-info.c 	get_boost_mode(cpu);
cpu               530 tools/power/cpupower/utils/cpufreq-info.c 	unsigned int cpu = 0;
cpu               613 tools/power/cpupower/utils/cpufreq-info.c 	for (cpu = bitmask_first(cpus_chosen);
cpu               614 tools/power/cpupower/utils/cpufreq-info.c 	     cpu <= bitmask_last(cpus_chosen); cpu++) {
cpu               616 tools/power/cpupower/utils/cpufreq-info.c 		if (!bitmask_isbitset(cpus_chosen, cpu))
cpu               619 tools/power/cpupower/utils/cpufreq-info.c 		printf(_("analyzing CPU %d:\n"), cpu);
cpu               621 tools/power/cpupower/utils/cpufreq-info.c 		if (sysfs_is_cpu_online(cpu) != 1) {
cpu               629 tools/power/cpupower/utils/cpufreq-info.c 			get_boost_mode(cpu);
cpu               632 tools/power/cpupower/utils/cpufreq-info.c 			debug_output_one(cpu);
cpu               635 tools/power/cpupower/utils/cpufreq-info.c 			ret = get_affected_cpus(cpu);
cpu               638 tools/power/cpupower/utils/cpufreq-info.c 			ret = get_related_cpus(cpu);
cpu               641 tools/power/cpupower/utils/cpufreq-info.c 			ret = get_available_governors(cpu);
cpu               644 tools/power/cpupower/utils/cpufreq-info.c 			ret = get_policy(cpu);
cpu               647 tools/power/cpupower/utils/cpufreq-info.c 			ret = get_driver(cpu);
cpu               650 tools/power/cpupower/utils/cpufreq-info.c 			ret = get_hardware_limits(cpu, human);
cpu               653 tools/power/cpupower/utils/cpufreq-info.c 			ret = get_freq_hardware(cpu, human);
cpu               656 tools/power/cpupower/utils/cpufreq-info.c 			ret = get_freq_kernel(cpu, human);
cpu               659 tools/power/cpupower/utils/cpufreq-info.c 			ret = get_freq_stats(cpu, human);
cpu               662 tools/power/cpupower/utils/cpufreq-info.c 			ret = get_latency(cpu, human);
cpu               140 tools/power/cpupower/utils/cpufreq-set.c static int do_new_policy(unsigned int cpu, struct cpufreq_policy *new_pol)
cpu               142 tools/power/cpupower/utils/cpufreq-set.c 	struct cpufreq_policy *cur_pol = cpufreq_get_policy(cpu);
cpu               159 tools/power/cpupower/utils/cpufreq-set.c 	ret = cpufreq_set_policy(cpu, new_pol);
cpu               167 tools/power/cpupower/utils/cpufreq-set.c static int do_one_cpu(unsigned int cpu, struct cpufreq_policy *new_pol,
cpu               172 tools/power/cpupower/utils/cpufreq-set.c 		return cpufreq_set_frequency(cpu, freq);
cpu               179 tools/power/cpupower/utils/cpufreq-set.c 			return cpufreq_modify_policy_min(cpu, new_pol->min);
cpu               181 tools/power/cpupower/utils/cpufreq-set.c 			return cpufreq_modify_policy_max(cpu, new_pol->max);
cpu               183 tools/power/cpupower/utils/cpufreq-set.c 			return cpufreq_modify_policy_governor(cpu,
cpu               188 tools/power/cpupower/utils/cpufreq-set.c 		return do_new_policy(cpu, new_pol);
cpu               200 tools/power/cpupower/utils/cpufreq-set.c 	unsigned int cpu;
cpu               293 tools/power/cpupower/utils/cpufreq-set.c 		for (cpu = bitmask_first(cpus_chosen);
cpu               294 tools/power/cpupower/utils/cpufreq-set.c 		     cpu <= bitmask_last(cpus_chosen); cpu++) {
cpu               297 tools/power/cpupower/utils/cpufreq-set.c 			if (!bitmask_isbitset(cpus_chosen, cpu) ||
cpu               298 tools/power/cpupower/utils/cpufreq-set.c 			    cpupower_is_cpu_online(cpu) != 1)
cpu               301 tools/power/cpupower/utils/cpufreq-set.c 			cpus = cpufreq_get_related_cpus(cpu);
cpu               305 tools/power/cpupower/utils/cpufreq-set.c 				bitmask_setbit(cpus_chosen, cpus->cpu);
cpu               309 tools/power/cpupower/utils/cpufreq-set.c 			bitmask_setbit(cpus_chosen, cpus->cpu);
cpu               316 tools/power/cpupower/utils/cpufreq-set.c 	for (cpu = bitmask_first(cpus_chosen);
cpu               317 tools/power/cpupower/utils/cpufreq-set.c 	     cpu <= bitmask_last(cpus_chosen); cpu++) {
cpu               319 tools/power/cpupower/utils/cpufreq-set.c 		if (!bitmask_isbitset(cpus_chosen, cpu) ||
cpu               320 tools/power/cpupower/utils/cpufreq-set.c 		    cpupower_is_cpu_online(cpu) != 1)
cpu               323 tools/power/cpupower/utils/cpufreq-set.c 		printf(_("Setting cpu: %d\n"), cpu);
cpu               324 tools/power/cpupower/utils/cpufreq-set.c 		ret = do_one_cpu(cpu, &new_pol, freq, policychange);
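utils/cpufreq-set.c above drives policy changes through cpufreq_modify_policy_min/max/governor and cpufreq_set_frequency. A hedged sketch of the same calls for a single CPU; the governor name and kHz values are placeholders, and the sysfs writes behind these helpers require root.

/* Sketch: adjust one CPU's policy with the modify helpers declared in
 * lib/cpufreq.h (frequencies are in kHz, as in cpufreq sysfs). */
#include <stdio.h>
#include "cpufreq.h"

int main(void)
{
	unsigned int cpu = 0;

	if (cpufreq_modify_policy_governor(cpu, "powersave"))
		fprintf(stderr, "could not set governor (root required?)\n");
	if (cpufreq_modify_policy_min(cpu, 800000) ||	/* placeholder: 800 MHz */
	    cpufreq_modify_policy_max(cpu, 2400000))	/* placeholder: 2.4 GHz */
		fprintf(stderr, "could not set policy limits\n");
	return 0;
}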
cpu                23 tools/power/cpupower/utils/cpuidle-info.c static void cpuidle_cpu_output(unsigned int cpu, int verbose)
cpu                28 tools/power/cpupower/utils/cpuidle-info.c 	idlestates = cpuidle_state_count(cpu);
cpu                30 tools/power/cpupower/utils/cpuidle-info.c 		printf(_("CPU %u: No idle states\n"), cpu);
cpu                37 tools/power/cpupower/utils/cpuidle-info.c 		tmp = cpuidle_state_name(cpu, idlestate);
cpu                49 tools/power/cpupower/utils/cpuidle-info.c 		int disabled = cpuidle_is_state_disabled(cpu, idlestate);
cpu                53 tools/power/cpupower/utils/cpuidle-info.c 		tmp = cpuidle_state_name(cpu, idlestate);
cpu                59 tools/power/cpupower/utils/cpuidle-info.c 		tmp = cpuidle_state_desc(cpu, idlestate);
cpu                66 tools/power/cpupower/utils/cpuidle-info.c 		       cpuidle_state_latency(cpu, idlestate));
cpu                68 tools/power/cpupower/utils/cpuidle-info.c 		       cpuidle_state_usage(cpu, idlestate));
cpu                70 tools/power/cpupower/utils/cpuidle-info.c 		       cpuidle_state_time(cpu, idlestate));
cpu                97 tools/power/cpupower/utils/cpuidle-info.c static void proc_cpuidle_cpu_output(unsigned int cpu)
cpu               102 tools/power/cpupower/utils/cpuidle-info.c 	cstates = cpuidle_state_count(cpu);
cpu               104 tools/power/cpupower/utils/cpuidle-info.c 		printf(_("CPU %u: No C-states info\n"), cpu);
cpu               117 tools/power/cpupower/utils/cpuidle-info.c 		       cpuidle_state_latency(cpu, cstate));
cpu               119 tools/power/cpupower/utils/cpuidle-info.c 		       cpuidle_state_usage(cpu, cstate));
cpu               121 tools/power/cpupower/utils/cpuidle-info.c 		       cpuidle_state_time(cpu, cstate));
cpu               141 tools/power/cpupower/utils/cpuidle-info.c 	unsigned int cpu = 0;
cpu               186 tools/power/cpupower/utils/cpuidle-info.c 	for (cpu = bitmask_first(cpus_chosen);
cpu               187 tools/power/cpupower/utils/cpuidle-info.c 	     cpu <= bitmask_last(cpus_chosen); cpu++) {
cpu               189 tools/power/cpupower/utils/cpuidle-info.c 		if (!bitmask_isbitset(cpus_chosen, cpu))
cpu               192 tools/power/cpupower/utils/cpuidle-info.c 		printf(_("analyzing CPU %d:\n"), cpu);
cpu               194 tools/power/cpupower/utils/cpuidle-info.c 		if (sysfs_is_cpu_online(cpu) != 1) {
cpu               203 tools/power/cpupower/utils/cpuidle-info.c 			proc_cpuidle_cpu_output(cpu);
cpu               207 tools/power/cpupower/utils/cpuidle-info.c 			cpuidle_cpu_output(cpu, verbose);
cpu                31 tools/power/cpupower/utils/cpuidle-set.c 	unsigned int cpu = 0, idlestate = 0, idlestates = 0;
cpu               102 tools/power/cpupower/utils/cpuidle-set.c 	for (cpu = bitmask_first(cpus_chosen);
cpu               103 tools/power/cpupower/utils/cpuidle-set.c 	     cpu <= bitmask_last(cpus_chosen); cpu++) {
cpu               105 tools/power/cpupower/utils/cpuidle-set.c 		if (!bitmask_isbitset(cpus_chosen, cpu))
cpu               108 tools/power/cpupower/utils/cpuidle-set.c 		if (cpupower_is_cpu_online(cpu) != 1)
cpu               111 tools/power/cpupower/utils/cpuidle-set.c 		idlestates = cpuidle_state_count(cpu);
cpu               117 tools/power/cpupower/utils/cpuidle-set.c 			ret = cpuidle_state_disable(cpu, idlestate, 1);
cpu               119 tools/power/cpupower/utils/cpuidle-set.c 		printf(_("Idlestate %u disabled on CPU %u\n"),  idlestate, cpu);
cpu               122 tools/power/cpupower/utils/cpuidle-set.c 		       idlestate, cpu);
cpu               127 tools/power/cpupower/utils/cpuidle-set.c 		       idlestate, cpu);
cpu               130 tools/power/cpupower/utils/cpuidle-set.c 			ret = cpuidle_state_disable(cpu, idlestate, 0);
cpu               132 tools/power/cpupower/utils/cpuidle-set.c 		printf(_("Idlestate %u enabled on CPU %u\n"),  idlestate, cpu);
cpu               135 tools/power/cpupower/utils/cpuidle-set.c 		       idlestate, cpu);
cpu               140 tools/power/cpupower/utils/cpuidle-set.c 		       idlestate, cpu);
cpu               145 tools/power/cpupower/utils/cpuidle-set.c 					(cpu, idlestate);
cpu               147 tools/power/cpupower/utils/cpuidle-set.c 					(cpu, idlestate);
cpu               151 tools/power/cpupower/utils/cpuidle-set.c 							(cpu, idlestate, 0);
cpu               153 tools/power/cpupower/utils/cpuidle-set.c 		printf(_("Idlestate %u enabled on CPU %u\n"),  idlestate, cpu);
cpu               159 tools/power/cpupower/utils/cpuidle-set.c 						(cpu, idlestate, 1);
cpu               161 tools/power/cpupower/utils/cpuidle-set.c 		printf(_("Idlestate %u disabled on CPU %u\n"), idlestate, cpu);
cpu               168 tools/power/cpupower/utils/cpuidle-set.c 					(cpu, idlestate);
cpu               171 tools/power/cpupower/utils/cpuidle-set.c 						(cpu, idlestate, 0);
cpu               173 tools/power/cpupower/utils/cpuidle-set.c 		printf(_("Idlestate %u enabled on CPU %u\n"), idlestate, cpu);
cpu                32 tools/power/cpupower/utils/cpupower-info.c 	unsigned int cpu;
cpu                81 tools/power/cpupower/utils/cpupower-info.c 	for (cpu = bitmask_first(cpus_chosen);
cpu                82 tools/power/cpupower/utils/cpupower-info.c 	     cpu <= bitmask_last(cpus_chosen); cpu++) {
cpu                84 tools/power/cpupower/utils/cpupower-info.c 		if (!bitmask_isbitset(cpus_chosen, cpu))
cpu                87 tools/power/cpupower/utils/cpupower-info.c 		printf(_("analyzing CPU %d:\n"), cpu);
cpu                89 tools/power/cpupower/utils/cpupower-info.c 		if (sysfs_is_cpu_online(cpu) != 1){
cpu                95 tools/power/cpupower/utils/cpupower-info.c 			ret = msr_intel_get_perf_bias(cpu);
cpu                33 tools/power/cpupower/utils/cpupower-set.c 	unsigned int cpu;
cpu                76 tools/power/cpupower/utils/cpupower-set.c 	for (cpu = bitmask_first(cpus_chosen);
cpu                77 tools/power/cpupower/utils/cpupower-set.c 	     cpu <= bitmask_last(cpus_chosen); cpu++) {
cpu                79 tools/power/cpupower/utils/cpupower-set.c 		if (!bitmask_isbitset(cpus_chosen, cpu))
cpu                82 tools/power/cpupower/utils/cpupower-set.c 		if (sysfs_is_cpu_online(cpu) != 1){
cpu                83 tools/power/cpupower/utils/cpupower-set.c 			fprintf(stderr, _("Cannot set values on CPU %d:"), cpu);
cpu                89 tools/power/cpupower/utils/cpupower-set.c 			ret = msr_intel_set_perf_bias(cpu, perf_bias);
cpu                92 tools/power/cpupower/utils/cpupower-set.c 						  "value on CPU %d\n"), cpu);
cpu                88 tools/power/cpupower/utils/helpers/amd.c int decode_pstates(unsigned int cpu, unsigned int cpu_family,
cpu               102 tools/power/cpupower/utils/helpers/amd.c 	if (read_msr(cpu, MSR_AMD_PSTATE_LIMIT, &val))
cpu               107 tools/power/cpupower/utils/helpers/amd.c 	if (read_msr(cpu, MSR_AMD_PSTATE_STATUS, &val))
cpu               120 tools/power/cpupower/utils/helpers/amd.c 		if (read_msr(cpu, MSR_AMD_PSTATE + i, &pstate.val))
cpu               104 tools/power/cpupower/utils/helpers/helpers.h extern int read_msr(int cpu, unsigned int idx, unsigned long long *val);
cpu               105 tools/power/cpupower/utils/helpers/helpers.h extern int write_msr(int cpu, unsigned int idx, unsigned long long val);
cpu               107 tools/power/cpupower/utils/helpers/helpers.h extern int msr_intel_set_perf_bias(unsigned int cpu, unsigned int val);
cpu               108 tools/power/cpupower/utils/helpers/helpers.h extern int msr_intel_get_perf_bias(unsigned int cpu);
cpu               109 tools/power/cpupower/utils/helpers/helpers.h extern unsigned long long msr_intel_get_turbo_ratio(unsigned int cpu);
cpu               125 tools/power/cpupower/utils/helpers/helpers.h extern int decode_pstates(unsigned int cpu, unsigned int cpu_family,
cpu               130 tools/power/cpupower/utils/helpers/helpers.h extern int cpufreq_has_boost_support(unsigned int cpu, int *support,
cpu               143 tools/power/cpupower/utils/helpers/helpers.h static inline int decode_pstates(unsigned int cpu, unsigned int cpu_family,
cpu               148 tools/power/cpupower/utils/helpers/helpers.h static inline int read_msr(int cpu, unsigned int idx, unsigned long long *val)
cpu               150 tools/power/cpupower/utils/helpers/helpers.h static inline int write_msr(int cpu, unsigned int idx, unsigned long long val)
cpu               152 tools/power/cpupower/utils/helpers/helpers.h static inline int msr_intel_set_perf_bias(unsigned int cpu, unsigned int val)
cpu               154 tools/power/cpupower/utils/helpers/helpers.h static inline int msr_intel_get_perf_bias(unsigned int cpu)
cpu               156 tools/power/cpupower/utils/helpers/helpers.h static inline unsigned long long msr_intel_get_turbo_ratio(unsigned int cpu)
cpu               161 tools/power/cpupower/utils/helpers/helpers.h static inline int cpufreq_has_boost_support(unsigned int cpu, int *support,
cpu                 8 tools/power/cpupower/utils/helpers/misc.c int cpufreq_has_boost_support(unsigned int cpu, int *support, int *active,
cpu                30 tools/power/cpupower/utils/helpers/misc.c 			if (!read_msr(cpu, MSR_AMD_HWCR, &val)) {
cpu                27 tools/power/cpupower/utils/helpers/msr.c int read_msr(int cpu, unsigned int idx, unsigned long long *val)
cpu                32 tools/power/cpupower/utils/helpers/msr.c 	sprintf(msr_file_name, "/dev/cpu/%d/msr", cpu);
cpu                56 tools/power/cpupower/utils/helpers/msr.c int write_msr(int cpu, unsigned int idx, unsigned long long val)
cpu                61 tools/power/cpupower/utils/helpers/msr.c 	sprintf(msr_file_name, "/dev/cpu/%d/msr", cpu);
cpu                76 tools/power/cpupower/utils/helpers/msr.c int msr_intel_get_perf_bias(unsigned int cpu)
cpu                84 tools/power/cpupower/utils/helpers/msr.c 	ret = read_msr(cpu, MSR_IA32_ENERGY_PERF_BIAS, &val);
cpu                90 tools/power/cpupower/utils/helpers/msr.c int msr_intel_set_perf_bias(unsigned int cpu, unsigned int val)
cpu                97 tools/power/cpupower/utils/helpers/msr.c 	ret = write_msr(cpu, MSR_IA32_ENERGY_PERF_BIAS, val);
cpu               103 tools/power/cpupower/utils/helpers/msr.c unsigned long long msr_intel_get_turbo_ratio(unsigned int cpu)
cpu               111 tools/power/cpupower/utils/helpers/msr.c 	ret = read_msr(cpu, MSR_NEHALEM_TURBO_RATIO_LIMIT, &val);
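helpers/msr.c above reads and writes MSRs through /dev/cpu/<cpu>/msr. The standalone sketch below mirrors that access pattern (open the per-CPU msr device and pread() eight bytes at the register index); it needs the msr kernel module loaded and root privileges. The register used is IA32_APERF (0xE8), one of the counters the mperf monitor samples.

/* Sketch: read one MSR the same way read_msr() in helpers/msr.c does. */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>

static int read_msr_sketch(int cpu, unsigned int idx, unsigned long long *val)
{
	char path[64];
	int fd;

	snprintf(path, sizeof(path), "/dev/cpu/%d/msr", cpu);
	fd = open(path, O_RDONLY);
	if (fd < 0)
		return -1;
	if (pread(fd, val, sizeof(*val), idx) != sizeof(*val)) {
		close(fd);
		return -1;
	}
	close(fd);
	return 0;
}

int main(void)
{
	unsigned long long aperf;

	if (!read_msr_sketch(0, 0xE8, &aperf))	/* 0xE8 = IA32_APERF */
		printf("APERF on cpu0: %llu\n", aperf);
	return 0;
}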
cpu                47 tools/power/cpupower/utils/helpers/sysfs.c int sysfs_is_cpu_online(unsigned int cpu)
cpu                57 tools/power/cpupower/utils/helpers/sysfs.c 	snprintf(path, sizeof(path), PATH_TO_CPU "cpu%u", cpu);
cpu                66 tools/power/cpupower/utils/helpers/sysfs.c 	snprintf(path, sizeof(path), PATH_TO_CPU "cpu%u/online", cpu);
cpu               103 tools/power/cpupower/utils/helpers/sysfs.c unsigned int sysfs_idlestate_file_exists(unsigned int cpu,
cpu               112 tools/power/cpupower/utils/helpers/sysfs.c 		 cpu, idlestate, fname);
cpu               124 tools/power/cpupower/utils/helpers/sysfs.c unsigned int sysfs_idlestate_read_file(unsigned int cpu, unsigned int idlestate,
cpu               132 tools/power/cpupower/utils/helpers/sysfs.c 		 cpu, idlestate, fname);
cpu               157 tools/power/cpupower/utils/helpers/sysfs.c unsigned int sysfs_idlestate_write_file(unsigned int cpu,
cpu               167 tools/power/cpupower/utils/helpers/sysfs.c 		 cpu, idlestate, fname);
cpu               203 tools/power/cpupower/utils/helpers/sysfs.c static unsigned long long sysfs_idlestate_get_one_value(unsigned int cpu,
cpu               215 tools/power/cpupower/utils/helpers/sysfs.c 	len = sysfs_idlestate_read_file(cpu, idlestate,
cpu               243 tools/power/cpupower/utils/helpers/sysfs.c static char *sysfs_idlestate_get_one_string(unsigned int cpu,
cpu               254 tools/power/cpupower/utils/helpers/sysfs.c 	len = sysfs_idlestate_read_file(cpu, idlestate,
cpu               277 tools/power/cpupower/utils/helpers/sysfs.c int sysfs_is_idlestate_disabled(unsigned int cpu,
cpu               280 tools/power/cpupower/utils/helpers/sysfs.c 	if (sysfs_get_idlestate_count(cpu) <= idlestate)
cpu               283 tools/power/cpupower/utils/helpers/sysfs.c 	if (!sysfs_idlestate_file_exists(cpu, idlestate,
cpu               286 tools/power/cpupower/utils/helpers/sysfs.c 	return sysfs_idlestate_get_one_value(cpu, idlestate, IDLESTATE_DISABLE);
cpu               298 tools/power/cpupower/utils/helpers/sysfs.c int sysfs_idlestate_disable(unsigned int cpu,
cpu               305 tools/power/cpupower/utils/helpers/sysfs.c 	if (sysfs_get_idlestate_count(cpu) <= idlestate)
cpu               308 tools/power/cpupower/utils/helpers/sysfs.c 	if (!sysfs_idlestate_file_exists(cpu, idlestate,
cpu               314 tools/power/cpupower/utils/helpers/sysfs.c 	bytes_written = sysfs_idlestate_write_file(cpu, idlestate, "disable",
cpu               321 tools/power/cpupower/utils/helpers/sysfs.c unsigned long sysfs_get_idlestate_latency(unsigned int cpu,
cpu               324 tools/power/cpupower/utils/helpers/sysfs.c 	return sysfs_idlestate_get_one_value(cpu, idlestate, IDLESTATE_LATENCY);
cpu               327 tools/power/cpupower/utils/helpers/sysfs.c unsigned long sysfs_get_idlestate_usage(unsigned int cpu,
cpu               330 tools/power/cpupower/utils/helpers/sysfs.c 	return sysfs_idlestate_get_one_value(cpu, idlestate, IDLESTATE_USAGE);
cpu               333 tools/power/cpupower/utils/helpers/sysfs.c unsigned long long sysfs_get_idlestate_time(unsigned int cpu,
cpu               336 tools/power/cpupower/utils/helpers/sysfs.c 	return sysfs_idlestate_get_one_value(cpu, idlestate, IDLESTATE_TIME);
cpu               339 tools/power/cpupower/utils/helpers/sysfs.c char *sysfs_get_idlestate_name(unsigned int cpu, unsigned int idlestate)
cpu               341 tools/power/cpupower/utils/helpers/sysfs.c 	return sysfs_idlestate_get_one_string(cpu, idlestate, IDLESTATE_NAME);
cpu               344 tools/power/cpupower/utils/helpers/sysfs.c char *sysfs_get_idlestate_desc(unsigned int cpu, unsigned int idlestate)
cpu               346 tools/power/cpupower/utils/helpers/sysfs.c 	return sysfs_idlestate_get_one_string(cpu, idlestate, IDLESTATE_DESC);
cpu               354 tools/power/cpupower/utils/helpers/sysfs.c unsigned int sysfs_get_idlestate_count(unsigned int cpu)
cpu               365 tools/power/cpupower/utils/helpers/sysfs.c 	snprintf(file, SYSFS_PATH_MAX, PATH_TO_CPU "cpu%u/cpuidle/state0", cpu);
cpu               371 tools/power/cpupower/utils/helpers/sysfs.c 			 "cpu%u/cpuidle/state%d", cpu, idlestates);
cpu                11 tools/power/cpupower/utils/helpers/sysfs.h extern unsigned int sysfs_idlestate_file_exists(unsigned int cpu,
cpu                15 tools/power/cpupower/utils/helpers/sysfs.h extern int sysfs_is_cpu_online(unsigned int cpu);
cpu                17 tools/power/cpupower/utils/helpers/sysfs.h extern int sysfs_is_idlestate_disabled(unsigned int cpu,
cpu                19 tools/power/cpupower/utils/helpers/sysfs.h extern int sysfs_idlestate_disable(unsigned int cpu, unsigned int idlestate,
cpu                21 tools/power/cpupower/utils/helpers/sysfs.h extern unsigned long sysfs_get_idlestate_latency(unsigned int cpu,
cpu                23 tools/power/cpupower/utils/helpers/sysfs.h extern unsigned long sysfs_get_idlestate_usage(unsigned int cpu,
cpu                25 tools/power/cpupower/utils/helpers/sysfs.h extern unsigned long long sysfs_get_idlestate_time(unsigned int cpu,
cpu                27 tools/power/cpupower/utils/helpers/sysfs.h extern char *sysfs_get_idlestate_name(unsigned int cpu,
cpu                29 tools/power/cpupower/utils/helpers/sysfs.h extern char *sysfs_get_idlestate_desc(unsigned int cpu,
cpu                31 tools/power/cpupower/utils/helpers/sysfs.h extern unsigned int sysfs_get_idlestate_count(unsigned int cpu);
cpu                46 tools/power/cpupower/utils/idle_monitor/amd_fam14h_idle.c 				    unsigned int cpu);
cpu                48 tools/power/cpupower/utils/idle_monitor/amd_fam14h_idle.c 			     unsigned int cpu);
cpu                99 tools/power/cpupower/utils/idle_monitor/amd_fam14h_idle.c 				   unsigned int cpu)
cpu               124 tools/power/cpupower/utils/idle_monitor/amd_fam14h_idle.c static int amd_fam14h_init(cstate_t *state, unsigned int cpu)
cpu               129 tools/power/cpupower/utils/idle_monitor/amd_fam14h_idle.c 	ret = amd_fam14h_get_pci_info(state, &pci_offset, &enable_bit, cpu);
cpu               150 tools/power/cpupower/utils/idle_monitor/amd_fam14h_idle.c 	       (unsigned int) val, cpu);
cpu               154 tools/power/cpupower/utils/idle_monitor/amd_fam14h_idle.c 	previous_count[state->id][cpu] = 0;
cpu               159 tools/power/cpupower/utils/idle_monitor/amd_fam14h_idle.c static int amd_fam14h_disable(cstate_t *state, unsigned int cpu)
cpu               164 tools/power/cpupower/utils/idle_monitor/amd_fam14h_idle.c 	ret = amd_fam14h_get_pci_info(state, &pci_offset, &enable_bit, cpu);
cpu               181 tools/power/cpupower/utils/idle_monitor/amd_fam14h_idle.c 	current_count[state->id][cpu] = val;
cpu               184 tools/power/cpupower/utils/idle_monitor/amd_fam14h_idle.c 	       current_count[state->id][cpu], cpu);
cpu               186 tools/power/cpupower/utils/idle_monitor/amd_fam14h_idle.c 	       previous_count[state->id][cpu], cpu);
cpu               196 tools/power/cpupower/utils/idle_monitor/amd_fam14h_idle.c 			     unsigned int cpu)
cpu               208 tools/power/cpupower/utils/idle_monitor/amd_fam14h_idle.c 				    unsigned int cpu)
cpu               215 tools/power/cpupower/utils/idle_monitor/amd_fam14h_idle.c 	diff = current_count[id][cpu] - previous_count[id][cpu];
cpu               230 tools/power/cpupower/utils/idle_monitor/amd_fam14h_idle.c 	int num, cpu;
cpu               233 tools/power/cpupower/utils/idle_monitor/amd_fam14h_idle.c 		for (cpu = 0; cpu < cpu_count; cpu++)
cpu               234 tools/power/cpupower/utils/idle_monitor/amd_fam14h_idle.c 			amd_fam14h_init(&amd_fam14h_cstates[num], cpu);
cpu               247 tools/power/cpupower/utils/idle_monitor/amd_fam14h_idle.c 	int num, cpu;
cpu               253 tools/power/cpupower/utils/idle_monitor/amd_fam14h_idle.c 		for (cpu = 0; cpu < cpu_count; cpu++)
cpu               254 tools/power/cpupower/utils/idle_monitor/amd_fam14h_idle.c 			amd_fam14h_disable(&amd_fam14h_cstates[num], cpu);
cpu                26 tools/power/cpupower/utils/idle_monitor/cpuidle_sysfs.c 				     unsigned int cpu)
cpu                28 tools/power/cpupower/utils/idle_monitor/cpuidle_sysfs.c 	unsigned long long statediff = current_count[cpu][id]
cpu                29 tools/power/cpupower/utils/idle_monitor/cpuidle_sysfs.c 		- previous_count[cpu][id];
cpu                31 tools/power/cpupower/utils/idle_monitor/cpuidle_sysfs.c 	       cpuidle_cstates[id].name, timediff, *percent, cpu);
cpu                39 tools/power/cpupower/utils/idle_monitor/cpuidle_sysfs.c 	       cpuidle_cstates[id].name, timediff, statediff, *percent, cpu);
cpu                46 tools/power/cpupower/utils/idle_monitor/cpuidle_sysfs.c 	int cpu, state;
cpu                48 tools/power/cpupower/utils/idle_monitor/cpuidle_sysfs.c 	for (cpu = 0; cpu < cpu_count; cpu++) {
cpu                51 tools/power/cpupower/utils/idle_monitor/cpuidle_sysfs.c 			previous_count[cpu][state] =
cpu                52 tools/power/cpupower/utils/idle_monitor/cpuidle_sysfs.c 				cpuidle_state_time(cpu, state);
cpu                54 tools/power/cpupower/utils/idle_monitor/cpuidle_sysfs.c 			       cpu, state, previous_count[cpu][state]);
cpu                62 tools/power/cpupower/utils/idle_monitor/cpuidle_sysfs.c 	int cpu, state;
cpu                67 tools/power/cpupower/utils/idle_monitor/cpuidle_sysfs.c 	for (cpu = 0; cpu < cpu_count; cpu++) {
cpu                70 tools/power/cpupower/utils/idle_monitor/cpuidle_sysfs.c 			current_count[cpu][state] =
cpu                71 tools/power/cpupower/utils/idle_monitor/cpuidle_sysfs.c 				cpuidle_state_time(cpu, state);
cpu                73 tools/power/cpupower/utils/idle_monitor/cpuidle_sysfs.c 			       cpu, state, previous_count[cpu][state]);
cpu               140 tools/power/cpupower/utils/idle_monitor/cpupower-monitor.c void print_results(int topology_depth, int cpu)
cpu               149 tools/power/cpupower/utils/idle_monitor/cpupower-monitor.c 	if (!bitmask_isbitset(cpus_chosen, cpu_top.core_info[cpu].cpu))
cpu               151 tools/power/cpupower/utils/idle_monitor/cpupower-monitor.c 	if (!cpu_top.core_info[cpu].is_online &&
cpu               152 tools/power/cpupower/utils/idle_monitor/cpupower-monitor.c 	    cpu_top.core_info[cpu].pkg == -1)
cpu               156 tools/power/cpupower/utils/idle_monitor/cpupower-monitor.c 		printf("%4d|", cpu_top.core_info[cpu].pkg);
cpu               158 tools/power/cpupower/utils/idle_monitor/cpupower-monitor.c 		printf("%4d|", cpu_top.core_info[cpu].core);
cpu               160 tools/power/cpupower/utils/idle_monitor/cpupower-monitor.c 		printf("%4d|", cpu_top.core_info[cpu].cpu);
cpu               174 tools/power/cpupower/utils/idle_monitor/cpupower-monitor.c 						  cpu_top.core_info[cpu].cpu);
cpu               183 tools/power/cpupower/utils/idle_monitor/cpupower-monitor.c 						  cpu_top.core_info[cpu].cpu);
cpu               202 tools/power/cpupower/utils/idle_monitor/cpupower-monitor.c 	if (!cpu_top.core_info[cpu].is_online &&
cpu               203 tools/power/cpupower/utils/idle_monitor/cpupower-monitor.c 	    cpu_top.core_info[cpu].pkg != -1) {
cpu               327 tools/power/cpupower/utils/idle_monitor/cpupower-monitor.c 	int cpu;
cpu               330 tools/power/cpupower/utils/idle_monitor/cpupower-monitor.c 		for (cpu = 0; cpu < cpu_count; cpu++)
cpu               331 tools/power/cpupower/utils/idle_monitor/cpupower-monitor.c 			bind_cpu(cpu);
cpu               342 tools/power/cpupower/utils/idle_monitor/cpupower-monitor.c 		for (cpu = 0; cpu < cpu_count; cpu++)
cpu               343 tools/power/cpupower/utils/idle_monitor/cpupower-monitor.c 			bind_cpu(cpu);
cpu               391 tools/power/cpupower/utils/idle_monitor/cpupower-monitor.c 	int cpu;
cpu               455 tools/power/cpupower/utils/idle_monitor/cpupower-monitor.c 	for (cpu = 0; cpu < cpu_count; cpu++) {
cpu               457 tools/power/cpupower/utils/idle_monitor/cpupower-monitor.c 			print_results(3, cpu);
cpu               459 tools/power/cpupower/utils/idle_monitor/cpupower-monitor.c 			print_results(1, cpu);
cpu                47 tools/power/cpupower/utils/idle_monitor/cpupower-monitor.h 				 unsigned int cpu);
cpu                49 tools/power/cpupower/utils/idle_monitor/cpupower-monitor.h 			 unsigned int cpu);
cpu                80 tools/power/cpupower/utils/idle_monitor/cpupower-monitor.h static inline int bind_cpu(int cpu)
cpu                86 tools/power/cpupower/utils/idle_monitor/cpupower-monitor.h 		CPU_SET(cpu, &set);
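bind_cpu() in idle_monitor/cpupower-monitor.h (the CPU_SET lines above) pins the monitor onto the CPU whose counters it is about to read. A sketch of that pattern with sched_setaffinity():

/* Sketch: pin the calling thread to one CPU, the same CPU_ZERO/CPU_SET/
 * sched_setaffinity() pattern bind_cpu() uses. */
#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

static int bind_to_cpu(int cpu)
{
	cpu_set_t set;

	CPU_ZERO(&set);
	CPU_SET(cpu, &set);
	return sched_setaffinity(0, sizeof(set), &set);	/* 0 = this thread */
}

int main(void)
{
	if (bind_to_cpu(0))
		perror("sched_setaffinity");
	else
		printf("running on cpu0\n");
	return 0;
}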
cpu                29 tools/power/cpupower/utils/idle_monitor/hsw_ext_idle.c 				 unsigned int cpu);
cpu                63 tools/power/cpupower/utils/idle_monitor/hsw_ext_idle.c 			unsigned int cpu)
cpu                83 tools/power/cpupower/utils/idle_monitor/hsw_ext_idle.c 	if (read_msr(cpu, msr, val))
cpu                89 tools/power/cpupower/utils/idle_monitor/hsw_ext_idle.c 				 unsigned int cpu)
cpu                93 tools/power/cpupower/utils/idle_monitor/hsw_ext_idle.c 	if (!is_valid[cpu])
cpu                97 tools/power/cpupower/utils/idle_monitor/hsw_ext_idle.c 		(current_count[id][cpu] - previous_count[id][cpu])) /
cpu               101 tools/power/cpupower/utils/idle_monitor/hsw_ext_idle.c 		hsw_ext_cstates[id].name, previous_count[id][cpu],
cpu               102 tools/power/cpupower/utils/idle_monitor/hsw_ext_idle.c 		current_count[id][cpu], cpu);
cpu               107 tools/power/cpupower/utils/idle_monitor/hsw_ext_idle.c 	       current_count[id][cpu] - previous_count[id][cpu],
cpu               108 tools/power/cpupower/utils/idle_monitor/hsw_ext_idle.c 	       *percent, cpu);
cpu               115 tools/power/cpupower/utils/idle_monitor/hsw_ext_idle.c 	int num, cpu;
cpu               119 tools/power/cpupower/utils/idle_monitor/hsw_ext_idle.c 		for (cpu = 0; cpu < cpu_count; cpu++) {
cpu               120 tools/power/cpupower/utils/idle_monitor/hsw_ext_idle.c 			hsw_ext_get_count(num, &val, cpu);
cpu               121 tools/power/cpupower/utils/idle_monitor/hsw_ext_idle.c 			previous_count[num][cpu] = val;
cpu               131 tools/power/cpupower/utils/idle_monitor/hsw_ext_idle.c 	int num, cpu;
cpu               136 tools/power/cpupower/utils/idle_monitor/hsw_ext_idle.c 		for (cpu = 0; cpu < cpu_count; cpu++) {
cpu               137 tools/power/cpupower/utils/idle_monitor/hsw_ext_idle.c 			is_valid[cpu] = !hsw_ext_get_count(num, &val, cpu);
cpu               138 tools/power/cpupower/utils/idle_monitor/hsw_ext_idle.c 			current_count[num][cpu] = val;
cpu                29 tools/power/cpupower/utils/idle_monitor/mperf_monitor.c 				   unsigned int cpu);
cpu                31 tools/power/cpupower/utils/idle_monitor/mperf_monitor.c 				unsigned int cpu);
cpu                89 tools/power/cpupower/utils/idle_monitor/mperf_monitor.c static int mperf_init_stats(unsigned int cpu)
cpu                94 tools/power/cpupower/utils/idle_monitor/mperf_monitor.c 	ret = read_msr(cpu, MSR_APERF, &val);
cpu                95 tools/power/cpupower/utils/idle_monitor/mperf_monitor.c 	aperf_previous_count[cpu] = val;
cpu                96 tools/power/cpupower/utils/idle_monitor/mperf_monitor.c 	ret |= read_msr(cpu, MSR_MPERF, &val);
cpu                97 tools/power/cpupower/utils/idle_monitor/mperf_monitor.c 	mperf_previous_count[cpu] = val;
cpu                98 tools/power/cpupower/utils/idle_monitor/mperf_monitor.c 	is_valid[cpu] = !ret;
cpu               103 tools/power/cpupower/utils/idle_monitor/mperf_monitor.c static int mperf_measure_stats(unsigned int cpu)
cpu               108 tools/power/cpupower/utils/idle_monitor/mperf_monitor.c 	ret = read_msr(cpu, MSR_APERF, &val);
cpu               109 tools/power/cpupower/utils/idle_monitor/mperf_monitor.c 	aperf_current_count[cpu] = val;
cpu               110 tools/power/cpupower/utils/idle_monitor/mperf_monitor.c 	ret |= read_msr(cpu, MSR_MPERF, &val);
cpu               111 tools/power/cpupower/utils/idle_monitor/mperf_monitor.c 	mperf_current_count[cpu] = val;
cpu               112 tools/power/cpupower/utils/idle_monitor/mperf_monitor.c 	is_valid[cpu] = !ret;
cpu               118 tools/power/cpupower/utils/idle_monitor/mperf_monitor.c 				   unsigned int cpu)
cpu               123 tools/power/cpupower/utils/idle_monitor/mperf_monitor.c 	if (!is_valid[cpu])
cpu               129 tools/power/cpupower/utils/idle_monitor/mperf_monitor.c 	mperf_diff = mperf_current_count[cpu] - mperf_previous_count[cpu];
cpu               130 tools/power/cpupower/utils/idle_monitor/mperf_monitor.c 	aperf_diff = aperf_current_count[cpu] - aperf_previous_count[cpu];
cpu               149 tools/power/cpupower/utils/idle_monitor/mperf_monitor.c 		mperf_cstates[id].name, mperf_diff, aperf_diff, cpu);
cpu               155 tools/power/cpupower/utils/idle_monitor/mperf_monitor.c 				unsigned int cpu)
cpu               162 tools/power/cpupower/utils/idle_monitor/mperf_monitor.c 	if (!is_valid[cpu])
cpu               165 tools/power/cpupower/utils/idle_monitor/mperf_monitor.c 	mperf_diff = mperf_current_count[cpu] - mperf_previous_count[cpu];
cpu               166 tools/power/cpupower/utils/idle_monitor/mperf_monitor.c 	aperf_diff = aperf_current_count[cpu] - aperf_previous_count[cpu];
cpu               188 tools/power/cpupower/utils/idle_monitor/mperf_monitor.c 	int cpu;
cpu               194 tools/power/cpupower/utils/idle_monitor/mperf_monitor.c 	for (cpu = 0; cpu < cpu_count; cpu++)
cpu               195 tools/power/cpupower/utils/idle_monitor/mperf_monitor.c 		mperf_init_stats(cpu);
cpu               205 tools/power/cpupower/utils/idle_monitor/mperf_monitor.c 	int cpu;
cpu               207 tools/power/cpupower/utils/idle_monitor/mperf_monitor.c 	for (cpu = 0; cpu < cpu_count; cpu++)
cpu               208 tools/power/cpupower/utils/idle_monitor/mperf_monitor.c 		mperf_measure_stats(cpu);
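idle_monitor/mperf_monitor.c above samples IA32_APERF and IA32_MPERF before and after the measurement interval and turns the deltas into a C0 (busy) percentage and an average busy frequency. A small sketch of that arithmetic with made-up counter deltas, assuming the TSC-based reference the monitor uses on current CPUs:

/* Sketch of the APERF/MPERF arithmetic: C0 residency is the MPERF delta
 * relative to the elapsed reference (TSC-domain) cycles, and the average
 * busy frequency scales the nominal frequency by APERF/MPERF.
 * All input deltas below are example values, not measurements. */
#include <stdio.h>

int main(void)
{
	unsigned long long mperf_diff = 800000000ULL;	/* reference cycles spent in C0 */
	unsigned long long aperf_diff = 900000000ULL;	/* actual cycles spent in C0 */
	unsigned long long tsc_diff   = 2000000000ULL;	/* reference cycles in the interval */
	unsigned long max_freq_khz    = 2000000;	/* nominal frequency, placeholder */

	double c0_percent = 100.0 * mperf_diff / tsc_diff;
	unsigned long avg_freq_khz =
		(unsigned long)(max_freq_khz * ((double)aperf_diff / mperf_diff));

	printf("C0: %.2f%%, average busy frequency: %lu kHz\n",
	       c0_percent, avg_freq_khz);
	return 0;
}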
cpu                30 tools/power/cpupower/utils/idle_monitor/nhm_idle.c 				 unsigned int cpu);
cpu                72 tools/power/cpupower/utils/idle_monitor/nhm_idle.c 			unsigned int cpu)
cpu                95 tools/power/cpupower/utils/idle_monitor/nhm_idle.c 	if (read_msr(cpu, msr, val))
cpu               102 tools/power/cpupower/utils/idle_monitor/nhm_idle.c 				 unsigned int cpu)
cpu               106 tools/power/cpupower/utils/idle_monitor/nhm_idle.c 	if (!is_valid[cpu])
cpu               110 tools/power/cpupower/utils/idle_monitor/nhm_idle.c 		(current_count[id][cpu] - previous_count[id][cpu])) /
cpu               114 tools/power/cpupower/utils/idle_monitor/nhm_idle.c 		nhm_cstates[id].name, previous_count[id][cpu],
cpu               115 tools/power/cpupower/utils/idle_monitor/nhm_idle.c 		current_count[id][cpu], cpu);
cpu               120 tools/power/cpupower/utils/idle_monitor/nhm_idle.c 	       current_count[id][cpu] - previous_count[id][cpu],
cpu               121 tools/power/cpupower/utils/idle_monitor/nhm_idle.c 	       *percent, cpu);
cpu               128 tools/power/cpupower/utils/idle_monitor/nhm_idle.c 	int num, cpu;
cpu               134 tools/power/cpupower/utils/idle_monitor/nhm_idle.c 		for (cpu = 0; cpu < cpu_count; cpu++) {
cpu               135 tools/power/cpupower/utils/idle_monitor/nhm_idle.c 			is_valid[cpu] = !nhm_get_count(num, &val, cpu);
cpu               136 tools/power/cpupower/utils/idle_monitor/nhm_idle.c 			previous_count[num][cpu] = val;
cpu               148 tools/power/cpupower/utils/idle_monitor/nhm_idle.c 	int num, cpu;
cpu               153 tools/power/cpupower/utils/idle_monitor/nhm_idle.c 		for (cpu = 0; cpu < cpu_count; cpu++) {
cpu               154 tools/power/cpupower/utils/idle_monitor/nhm_idle.c 			is_valid[cpu] = !nhm_get_count(num, &val, cpu);
cpu               155 tools/power/cpupower/utils/idle_monitor/nhm_idle.c 			current_count[num][cpu] = val;
cpu                27 tools/power/cpupower/utils/idle_monitor/snb_idle.c 				 unsigned int cpu);
cpu                61 tools/power/cpupower/utils/idle_monitor/snb_idle.c 			unsigned int cpu)
cpu                81 tools/power/cpupower/utils/idle_monitor/snb_idle.c 	if (read_msr(cpu, msr, val))
cpu                87 tools/power/cpupower/utils/idle_monitor/snb_idle.c 				 unsigned int cpu)
cpu                91 tools/power/cpupower/utils/idle_monitor/snb_idle.c 	if (!is_valid[cpu])
cpu                95 tools/power/cpupower/utils/idle_monitor/snb_idle.c 		(current_count[id][cpu] - previous_count[id][cpu])) /
cpu                99 tools/power/cpupower/utils/idle_monitor/snb_idle.c 		snb_cstates[id].name, previous_count[id][cpu],
cpu               100 tools/power/cpupower/utils/idle_monitor/snb_idle.c 		current_count[id][cpu], cpu);
cpu               105 tools/power/cpupower/utils/idle_monitor/snb_idle.c 	       current_count[id][cpu] - previous_count[id][cpu],
cpu               106 tools/power/cpupower/utils/idle_monitor/snb_idle.c 	       *percent, cpu);
cpu               113 tools/power/cpupower/utils/idle_monitor/snb_idle.c 	int num, cpu;
cpu               117 tools/power/cpupower/utils/idle_monitor/snb_idle.c 		for (cpu = 0; cpu < cpu_count; cpu++) {
cpu               118 tools/power/cpupower/utils/idle_monitor/snb_idle.c 			snb_get_count(num, &val, cpu);
cpu               119 tools/power/cpupower/utils/idle_monitor/snb_idle.c 			previous_count[num][cpu] = val;
cpu               129 tools/power/cpupower/utils/idle_monitor/snb_idle.c 	int num, cpu;
cpu               134 tools/power/cpupower/utils/idle_monitor/snb_idle.c 		for (cpu = 0; cpu < cpu_count; cpu++) {
cpu               135 tools/power/cpupower/utils/idle_monitor/snb_idle.c 			is_valid[cpu] = !snb_get_count(num, &val, cpu);
cpu               136 tools/power/cpupower/utils/idle_monitor/snb_idle.c 			current_count[num][cpu] = val;
cpu               139 tools/power/x86/intel-speed-select/isst-config.c int get_physical_package_id(int cpu)
cpu               143 tools/power/x86/intel-speed-select/isst-config.c 		cpu);
cpu               146 tools/power/x86/intel-speed-select/isst-config.c int get_physical_core_id(int cpu)
cpu               149 tools/power/x86/intel-speed-select/isst-config.c 		0, "/sys/devices/system/cpu/cpu%d/topology/core_id", cpu);
cpu               152 tools/power/x86/intel-speed-select/isst-config.c int get_physical_die_id(int cpu)
cpu               157 tools/power/x86/intel-speed-select/isst-config.c 			     cpu);
cpu               169 tools/power/x86/intel-speed-select/isst-config.c static void set_cpu_online_offline(int cpu, int state)
cpu               175 tools/power/x86/intel-speed-select/isst-config.c 		 "/sys/devices/system/cpu/cpu%d/online", cpu);
cpu               413 tools/power/x86/intel-speed-select/isst-config.c void set_cpu_mask_from_punit_coremask(int cpu, unsigned long long core_mask,
cpu               421 tools/power/x86/intel-speed-select/isst-config.c 	die_id = get_physical_die_id(cpu);
cpu               422 tools/power/x86/intel-speed-select/isst-config.c 	pkg_id = get_physical_package_id(cpu);
cpu               454 tools/power/x86/intel-speed-select/isst-config.c static int isst_send_mmio_command(unsigned int cpu, unsigned int reg, int write,
cpu               462 tools/power/x86/intel-speed-select/isst-config.c 	debug_printf("mmio_cmd cpu:%d reg:%d write:%d\n", cpu, reg, write);
cpu               469 tools/power/x86/intel-speed-select/isst-config.c 	io_regs.io_reg[0].logical_cpu = cpu;
cpu               482 tools/power/x86/intel-speed-select/isst-config.c 			cpu, reg, write);
cpu               489 tools/power/x86/intel-speed-select/isst-config.c 			cpu, reg, write, *value);
cpu               497 tools/power/x86/intel-speed-select/isst-config.c int isst_send_mbox_command(unsigned int cpu, unsigned char command,
cpu               507 tools/power/x86/intel-speed-select/isst-config.c 		cpu, command, sub_command, parameter, req_data);
cpu               514 tools/power/x86/intel-speed-select/isst-config.c 		debug_printf("CPU %d\n", cpu);
cpu               525 tools/power/x86/intel-speed-select/isst-config.c 				cpu, PQR_ASSOC_OFFSET + core_id * 4, write,
cpu               533 tools/power/x86/intel-speed-select/isst-config.c 				cpu, PM_CLOS_OFFSET + clos_id * 4, write,
cpu               539 tools/power/x86/intel-speed-select/isst-config.c 			ret = isst_send_mmio_command(cpu, PM_QOS_CONFIG_OFFSET,
cpu               553 tools/power/x86/intel-speed-select/isst-config.c 	mbox_cmds.mbox_cmd[0].logical_cpu = cpu;
cpu               567 tools/power/x86/intel-speed-select/isst-config.c 			cpu, command, sub_command, parameter, req_data);
cpu               572 tools/power/x86/intel-speed-select/isst-config.c 			cpu, command, sub_command, parameter, req_data, *resp);
cpu               580 tools/power/x86/intel-speed-select/isst-config.c int isst_send_msr_command(unsigned int cpu, unsigned int msr, int write,
cpu               592 tools/power/x86/intel-speed-select/isst-config.c 	msr_cmds.msr_cmd[0].logical_cpu = cpu;
cpu               601 tools/power/x86/intel-speed-select/isst-config.c 			cpu, msr, write);
cpu               608 tools/power/x86/intel-speed-select/isst-config.c 			cpu, msr, write, *req_resp, msr_cmds.msr_cmd[0].data);
cpu               668 tools/power/x86/intel-speed-select/isst-config.c static void exec_on_get_ctdp_cpu(int cpu, void *arg1, void *arg2, void *arg3,
cpu               671 tools/power/x86/intel-speed-select/isst-config.c 	int (*fn_ptr)(int cpu, void *arg);
cpu               675 tools/power/x86/intel-speed-select/isst-config.c 	ret = fn_ptr(cpu, arg2);
cpu               679 tools/power/x86/intel-speed-select/isst-config.c 		isst_ctdp_display_core_info(cpu, outf, arg3,
cpu               714 tools/power/x86/intel-speed-select/isst-config.c static void dump_isst_config_for_cpu(int cpu, void *arg1, void *arg2,
cpu               721 tools/power/x86/intel-speed-select/isst-config.c 	ret = isst_get_process_ctdp(cpu, tdp_level, &pkg_dev);
cpu               725 tools/power/x86/intel-speed-select/isst-config.c 		isst_ctdp_display_information(cpu, outf, tdp_level, &pkg_dev);
cpu               726 tools/power/x86/intel-speed-select/isst-config.c 		isst_get_process_ctdp_complete(cpu, &pkg_dev);
cpu               755 tools/power/x86/intel-speed-select/isst-config.c static void set_tdp_level_for_cpu(int cpu, void *arg1, void *arg2, void *arg3,
cpu               760 tools/power/x86/intel-speed-select/isst-config.c 	ret = isst_set_tdp_level(cpu, tdp_level);
cpu               764 tools/power/x86/intel-speed-select/isst-config.c 		isst_display_result(cpu, outf, "perf-profile", "set_tdp_level",
cpu               768 tools/power/x86/intel-speed-select/isst-config.c 			int pkg_id = get_physical_package_id(cpu);
cpu               769 tools/power/x86/intel-speed-select/isst-config.c 			int die_id = get_physical_die_id(cpu);
cpu               774 tools/power/x86/intel-speed-select/isst-config.c 			isst_get_coremask_info(cpu, tdp_level, &ctdp_level);
cpu               818 tools/power/x86/intel-speed-select/isst-config.c static void dump_pbf_config_for_cpu(int cpu, void *arg1, void *arg2, void *arg3,
cpu               824 tools/power/x86/intel-speed-select/isst-config.c 	ret = isst_get_pbf_info(cpu, tdp_level, &pbf_info);
cpu               828 tools/power/x86/intel-speed-select/isst-config.c 		isst_pbf_display_information(cpu, outf, tdp_level, &pbf_info);
cpu               858 tools/power/x86/intel-speed-select/isst-config.c static void set_pbf_for_cpu(int cpu, void *arg1, void *arg2, void *arg3,
cpu               864 tools/power/x86/intel-speed-select/isst-config.c 	ret = isst_set_pbf_fact_status(cpu, 1, status);
cpu               869 tools/power/x86/intel-speed-select/isst-config.c 			isst_display_result(cpu, outf, "base-freq", "enable",
cpu               872 tools/power/x86/intel-speed-select/isst-config.c 			isst_display_result(cpu, outf, "base-freq", "disable",
cpu               917 tools/power/x86/intel-speed-select/isst-config.c static void dump_fact_config_for_cpu(int cpu, void *arg1, void *arg2,
cpu               923 tools/power/x86/intel-speed-select/isst-config.c 	ret = isst_get_fact_info(cpu, tdp_level, &fact_info);
cpu               927 tools/power/x86/intel-speed-select/isst-config.c 		isst_fact_display_information(cpu, outf, tdp_level, fact_bucket,
cpu               960 tools/power/x86/intel-speed-select/isst-config.c static void set_fact_for_cpu(int cpu, void *arg1, void *arg2, void *arg3,
cpu               966 tools/power/x86/intel-speed-select/isst-config.c 	ret = isst_set_pbf_fact_status(cpu, 0, status);
cpu               973 tools/power/x86/intel-speed-select/isst-config.c 			ret = isst_get_ctdp_levels(cpu, &pkg_dev);
cpu               975 tools/power/x86/intel-speed-select/isst-config.c 				isst_display_result(cpu, outf, "turbo-freq",
cpu               979 tools/power/x86/intel-speed-select/isst-config.c 			ret = isst_set_trl(cpu, fact_trl);
cpu               980 tools/power/x86/intel-speed-select/isst-config.c 			isst_display_result(cpu, outf, "turbo-freq", "enable",
cpu               984 tools/power/x86/intel-speed-select/isst-config.c 			isst_set_trl_from_current_tdp(cpu, fact_trl);
cpu               985 tools/power/x86/intel-speed-select/isst-config.c 			isst_display_result(cpu, outf, "turbo-freq", "disable",
cpu              1035 tools/power/x86/intel-speed-select/isst-config.c static void enable_clos_qos_config(int cpu, void *arg1, void *arg2, void *arg3,
cpu              1041 tools/power/x86/intel-speed-select/isst-config.c 	ret = isst_pm_qos_config(cpu, status, clos_priority_type);
cpu              1046 tools/power/x86/intel-speed-select/isst-config.c 			isst_display_result(cpu, outf, "core-power", "enable",
cpu              1049 tools/power/x86/intel-speed-select/isst-config.c 			isst_display_result(cpu, outf, "core-power", "disable",
cpu              1101 tools/power/x86/intel-speed-select/isst-config.c static void dump_clos_config_for_cpu(int cpu, void *arg1, void *arg2,
cpu              1107 tools/power/x86/intel-speed-select/isst-config.c 	ret = isst_pm_get_clos(cpu, current_clos, &clos_config);
cpu              1111 tools/power/x86/intel-speed-select/isst-config.c 		isst_clos_display_information(cpu, outf, current_clos,
cpu              1139 tools/power/x86/intel-speed-select/isst-config.c static void get_clos_info_for_cpu(int cpu, void *arg1, void *arg2, void *arg3,
cpu              1144 tools/power/x86/intel-speed-select/isst-config.c 	ret = isst_clos_get_clos_information(cpu, &enable, &prio_type);
cpu              1148 tools/power/x86/intel-speed-select/isst-config.c 		isst_clos_display_clos_information(cpu, outf, enable, prio_type);
cpu              1173 tools/power/x86/intel-speed-select/isst-config.c static void set_clos_config_for_cpu(int cpu, void *arg1, void *arg2, void *arg3,
cpu              1179 tools/power/x86/intel-speed-select/isst-config.c 	clos_config.pkg_id = get_physical_package_id(cpu);
cpu              1180 tools/power/x86/intel-speed-select/isst-config.c 	clos_config.die_id = get_physical_die_id(cpu);
cpu              1187 tools/power/x86/intel-speed-select/isst-config.c 	ret = isst_set_clos(cpu, current_clos, &clos_config);
cpu              1191 tools/power/x86/intel-speed-select/isst-config.c 		isst_display_result(cpu, outf, "core-power", "config", ret);
cpu              1246 tools/power/x86/intel-speed-select/isst-config.c static void set_clos_assoc_for_cpu(int cpu, void *arg1, void *arg2, void *arg3,
cpu              1251 tools/power/x86/intel-speed-select/isst-config.c 	ret = isst_clos_associate(cpu, current_clos);
cpu              1255 tools/power/x86/intel-speed-select/isst-config.c 		isst_display_result(cpu, outf, "core-power", "assoc", ret);
cpu              1280 tools/power/x86/intel-speed-select/isst-config.c static void get_clos_assoc_for_cpu(int cpu, void *arg1, void *arg2, void *arg3,
cpu              1285 tools/power/x86/intel-speed-select/isst-config.c 	ret = isst_clos_get_assoc_status(cpu, &clos);
cpu              1289 tools/power/x86/intel-speed-select/isst-config.c 		isst_clos_display_assoc_information(cpu, outf, clos);
cpu                 9 tools/power/x86/intel-speed-select/isst-core.c int isst_get_ctdp_levels(int cpu, struct isst_pkg_ctdp *pkg_dev)
cpu                14 tools/power/x86/intel-speed-select/isst-core.c 	ret = isst_send_mbox_command(cpu, CONFIG_TDP,
cpu                19 tools/power/x86/intel-speed-select/isst-core.c 	debug_printf("cpu:%d CONFIG_TDP_GET_LEVELS_INFO resp:%x\n", cpu, resp);
cpu                30 tools/power/x86/intel-speed-select/isst-core.c int isst_get_ctdp_control(int cpu, int config_index,
cpu                36 tools/power/x86/intel-speed-select/isst-core.c 	ret = isst_send_mbox_command(cpu, CONFIG_TDP,
cpu                49 tools/power/x86/intel-speed-select/isst-core.c 		cpu, resp, ctdp_level->fact_support, ctdp_level->pbf_support,
cpu                55 tools/power/x86/intel-speed-select/isst-core.c int isst_get_tdp_info(int cpu, int config_index,
cpu                61 tools/power/x86/intel-speed-select/isst-core.c 	ret = isst_send_mbox_command(cpu, CONFIG_TDP, CONFIG_TDP_GET_TDP_INFO,
cpu                71 tools/power/x86/intel-speed-select/isst-core.c 		cpu, config_index, resp, ctdp_level->tdp_ratio,
cpu                76 tools/power/x86/intel-speed-select/isst-core.c int isst_get_pwr_info(int cpu, int config_index,
cpu                82 tools/power/x86/intel-speed-select/isst-core.c 	ret = isst_send_mbox_command(cpu, CONFIG_TDP, CONFIG_TDP_GET_PWR_INFO,
cpu                92 tools/power/x86/intel-speed-select/isst-core.c 		cpu, config_index, resp, ctdp_level->pkg_max_power,
cpu                98 tools/power/x86/intel-speed-select/isst-core.c int isst_get_tjmax_info(int cpu, int config_index,
cpu               104 tools/power/x86/intel-speed-select/isst-core.c 	ret = isst_send_mbox_command(cpu, CONFIG_TDP, CONFIG_TDP_GET_TJMAX_INFO,
cpu               113 tools/power/x86/intel-speed-select/isst-core.c 		cpu, config_index, resp, ctdp_level->t_proc_hot);
cpu               118 tools/power/x86/intel-speed-select/isst-core.c int isst_get_coremask_info(int cpu, int config_index,
cpu               129 tools/power/x86/intel-speed-select/isst-core.c 		ret = isst_send_mbox_command(cpu, CONFIG_TDP,
cpu               137 tools/power/x86/intel-speed-select/isst-core.c 			cpu, config_index, i, resp);
cpu               140 tools/power/x86/intel-speed-select/isst-core.c 		set_cpu_mask_from_punit_coremask(cpu, mask,
cpu               145 tools/power/x86/intel-speed-select/isst-core.c 		debug_printf("cpu:%d ctdp:%d mask:%d cpu count:%d\n", cpu,
cpu               152 tools/power/x86/intel-speed-select/isst-core.c int isst_get_get_trl(int cpu, int level, int avx_level, int *trl)
cpu               158 tools/power/x86/intel-speed-select/isst-core.c 	ret = isst_send_mbox_command(cpu, CONFIG_TDP,
cpu               166 tools/power/x86/intel-speed-select/isst-core.c 		cpu, req, resp);
cpu               174 tools/power/x86/intel-speed-select/isst-core.c 	ret = isst_send_mbox_command(cpu, CONFIG_TDP,
cpu               180 tools/power/x86/intel-speed-select/isst-core.c 	debug_printf("cpu:%d CONFIG_TDP_GET_TURBO_LIMIT req:%x resp:%x\n", cpu,
cpu               191 tools/power/x86/intel-speed-select/isst-core.c int isst_get_trl_bucket_info(int cpu, unsigned long long *buckets_info)
cpu               195 tools/power/x86/intel-speed-select/isst-core.c 	debug_printf("cpu:%d bucket info via MSR\n", cpu);
cpu               199 tools/power/x86/intel-speed-select/isst-core.c 	ret = isst_send_msr_command(cpu, 0x1ae, 0, buckets_info);
cpu               203 tools/power/x86/intel-speed-select/isst-core.c 	debug_printf("cpu:%d bucket info via MSR successful 0x%llx\n", cpu,
cpu               209 tools/power/x86/intel-speed-select/isst-core.c int isst_set_tdp_level_msr(int cpu, int tdp_level)
cpu               214 tools/power/x86/intel-speed-select/isst-core.c 	debug_printf("cpu:%d tdp_level via MSR %d\n", cpu, tdp_level);
cpu               216 tools/power/x86/intel-speed-select/isst-core.c 	if (isst_get_config_tdp_lock_status(cpu)) {
cpu               217 tools/power/x86/intel-speed-select/isst-core.c 		debug_printf("cpu:%d tdp_locked\n", cpu);
cpu               224 tools/power/x86/intel-speed-select/isst-core.c 	ret = isst_send_msr_command(cpu, 0x64b, 1, &level);
cpu               228 tools/power/x86/intel-speed-select/isst-core.c 	debug_printf("cpu:%d tdp_level via MSR successful %d\n", cpu, tdp_level);
cpu               233 tools/power/x86/intel-speed-select/isst-core.c int isst_set_tdp_level(int cpu, int tdp_level)
cpu               238 tools/power/x86/intel-speed-select/isst-core.c 	ret = isst_send_mbox_command(cpu, CONFIG_TDP, CONFIG_TDP_SET_LEVEL, 0,
cpu               241 tools/power/x86/intel-speed-select/isst-core.c 		return isst_set_tdp_level_msr(cpu, tdp_level);
cpu               246 tools/power/x86/intel-speed-select/isst-core.c int isst_get_pbf_info(int cpu, int level, struct isst_pbf_info *pbf_info)
cpu               257 tools/power/x86/intel-speed-select/isst-core.c 		ret = isst_send_mbox_command(cpu, CONFIG_TDP,
cpu               265 tools/power/x86/intel-speed-select/isst-core.c 			cpu, resp);
cpu               268 tools/power/x86/intel-speed-select/isst-core.c 		set_cpu_mask_from_punit_coremask(cpu, mask,
cpu               275 tools/power/x86/intel-speed-select/isst-core.c 	ret = isst_send_mbox_command(cpu, CONFIG_TDP,
cpu               281 tools/power/x86/intel-speed-select/isst-core.c 	debug_printf("cpu:%d CONFIG_TDP_PBF_GET_P1HI_P1LO_INFO resp:%x\n", cpu,
cpu               289 tools/power/x86/intel-speed-select/isst-core.c 		cpu, CONFIG_TDP, CONFIG_TDP_PBF_GET_TDP_INFO, 0, req, &resp);
cpu               293 tools/power/x86/intel-speed-select/isst-core.c 	debug_printf("cpu:%d CONFIG_TDP_PBF_GET_TDP_INFO resp:%x\n", cpu, resp);
cpu               299 tools/power/x86/intel-speed-select/isst-core.c 		cpu, CONFIG_TDP, CONFIG_TDP_PBF_GET_TJ_MAX_INFO, 0, req, &resp);
cpu               303 tools/power/x86/intel-speed-select/isst-core.c 	debug_printf("cpu:%d CONFIG_TDP_PBF_GET_TJ_MAX_INFO resp:%x\n", cpu,
cpu               316 tools/power/x86/intel-speed-select/isst-core.c int isst_set_pbf_fact_status(int cpu, int pbf, int enable)
cpu               324 tools/power/x86/intel-speed-select/isst-core.c 	ret = isst_get_ctdp_levels(cpu, &pkg_dev);
cpu               330 tools/power/x86/intel-speed-select/isst-core.c 	ret = isst_get_ctdp_control(cpu, current_level, &ctdp_level);
cpu               352 tools/power/x86/intel-speed-select/isst-core.c 	ret = isst_send_mbox_command(cpu, CONFIG_TDP,
cpu               358 tools/power/x86/intel-speed-select/isst-core.c 		     cpu, pbf, req);
cpu               363 tools/power/x86/intel-speed-select/isst-core.c int isst_get_fact_bucket_info(int cpu, int level,
cpu               373 tools/power/x86/intel-speed-select/isst-core.c 			cpu, CONFIG_TDP,
cpu               381 tools/power/x86/intel-speed-select/isst-core.c 			cpu, i, level, resp);
cpu               394 tools/power/x86/intel-speed-select/isst-core.c 				cpu, CONFIG_TDP,
cpu               402 tools/power/x86/intel-speed-select/isst-core.c 				cpu, i, level, k, resp);
cpu               428 tools/power/x86/intel-speed-select/isst-core.c int isst_get_fact_info(int cpu, int level, struct isst_fact_info *fact_info)
cpu               433 tools/power/x86/intel-speed-select/isst-core.c 	ret = isst_send_mbox_command(cpu, CONFIG_TDP,
cpu               440 tools/power/x86/intel-speed-select/isst-core.c 		     cpu, resp);
cpu               446 tools/power/x86/intel-speed-select/isst-core.c 	ret = isst_get_fact_bucket_info(cpu, level, fact_info->bucket_info);
cpu               451 tools/power/x86/intel-speed-select/isst-core.c int isst_set_trl(int cpu, unsigned long long trl)
cpu               458 tools/power/x86/intel-speed-select/isst-core.c 	ret = isst_send_msr_command(cpu, 0x1AD, 1, &trl);
cpu               465 tools/power/x86/intel-speed-select/isst-core.c int isst_set_trl_from_current_tdp(int cpu, unsigned long long trl)
cpu               477 tools/power/x86/intel-speed-select/isst-core.c 		ret = isst_get_ctdp_levels(cpu, &pkg_dev);
cpu               481 tools/power/x86/intel-speed-select/isst-core.c 		ret = isst_get_get_trl(cpu, pkg_dev.current_level, 0, trl);
cpu               492 tools/power/x86/intel-speed-select/isst-core.c 	ret = isst_send_msr_command(cpu, 0x1AD, 1, &msr_trl);
cpu               500 tools/power/x86/intel-speed-select/isst-core.c int isst_get_config_tdp_lock_status(int cpu)
cpu               505 tools/power/x86/intel-speed-select/isst-core.c 	ret = isst_send_msr_command(cpu, 0x64b, 0, &tdp_control);
cpu               514 tools/power/x86/intel-speed-select/isst-core.c void isst_get_process_ctdp_complete(int cpu, struct isst_pkg_ctdp *pkg_dev)
cpu               531 tools/power/x86/intel-speed-select/isst-core.c int isst_get_process_ctdp(int cpu, int tdp_level, struct isst_pkg_ctdp *pkg_dev)
cpu               538 tools/power/x86/intel-speed-select/isst-core.c 	ret = isst_get_ctdp_levels(cpu, pkg_dev);
cpu               543 tools/power/x86/intel-speed-select/isst-core.c 		     cpu, pkg_dev->enabled, pkg_dev->current_level,
cpu               552 tools/power/x86/intel-speed-select/isst-core.c 		debug_printf("cpu:%d Get Information for TDP level:%d\n", cpu,
cpu               557 tools/power/x86/intel-speed-select/isst-core.c 		ctdp_level->control_cpu = cpu;
cpu               558 tools/power/x86/intel-speed-select/isst-core.c 		ctdp_level->pkg_id = get_physical_package_id(cpu);
cpu               559 tools/power/x86/intel-speed-select/isst-core.c 		ctdp_level->die_id = get_physical_die_id(cpu);
cpu               561 tools/power/x86/intel-speed-select/isst-core.c 		ret = isst_get_ctdp_control(cpu, i, ctdp_level);
cpu               568 tools/power/x86/intel-speed-select/isst-core.c 		ret = isst_get_tdp_info(cpu, i, ctdp_level);
cpu               572 tools/power/x86/intel-speed-select/isst-core.c 		ret = isst_get_pwr_info(cpu, i, ctdp_level);
cpu               576 tools/power/x86/intel-speed-select/isst-core.c 		ret = isst_get_tjmax_info(cpu, i, ctdp_level);
cpu               582 tools/power/x86/intel-speed-select/isst-core.c 		ret = isst_get_coremask_info(cpu, i, ctdp_level);
cpu               586 tools/power/x86/intel-speed-select/isst-core.c 		ret = isst_get_trl_bucket_info(cpu, &ctdp_level->buckets_info);
cpu               590 tools/power/x86/intel-speed-select/isst-core.c 		ret = isst_get_get_trl(cpu, i, 0,
cpu               595 tools/power/x86/intel-speed-select/isst-core.c 		ret = isst_get_get_trl(cpu, i, 1,
cpu               600 tools/power/x86/intel-speed-select/isst-core.c 		ret = isst_get_get_trl(cpu, i, 2,
cpu               606 tools/power/x86/intel-speed-select/isst-core.c 			ret = isst_get_pbf_info(cpu, i, &ctdp_level->pbf_info);
cpu               612 tools/power/x86/intel-speed-select/isst-core.c 			ret = isst_get_fact_info(cpu, i,
cpu               622 tools/power/x86/intel-speed-select/isst-core.c int isst_clos_get_clos_information(int cpu, int *enable, int *type)
cpu               627 tools/power/x86/intel-speed-select/isst-core.c 	ret = isst_send_mbox_command(cpu, CONFIG_CLOS, CLOS_PM_QOS_CONFIG, 0, 0,
cpu               632 tools/power/x86/intel-speed-select/isst-core.c 	debug_printf("cpu:%d CLOS_PM_QOS_CONFIG resp:%x\n", cpu, resp);
cpu               647 tools/power/x86/intel-speed-select/isst-core.c int isst_pm_qos_config(int cpu, int enable_clos, int priority_type)
cpu               652 tools/power/x86/intel-speed-select/isst-core.c 	ret = isst_send_mbox_command(cpu, CONFIG_CLOS, CLOS_PM_QOS_CONFIG, 0, 0,
cpu               657 tools/power/x86/intel-speed-select/isst-core.c 	debug_printf("cpu:%d CLOS_PM_QOS_CONFIG resp:%x\n", cpu, resp);
cpu               671 tools/power/x86/intel-speed-select/isst-core.c 	ret = isst_send_mbox_command(cpu, CONFIG_CLOS, CLOS_PM_QOS_CONFIG,
cpu               676 tools/power/x86/intel-speed-select/isst-core.c 	debug_printf("cpu:%d CLOS_PM_QOS_CONFIG priority type:%d req:%x\n", cpu,
cpu               682 tools/power/x86/intel-speed-select/isst-core.c int isst_pm_get_clos(int cpu, int clos, struct isst_clos_config *clos_config)
cpu               687 tools/power/x86/intel-speed-select/isst-core.c 	ret = isst_send_mbox_command(cpu, CONFIG_CLOS, CLOS_PM_CLOS, clos, 0,
cpu               692 tools/power/x86/intel-speed-select/isst-core.c 	clos_config->pkg_id = get_physical_package_id(cpu);
cpu               693 tools/power/x86/intel-speed-select/isst-core.c 	clos_config->die_id = get_physical_die_id(cpu);
cpu               704 tools/power/x86/intel-speed-select/isst-core.c int isst_set_clos(int cpu, int clos, struct isst_clos_config *clos_config)
cpu               718 tools/power/x86/intel-speed-select/isst-core.c 	ret = isst_send_mbox_command(cpu, CONFIG_CLOS, CLOS_PM_CLOS, param, req,
cpu               723 tools/power/x86/intel-speed-select/isst-core.c 	debug_printf("cpu:%d CLOS_PM_CLOS param:%x req:%x\n", cpu, param, req);
cpu               728 tools/power/x86/intel-speed-select/isst-core.c int isst_clos_get_assoc_status(int cpu, int *clos_id)
cpu               734 tools/power/x86/intel-speed-select/isst-core.c 	core_id = find_phy_core_num(cpu);
cpu               737 tools/power/x86/intel-speed-select/isst-core.c 	ret = isst_send_mbox_command(cpu, CONFIG_CLOS, CLOS_PQR_ASSOC, param, 0,
cpu               742 tools/power/x86/intel-speed-select/isst-core.c 	debug_printf("cpu:%d CLOS_PQR_ASSOC param:%x resp:%x\n", cpu, param,
cpu               749 tools/power/x86/intel-speed-select/isst-core.c int isst_clos_associate(int cpu, int clos_id)
cpu               756 tools/power/x86/intel-speed-select/isst-core.c 	core_id = find_phy_core_num(cpu);
cpu               759 tools/power/x86/intel-speed-select/isst-core.c 	ret = isst_send_mbox_command(cpu, CONFIG_CLOS, CLOS_PQR_ASSOC, param,
cpu               764 tools/power/x86/intel-speed-select/isst-core.c 	debug_printf("cpu:%d CLOS_PQR_ASSOC param:%x req:%x\n", cpu, param,
cpu               163 tools/power/x86/intel-speed-select/isst-display.c static void print_package_info(int cpu, FILE *outf)
cpu               168 tools/power/x86/intel-speed-select/isst-display.c 		 get_physical_package_id(cpu));
cpu               170 tools/power/x86/intel-speed-select/isst-display.c 	snprintf(header, sizeof(header), "die-%d", get_physical_die_id(cpu));
cpu               172 tools/power/x86/intel-speed-select/isst-display.c 	snprintf(header, sizeof(header), "cpu-%d", cpu);
cpu               176 tools/power/x86/intel-speed-select/isst-display.c static void _isst_pbf_display_information(int cpu, FILE *outf, int level,
cpu               216 tools/power/x86/intel-speed-select/isst-display.c static void _isst_fact_display_information(int cpu, FILE *outf, int level,
cpu               290 tools/power/x86/intel-speed-select/isst-display.c void isst_ctdp_display_core_info(int cpu, FILE *outf, char *prefix,
cpu               297 tools/power/x86/intel-speed-select/isst-display.c 		 get_physical_package_id(cpu));
cpu               299 tools/power/x86/intel-speed-select/isst-display.c 	snprintf(header, sizeof(header), "die-%d", get_physical_die_id(cpu));
cpu               301 tools/power/x86/intel-speed-select/isst-display.c 	snprintf(header, sizeof(header), "cpu-%d", cpu);
cpu               310 tools/power/x86/intel-speed-select/isst-display.c void isst_ctdp_display_information(int cpu, FILE *outf, int tdp_level,
cpu               318 tools/power/x86/intel-speed-select/isst-display.c 		print_package_info(cpu, outf);
cpu               333 tools/power/x86/intel-speed-select/isst-display.c 		j = get_cpu_count(get_physical_package_id(cpu),
cpu               334 tools/power/x86/intel-speed-select/isst-display.c 				  get_physical_die_id(cpu));
cpu               442 tools/power/x86/intel-speed-select/isst-display.c 			_isst_pbf_display_information(cpu, outf, i,
cpu               446 tools/power/x86/intel-speed-select/isst-display.c 			_isst_fact_display_information(cpu, outf, i, 0xff, 0xff,
cpu               465 tools/power/x86/intel-speed-select/isst-display.c void isst_pbf_display_information(int cpu, FILE *outf, int level,
cpu               468 tools/power/x86/intel-speed-select/isst-display.c 	print_package_info(cpu, outf);
cpu               469 tools/power/x86/intel-speed-select/isst-display.c 	_isst_pbf_display_information(cpu, outf, level, pbf_info, 4);
cpu               473 tools/power/x86/intel-speed-select/isst-display.c void isst_fact_display_information(int cpu, FILE *outf, int level,
cpu               477 tools/power/x86/intel-speed-select/isst-display.c 	print_package_info(cpu, outf);
cpu               478 tools/power/x86/intel-speed-select/isst-display.c 	_isst_fact_display_information(cpu, outf, level, fact_bucket, fact_avx,
cpu               483 tools/power/x86/intel-speed-select/isst-display.c void isst_clos_display_information(int cpu, FILE *outf, int clos,
cpu               490 tools/power/x86/intel-speed-select/isst-display.c 		 get_physical_package_id(cpu));
cpu               492 tools/power/x86/intel-speed-select/isst-display.c 	snprintf(header, sizeof(header), "die-%d", get_physical_die_id(cpu));
cpu               494 tools/power/x86/intel-speed-select/isst-display.c 	snprintf(header, sizeof(header), "cpu-%d", cpu);
cpu               527 tools/power/x86/intel-speed-select/isst-display.c void isst_clos_display_clos_information(int cpu, FILE *outf,
cpu               534 tools/power/x86/intel-speed-select/isst-display.c 		 get_physical_package_id(cpu));
cpu               536 tools/power/x86/intel-speed-select/isst-display.c 	snprintf(header, sizeof(header), "die-%d", get_physical_die_id(cpu));
cpu               538 tools/power/x86/intel-speed-select/isst-display.c 	snprintf(header, sizeof(header), "cpu-%d", cpu);
cpu               555 tools/power/x86/intel-speed-select/isst-display.c void isst_clos_display_assoc_information(int cpu, FILE *outf, int clos)
cpu               561 tools/power/x86/intel-speed-select/isst-display.c 		 get_physical_package_id(cpu));
cpu               563 tools/power/x86/intel-speed-select/isst-display.c 	snprintf(header, sizeof(header), "die-%d", get_physical_die_id(cpu));
cpu               565 tools/power/x86/intel-speed-select/isst-display.c 	snprintf(header, sizeof(header), "cpu-%d", cpu);
cpu               578 tools/power/x86/intel-speed-select/isst-display.c void isst_display_result(int cpu, FILE *outf, char *feature, char *cmd,
cpu               585 tools/power/x86/intel-speed-select/isst-display.c 		 get_physical_package_id(cpu));
cpu               587 tools/power/x86/intel-speed-select/isst-display.c 	snprintf(header, sizeof(header), "die-%d", get_physical_die_id(cpu));
cpu               589 tools/power/x86/intel-speed-select/isst-display.c 	snprintf(header, sizeof(header), "cpu-%d", cpu);
cpu               168 tools/power/x86/intel-speed-select/isst.h extern int get_physical_package_id(int cpu);
cpu               169 tools/power/x86/intel-speed-select/isst.h extern int get_physical_die_id(int cpu);
cpu               175 tools/power/x86/intel-speed-select/isst.h extern void set_cpu_mask_from_punit_coremask(int cpu,
cpu               181 tools/power/x86/intel-speed-select/isst.h extern int isst_send_mbox_command(unsigned int cpu, unsigned char command,
cpu               186 tools/power/x86/intel-speed-select/isst.h extern int isst_send_msr_command(unsigned int cpu, unsigned int command,
cpu               189 tools/power/x86/intel-speed-select/isst.h extern int isst_get_ctdp_levels(int cpu, struct isst_pkg_ctdp *pkg_dev);
cpu               190 tools/power/x86/intel-speed-select/isst.h extern int isst_get_coremask_info(int cpu, int config_index,
cpu               192 tools/power/x86/intel-speed-select/isst.h extern int isst_get_process_ctdp(int cpu, int tdp_level,
cpu               194 tools/power/x86/intel-speed-select/isst.h extern void isst_get_process_ctdp_complete(int cpu,
cpu               196 tools/power/x86/intel-speed-select/isst.h extern void isst_ctdp_display_information(int cpu, FILE *outf, int tdp_level,
cpu               198 tools/power/x86/intel-speed-select/isst.h extern void isst_ctdp_display_core_info(int cpu, FILE *outf, char *prefix,
cpu               202 tools/power/x86/intel-speed-select/isst.h extern void isst_pbf_display_information(int cpu, FILE *outf, int level,
cpu               204 tools/power/x86/intel-speed-select/isst.h extern int isst_set_tdp_level(int cpu, int tdp_level);
cpu               205 tools/power/x86/intel-speed-select/isst.h extern int isst_set_tdp_level_msr(int cpu, int tdp_level);
cpu               206 tools/power/x86/intel-speed-select/isst.h extern int isst_set_pbf_fact_status(int cpu, int pbf, int enable);
cpu               207 tools/power/x86/intel-speed-select/isst.h extern int isst_get_pbf_info(int cpu, int level,
cpu               210 tools/power/x86/intel-speed-select/isst.h extern int isst_get_fact_info(int cpu, int level,
cpu               212 tools/power/x86/intel-speed-select/isst.h extern int isst_get_fact_bucket_info(int cpu, int level,
cpu               214 tools/power/x86/intel-speed-select/isst.h extern void isst_fact_display_information(int cpu, FILE *outf, int level,
cpu               217 tools/power/x86/intel-speed-select/isst.h extern int isst_set_trl(int cpu, unsigned long long trl);
cpu               218 tools/power/x86/intel-speed-select/isst.h extern int isst_set_trl_from_current_tdp(int cpu, unsigned long long trl);
cpu               219 tools/power/x86/intel-speed-select/isst.h extern int isst_get_config_tdp_lock_status(int cpu);
cpu               221 tools/power/x86/intel-speed-select/isst.h extern int isst_pm_qos_config(int cpu, int enable_clos, int priority_type);
cpu               222 tools/power/x86/intel-speed-select/isst.h extern int isst_pm_get_clos(int cpu, int clos,
cpu               224 tools/power/x86/intel-speed-select/isst.h extern int isst_set_clos(int cpu, int clos,
cpu               226 tools/power/x86/intel-speed-select/isst.h extern int isst_clos_associate(int cpu, int clos);
cpu               227 tools/power/x86/intel-speed-select/isst.h extern int isst_clos_get_assoc_status(int cpu, int *clos_id);
cpu               228 tools/power/x86/intel-speed-select/isst.h extern void isst_clos_display_information(int cpu, FILE *outf, int clos,
cpu               230 tools/power/x86/intel-speed-select/isst.h extern void isst_clos_display_assoc_information(int cpu, FILE *outf, int clos);
cpu               234 tools/power/x86/intel-speed-select/isst.h extern void isst_display_result(int cpu, FILE *outf, char *feature, char *cmd,
cpu               237 tools/power/x86/intel-speed-select/isst.h extern int isst_clos_get_clos_information(int cpu, int *enable, int *type);
cpu               238 tools/power/x86/intel-speed-select/isst.h extern void isst_clos_display_clos_information(int cpu, FILE *outf,
cpu               311 tools/power/x86/turbostat/turbostat.c int cpu_is_not_present(int cpu)
cpu               313 tools/power/x86/turbostat/turbostat.c 	return !CPU_ISSET_S(cpu, cpu_present_setsize, cpu_present_set);
cpu               355 tools/power/x86/turbostat/turbostat.c int cpu_migrate(int cpu)
cpu               358 tools/power/x86/turbostat/turbostat.c 	CPU_SET_S(cpu, cpu_affinity_setsize, cpu_affinity_set);
cpu               364 tools/power/x86/turbostat/turbostat.c int get_msr_fd(int cpu)
cpu               369 tools/power/x86/turbostat/turbostat.c 	fd = fd_percpu[cpu];
cpu               374 tools/power/x86/turbostat/turbostat.c 	sprintf(pathname, "/dev/cpu/%d/msr", cpu);
cpu               379 tools/power/x86/turbostat/turbostat.c 	fd_percpu[cpu] = fd;
cpu               384 tools/power/x86/turbostat/turbostat.c int get_msr(int cpu, off_t offset, unsigned long long *msr)
cpu               388 tools/power/x86/turbostat/turbostat.c 	retval = pread(get_msr_fd(cpu), msr, sizeof(*msr), offset);
cpu               391 tools/power/x86/turbostat/turbostat.c 		err(-1, "cpu%d: msr offset 0x%llx read failed", cpu, (unsigned long long)offset);
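
The turbostat entries above show the usual userspace MSR access pattern: open /dev/cpu/N/msr once, cache the file descriptor, then pread() eight bytes at the MSR address. A minimal stand-alone sketch of that read path follows; it is an illustration only, with error handling trimmed, and the MSR address 0xE7 (IA32_MPERF) is just an example choice, not something this listing prescribes.

	/* Sketch of the /dev/cpu/N/msr read pattern used by get_msr() above;
	 * needs the msr driver loaded and root privileges. */
	#define _GNU_SOURCE
	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/types.h>
	#include <unistd.h>

	static int read_msr(int cpu, off_t offset, unsigned long long *val)
	{
		char path[64];
		int fd;

		snprintf(path, sizeof(path), "/dev/cpu/%d/msr", cpu);
		fd = open(path, O_RDONLY);
		if (fd < 0)
			return -1;
		if (pread(fd, val, sizeof(*val), offset) != sizeof(*val)) {
			close(fd);
			return -1;
		}
		close(fd);
		return 0;
	}

	int main(void)
	{
		unsigned long long mperf;

		if (read_msr(0, 0xE7 /* IA32_MPERF, illustrative only */, &mperf) == 0)
			printf("cpu0: MPERF 0x%llx\n", mperf);
		return 0;
	}

The real tool differs mainly in that it keeps one fd per CPU (fd_percpu[]) and calls err() on failure instead of returning an error code.
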
cpu              1703 tools/power/x86/turbostat/turbostat.c int get_mp(int cpu, struct msr_counter *mp, unsigned long long *counterp)
cpu              1706 tools/power/x86/turbostat/turbostat.c 		if (get_msr(cpu, mp->msr_num, counterp))
cpu              1713 tools/power/x86/turbostat/turbostat.c 				 cpu, mp->path);
cpu              1780 tools/power/x86/turbostat/turbostat.c 	int cpu = t->cpu_id;
cpu              1786 tools/power/x86/turbostat/turbostat.c 	if (cpu_migrate(cpu)) {
cpu              1787 tools/power/x86/turbostat/turbostat.c 		fprintf(outf, "Could not migrate to CPU %d\n", cpu);
cpu              1817 tools/power/x86/turbostat/turbostat.c 		if (get_msr(cpu, MSR_IA32_APERF, &t->aperf))
cpu              1824 tools/power/x86/turbostat/turbostat.c 		if (get_msr(cpu, MSR_IA32_APERF, &t->aperf))
cpu              1829 tools/power/x86/turbostat/turbostat.c 		if (get_msr(cpu, MSR_IA32_MPERF, &t->mperf))
cpu              1847 tools/power/x86/turbostat/turbostat.c 					cpu, aperf_time, mperf_time);
cpu              1856 tools/power/x86/turbostat/turbostat.c 		t->irq_count = irqs_per_cpu[cpu];
cpu              1858 tools/power/x86/turbostat/turbostat.c 		if (get_msr(cpu, MSR_SMI_COUNT, &msr))
cpu              1863 tools/power/x86/turbostat/turbostat.c 		if (get_msr(cpu, MSR_CORE_C1_RES, &t->c1))
cpu              1868 tools/power/x86/turbostat/turbostat.c 		if (get_mp(cpu, mp, &t->counter[i]))
cpu              1877 tools/power/x86/turbostat/turbostat.c 		if (get_msr(cpu, MSR_CORE_C3_RESIDENCY, &c->c3))
cpu              1882 tools/power/x86/turbostat/turbostat.c 		if (get_msr(cpu, MSR_CORE_C6_RESIDENCY, &c->c6))
cpu              1885 tools/power/x86/turbostat/turbostat.c 		if (get_msr(cpu, MSR_KNL_CORE_C6_RESIDENCY, &c->c6))
cpu              1890 tools/power/x86/turbostat/turbostat.c 		if (get_msr(cpu, MSR_CORE_C7_RESIDENCY, &c->c7))
cpu              1894 tools/power/x86/turbostat/turbostat.c 		if (get_msr(cpu, MSR_MODULE_C6_RES_MS, &c->mc6_us))
cpu              1898 tools/power/x86/turbostat/turbostat.c 		if (get_msr(cpu, MSR_IA32_THERM_STATUS, &msr))
cpu              1904 tools/power/x86/turbostat/turbostat.c 		if (get_msr(cpu, MSR_CORE_ENERGY_STAT, &msr))
cpu              1910 tools/power/x86/turbostat/turbostat.c 		if (get_mp(cpu, mp, &c->counter[i]))
cpu              1919 tools/power/x86/turbostat/turbostat.c 		if (get_msr(cpu, MSR_PKG_WEIGHTED_CORE_C0_RES, &p->pkg_wtd_core_c0))
cpu              1923 tools/power/x86/turbostat/turbostat.c 		if (get_msr(cpu, MSR_PKG_ANY_CORE_C0_RES, &p->pkg_any_core_c0))
cpu              1927 tools/power/x86/turbostat/turbostat.c 		if (get_msr(cpu, MSR_PKG_ANY_GFXE_C0_RES, &p->pkg_any_gfxe_c0))
cpu              1931 tools/power/x86/turbostat/turbostat.c 		if (get_msr(cpu, MSR_PKG_BOTH_CORE_GFXE_C0_RES, &p->pkg_both_core_gfxe_c0))
cpu              1935 tools/power/x86/turbostat/turbostat.c 		if (get_msr(cpu, MSR_PKG_C3_RESIDENCY, &p->pc3))
cpu              1939 tools/power/x86/turbostat/turbostat.c 			if (get_msr(cpu, MSR_ATOM_PKG_C6_RESIDENCY, &p->pc6))
cpu              1942 tools/power/x86/turbostat/turbostat.c 			if (get_msr(cpu, MSR_PKG_C6_RESIDENCY, &p->pc6))
cpu              1948 tools/power/x86/turbostat/turbostat.c 		if (get_msr(cpu, MSR_PKG_C2_RESIDENCY, &p->pc2))
cpu              1951 tools/power/x86/turbostat/turbostat.c 		if (get_msr(cpu, MSR_PKG_C7_RESIDENCY, &p->pc7))
cpu              1954 tools/power/x86/turbostat/turbostat.c 		if (get_msr(cpu, MSR_PKG_C8_RESIDENCY, &p->pc8))
cpu              1957 tools/power/x86/turbostat/turbostat.c 		if (get_msr(cpu, MSR_PKG_C9_RESIDENCY, &p->pc9))
cpu              1960 tools/power/x86/turbostat/turbostat.c 		if (get_msr(cpu, MSR_PKG_C10_RESIDENCY, &p->pc10))
cpu              1969 tools/power/x86/turbostat/turbostat.c 		if (get_msr(cpu, MSR_PKG_ENERGY_STATUS, &msr))
cpu              1974 tools/power/x86/turbostat/turbostat.c 		if (get_msr(cpu, MSR_PP0_ENERGY_STATUS, &msr))
cpu              1979 tools/power/x86/turbostat/turbostat.c 		if (get_msr(cpu, MSR_DRAM_ENERGY_STATUS, &msr))
cpu              1984 tools/power/x86/turbostat/turbostat.c 		if (get_msr(cpu, MSR_PP1_ENERGY_STATUS, &msr))
cpu              1989 tools/power/x86/turbostat/turbostat.c 		if (get_msr(cpu, MSR_PKG_PERF_STATUS, &msr))
cpu              1994 tools/power/x86/turbostat/turbostat.c 		if (get_msr(cpu, MSR_DRAM_PERF_STATUS, &msr))
cpu              1999 tools/power/x86/turbostat/turbostat.c 		if (get_msr(cpu, MSR_PKG_ENERGY_STAT, &msr))
cpu              2004 tools/power/x86/turbostat/turbostat.c 		if (get_msr(cpu, MSR_IA32_PACKAGE_THERM_STATUS, &msr))
cpu              2016 tools/power/x86/turbostat/turbostat.c 		if (get_mp(cpu, mp, &p->counter[i]))
cpu              2551 tools/power/x86/turbostat/turbostat.c int cpu_is_first_core_in_package(int cpu)
cpu              2553 tools/power/x86/turbostat/turbostat.c 	return cpu == parse_int_file("/sys/devices/system/cpu/cpu%d/topology/core_siblings_list", cpu);
cpu              2556 tools/power/x86/turbostat/turbostat.c int get_physical_package_id(int cpu)
cpu              2558 tools/power/x86/turbostat/turbostat.c 	return parse_int_file("/sys/devices/system/cpu/cpu%d/topology/physical_package_id", cpu);
cpu              2561 tools/power/x86/turbostat/turbostat.c int get_die_id(int cpu)
cpu              2563 tools/power/x86/turbostat/turbostat.c 	return parse_int_file("/sys/devices/system/cpu/cpu%d/topology/die_id", cpu);
cpu              2566 tools/power/x86/turbostat/turbostat.c int get_core_id(int cpu)
cpu              2568 tools/power/x86/turbostat/turbostat.c 	return parse_int_file("/sys/devices/system/cpu/cpu%d/topology/core_id", cpu);
cpu              2573 tools/power/x86/turbostat/turbostat.c 	int pkg, node, lnode, cpu, cpux;
cpu              2577 tools/power/x86/turbostat/turbostat.c 	for (cpu = 0; cpu <= topo.max_cpu_num; ++cpu)
cpu              2578 tools/power/x86/turbostat/turbostat.c 		cpus[cpu].logical_node_id = -1;
cpu              2583 tools/power/x86/turbostat/turbostat.c 		for (cpu = 0; cpu <= topo.max_cpu_num; ++cpu) {
cpu              2584 tools/power/x86/turbostat/turbostat.c 			if (cpus[cpu].physical_package_id != pkg)
cpu              2587 tools/power/x86/turbostat/turbostat.c 			if (cpus[cpu].logical_node_id != -1)
cpu              2589 tools/power/x86/turbostat/turbostat.c 			cpus[cpu].logical_node_id = lnode;
cpu              2590 tools/power/x86/turbostat/turbostat.c 			node = cpus[cpu].physical_node_id;
cpu              2596 tools/power/x86/turbostat/turbostat.c 			for (cpux = cpu; cpux <= topo.max_cpu_num; cpux++) {
cpu              2617 tools/power/x86/turbostat/turbostat.c 	int cpu = thiscpu->logical_cpu_id;
cpu              2621 tools/power/x86/turbostat/turbostat.c 			cpu, i);
cpu              2637 tools/power/x86/turbostat/turbostat.c 	int cpu = thiscpu->logical_cpu_id;
cpu              2652 tools/power/x86/turbostat/turbostat.c 		"/sys/devices/system/cpu/cpu%d/topology/thread_siblings", cpu);
cpu              2664 tools/power/x86/turbostat/turbostat.c 					if ((so != cpu) &&
cpu              2788 tools/power/x86/turbostat/turbostat.c int count_cpus(int cpu)
cpu              2793 tools/power/x86/turbostat/turbostat.c int mark_cpu_present(int cpu)
cpu              2795 tools/power/x86/turbostat/turbostat.c 	CPU_SET_S(cpu, cpu_present_setsize, cpu_present_set);
cpu              2799 tools/power/x86/turbostat/turbostat.c int init_thread_id(int cpu)
cpu              2801 tools/power/x86/turbostat/turbostat.c 	cpus[cpu].thread_id = -1;
cpu              3598 tools/power/x86/turbostat/turbostat.c 	int cpu;
cpu              3603 tools/power/x86/turbostat/turbostat.c 	cpu = t->cpu_id;
cpu              3609 tools/power/x86/turbostat/turbostat.c 	if (cpu_migrate(cpu)) {
cpu              3610 tools/power/x86/turbostat/turbostat.c 		fprintf(outf, "Could not migrate to CPU %d\n", cpu);
cpu              3614 tools/power/x86/turbostat/turbostat.c 	if (get_msr(cpu, MSR_IA32_ENERGY_PERF_BIAS, &msr))
cpu              3631 tools/power/x86/turbostat/turbostat.c 	fprintf(outf, "cpu%d: MSR_IA32_ENERGY_PERF_BIAS: 0x%08llx (%s)\n", cpu, msr, epb_string);
cpu              3642 tools/power/x86/turbostat/turbostat.c 	int cpu;
cpu              3647 tools/power/x86/turbostat/turbostat.c 	cpu = t->cpu_id;
cpu              3653 tools/power/x86/turbostat/turbostat.c 	if (cpu_migrate(cpu)) {
cpu              3654 tools/power/x86/turbostat/turbostat.c 		fprintf(outf, "Could not migrate to CPU %d\n", cpu);
cpu              3658 tools/power/x86/turbostat/turbostat.c 	if (get_msr(cpu, MSR_PM_ENABLE, &msr))
cpu              3662 tools/power/x86/turbostat/turbostat.c 		cpu, msr, (msr & (1 << 0)) ? "" : "No-");
cpu              3668 tools/power/x86/turbostat/turbostat.c 	if (get_msr(cpu, MSR_HWP_CAPABILITIES, &msr))
cpu              3673 tools/power/x86/turbostat/turbostat.c 			cpu, msr,
cpu              3679 tools/power/x86/turbostat/turbostat.c 	if (get_msr(cpu, MSR_HWP_REQUEST, &msr))
cpu              3684 tools/power/x86/turbostat/turbostat.c 			cpu, msr,
cpu              3693 tools/power/x86/turbostat/turbostat.c 		if (get_msr(cpu, MSR_HWP_REQUEST_PKG, &msr))
cpu              3698 tools/power/x86/turbostat/turbostat.c 			cpu, msr,
cpu              3706 tools/power/x86/turbostat/turbostat.c 		if (get_msr(cpu, MSR_HWP_INTERRUPT, &msr))
cpu              3711 tools/power/x86/turbostat/turbostat.c 			cpu, msr,
cpu              3715 tools/power/x86/turbostat/turbostat.c 	if (get_msr(cpu, MSR_HWP_STATUS, &msr))
cpu              3720 tools/power/x86/turbostat/turbostat.c 			cpu, msr,
cpu              3733 tools/power/x86/turbostat/turbostat.c 	int cpu;
cpu              3735 tools/power/x86/turbostat/turbostat.c 	cpu = t->cpu_id;
cpu              3741 tools/power/x86/turbostat/turbostat.c 	if (cpu_migrate(cpu)) {
cpu              3742 tools/power/x86/turbostat/turbostat.c 		fprintf(outf, "Could not migrate to CPU %d\n", cpu);
cpu              3747 tools/power/x86/turbostat/turbostat.c 		get_msr(cpu, MSR_CORE_PERF_LIMIT_REASONS, &msr);
cpu              3748 tools/power/x86/turbostat/turbostat.c 		fprintf(outf, "cpu%d: MSR_CORE_PERF_LIMIT_REASONS, 0x%08llx", cpu, msr);
cpu              3782 tools/power/x86/turbostat/turbostat.c 		get_msr(cpu, MSR_GFX_PERF_LIMIT_REASONS, &msr);
cpu              3783 tools/power/x86/turbostat/turbostat.c 		fprintf(outf, "cpu%d: MSR_GFX_PERF_LIMIT_REASONS, 0x%08llx", cpu, msr);
cpu              3804 tools/power/x86/turbostat/turbostat.c 		get_msr(cpu, MSR_RING_PERF_LIMIT_REASONS, &msr);
cpu              3805 tools/power/x86/turbostat/turbostat.c 		fprintf(outf, "cpu%d: MSR_RING_PERF_LIMIT_REASONS, 0x%08llx", cpu, msr);
cpu              4101 tools/power/x86/turbostat/turbostat.c 	int cpu;
cpu              4106 tools/power/x86/turbostat/turbostat.c 	cpu = t->cpu_id;
cpu              4112 tools/power/x86/turbostat/turbostat.c 	if (cpu_migrate(cpu)) {
cpu              4113 tools/power/x86/turbostat/turbostat.c 		fprintf(outf, "Could not migrate to CPU %d\n", cpu);
cpu              4118 tools/power/x86/turbostat/turbostat.c 		if (get_msr(cpu, MSR_IA32_PACKAGE_THERM_STATUS, &msr))
cpu              4123 tools/power/x86/turbostat/turbostat.c 			cpu, msr, tcc_activation_temp - dts);
cpu              4125 tools/power/x86/turbostat/turbostat.c 		if (get_msr(cpu, MSR_IA32_PACKAGE_THERM_INTERRUPT, &msr))
cpu              4131 tools/power/x86/turbostat/turbostat.c 			cpu, msr, tcc_activation_temp - dts, tcc_activation_temp - dts2);
cpu              4138 tools/power/x86/turbostat/turbostat.c 		if (get_msr(cpu, MSR_IA32_THERM_STATUS, &msr))
cpu              4144 tools/power/x86/turbostat/turbostat.c 			cpu, msr, tcc_activation_temp - dts, resolution);
cpu              4146 tools/power/x86/turbostat/turbostat.c 		if (get_msr(cpu, MSR_IA32_THERM_INTERRUPT, &msr))
cpu              4152 tools/power/x86/turbostat/turbostat.c 			cpu, msr, tcc_activation_temp - dts, tcc_activation_temp - dts2);
cpu              4158 tools/power/x86/turbostat/turbostat.c void print_power_limit_msr(int cpu, unsigned long long msr, char *label)
cpu              4161 tools/power/x86/turbostat/turbostat.c 		cpu, label,
cpu              4174 tools/power/x86/turbostat/turbostat.c 	int cpu;
cpu              4183 tools/power/x86/turbostat/turbostat.c 	cpu = t->cpu_id;
cpu              4184 tools/power/x86/turbostat/turbostat.c 	if (cpu_migrate(cpu)) {
cpu              4185 tools/power/x86/turbostat/turbostat.c 		fprintf(outf, "Could not migrate to CPU %d\n", cpu);
cpu              4191 tools/power/x86/turbostat/turbostat.c 		if (get_msr(cpu, MSR_RAPL_PWR_UNIT, &msr))
cpu              4195 tools/power/x86/turbostat/turbostat.c 		if (get_msr(cpu, MSR_RAPL_POWER_UNIT, &msr))
cpu              4199 tools/power/x86/turbostat/turbostat.c 	fprintf(outf, "cpu%d: %s: 0x%08llx (%f Watts, %f Joules, %f sec.)\n", cpu, msr_name, msr,
cpu              4204 tools/power/x86/turbostat/turbostat.c 		if (get_msr(cpu, MSR_PKG_POWER_INFO, &msr))
cpu              4209 tools/power/x86/turbostat/turbostat.c 			cpu, msr,
cpu              4218 tools/power/x86/turbostat/turbostat.c 		if (get_msr(cpu, MSR_PKG_POWER_LIMIT, &msr))
cpu              4222 tools/power/x86/turbostat/turbostat.c 			cpu, msr, (msr >> 63) & 1 ? "" : "UN");
cpu              4224 tools/power/x86/turbostat/turbostat.c 		print_power_limit_msr(cpu, msr, "PKG Limit #1");
cpu              4226 tools/power/x86/turbostat/turbostat.c 			cpu,
cpu              4234 tools/power/x86/turbostat/turbostat.c 		if (get_msr(cpu, MSR_DRAM_POWER_INFO, &msr))
cpu              4238 tools/power/x86/turbostat/turbostat.c 			cpu, msr,
cpu              4245 tools/power/x86/turbostat/turbostat.c 		if (get_msr(cpu, MSR_DRAM_POWER_LIMIT, &msr))
cpu              4248 tools/power/x86/turbostat/turbostat.c 				cpu, msr, (msr >> 31) & 1 ? "" : "UN");
cpu              4250 tools/power/x86/turbostat/turbostat.c 		print_power_limit_msr(cpu, msr, "DRAM Limit");
cpu              4253 tools/power/x86/turbostat/turbostat.c 		if (get_msr(cpu, MSR_PP0_POLICY, &msr))
cpu              4256 tools/power/x86/turbostat/turbostat.c 		fprintf(outf, "cpu%d: MSR_PP0_POLICY: %lld\n", cpu, msr & 0xF);
cpu              4259 tools/power/x86/turbostat/turbostat.c 		if (get_msr(cpu, MSR_PP0_POWER_LIMIT, &msr))
cpu              4262 tools/power/x86/turbostat/turbostat.c 				cpu, msr, (msr >> 31) & 1 ? "" : "UN");
cpu              4263 tools/power/x86/turbostat/turbostat.c 		print_power_limit_msr(cpu, msr, "Cores Limit");
cpu              4266 tools/power/x86/turbostat/turbostat.c 		if (get_msr(cpu, MSR_PP1_POLICY, &msr))
cpu              4269 tools/power/x86/turbostat/turbostat.c 		fprintf(outf, "cpu%d: MSR_PP1_POLICY: %lld\n", cpu, msr & 0xF);
cpu              4271 tools/power/x86/turbostat/turbostat.c 		if (get_msr(cpu, MSR_PP1_POWER_LIMIT, &msr))
cpu              4274 tools/power/x86/turbostat/turbostat.c 				cpu, msr, (msr >> 31) & 1 ? "" : "UN");
cpu              4275 tools/power/x86/turbostat/turbostat.c 		print_power_limit_msr(cpu, msr, "GFX Limit");
cpu              4460 tools/power/x86/turbostat/turbostat.c 	int cpu;
cpu              4470 tools/power/x86/turbostat/turbostat.c 	cpu = t->cpu_id;
cpu              4471 tools/power/x86/turbostat/turbostat.c 	if (cpu_migrate(cpu)) {
cpu              4472 tools/power/x86/turbostat/turbostat.c 		fprintf(outf, "Could not migrate to CPU %d\n", cpu);
cpu              4479 tools/power/x86/turbostat/turbostat.c 			cpu, tcc_activation_temp);
cpu              4494 tools/power/x86/turbostat/turbostat.c 			cpu, msr, target_c_local);
cpu              4506 tools/power/x86/turbostat/turbostat.c 		cpu, tcc_activation_temp);
cpu               361 tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c 	int cpu;
cpu               366 tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c 	for (cpu = 0; cpu <= max_cpu_num; ++cpu) {
cpu               367 tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c 		if (CPU_ISSET_S(cpu, cpu_setsize, cpu_selected_set))
cpu               368 tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c 			if (!CPU_ISSET_S(cpu, cpu_setsize, cpu_present_set))
cpu               369 tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c 				errx(1, "Requested cpu%d is not present", cpu);

cpu               376 tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c 	int cpu = 0;
cpu               402 tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c 			while (cpu <= end_cpu) {
cpu               403 tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c 				if (cpu > max_cpu_num)
cpu               404 tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c 					errx(1, "Requested cpu%d exceeds max cpu%d", cpu, max_cpu_num);
cpu               405 tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c 				CPU_SET_S(cpu, cpu_setsize, cpu_selected_set);
cpu               406 tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c 				cpu++;
cpu               413 tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c 			for (cpu = 0; cpu <= max_cpu_num; cpu += 1) {
cpu               414 tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c 				if (CPU_ISSET_S(cpu, cpu_setsize, cpu_present_set))
cpu               415 tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c 					CPU_SET_S(cpu, cpu_setsize, cpu_selected_set);
cpu               423 tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c 			for (cpu = 0; cpu <= max_cpu_num; cpu += 2) {
cpu               424 tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c 				if (CPU_ISSET_S(cpu, cpu_setsize, cpu_present_set))
cpu               425 tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c 					CPU_SET_S(cpu, cpu_setsize, cpu_selected_set);
cpu               434 tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c 			for (cpu = 1; cpu <= max_cpu_num; cpu += 2) {
cpu               435 tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c 				if (CPU_ISSET_S(cpu, cpu_setsize, cpu_present_set))
cpu               436 tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c 					CPU_SET_S(cpu, cpu_setsize, cpu_selected_set);
cpu               443 tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c 		cpu = strtol(startp, &endp, 10);
cpu               446 tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c 		if (cpu > max_cpu_num)
cpu               447 tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c 			errx(1, "Requested cpu%d exceeds max cpu%d", cpu, max_cpu_num);
cpu               448 tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c 		CPU_SET_S(cpu, cpu_setsize, cpu_selected_set);
cpu               626 tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c int get_msr(int cpu, int offset, unsigned long long *msr)
cpu               632 tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c 	sprintf(pathname, "/dev/cpu/%d/msr", cpu);
cpu               642 tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c 		fprintf(stderr, "get_msr(cpu%d, 0x%X, 0x%llX)\n", cpu, offset, *msr);
cpu               648 tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c int put_msr(int cpu, int offset, unsigned long long new_msr)
cpu               654 tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c 	sprintf(pathname, "/dev/cpu/%d/msr", cpu);
cpu               661 tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c 		err(-2, "pwrite(cpu%d, offset 0x%x, 0x%llx) = %d", cpu, offset, new_msr, retval);
cpu               666 tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c 		fprintf(stderr, "put_msr(cpu%d, 0x%X, 0x%llX)\n", cpu, offset, new_msr);
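
put_msr() above mirrors get_msr() but writes with pwrite(); x86_energy_perf_policy pairs the two for read-modify-write updates such as the IA32_ENERGY_PERF_BIAS change listed further down. The following compressed sketch of that pattern is a hedged illustration, not the tool's code: 0x1B0 (IA32_ENERGY_PERF_BIAS) is used as the example MSR and the helper name set_epb is invented here.

	/* Illustration of a read-modify-write via /dev/cpu/N/msr; values and
	 * helper name are examples, error handling is reduced to err(). */
	#define _GNU_SOURCE
	#include <err.h>
	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	static void set_epb(int cpu, unsigned long long new_epb)
	{
		char path[64];
		unsigned long long old;
		int fd;

		snprintf(path, sizeof(path), "/dev/cpu/%d/msr", cpu);
		fd = open(path, O_RDWR);
		if (fd < 0)
			err(1, "%s", path);
		if (pread(fd, &old, sizeof(old), 0x1B0) != sizeof(old))
			err(1, "pread");
		fprintf(stderr, "cpu%d: EPB old 0x%llx new 0x%llx\n", cpu, old, new_epb);
		if (pwrite(fd, &new_epb, sizeof(new_epb), 0x1B0) != sizeof(new_epb))
			err(1, "pwrite");
		close(fd);
	}
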
cpu               671 tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c void print_hwp_cap(int cpu, struct msr_hwp_cap *cap, char *str)
cpu               673 tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c 	if (cpu != -1)
cpu               674 tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c 		printf("cpu%d: ", cpu);
cpu               679 tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c void read_hwp_cap(int cpu, struct msr_hwp_cap *cap, unsigned int msr_offset)
cpu               683 tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c 	get_msr(cpu, msr_offset, &msr);
cpu               691 tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c void print_hwp_request(int cpu, struct msr_hwp_request *h, char *str)
cpu               693 tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c 	if (cpu != -1)
cpu               694 tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c 		printf("cpu%d: ", cpu);
cpu               714 tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c void read_hwp_request(int cpu, struct msr_hwp_request *hwp_req, unsigned int msr_offset)
cpu               718 tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c 	get_msr(cpu, msr_offset, &msr);
cpu               728 tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c void write_hwp_request(int cpu, struct msr_hwp_request *hwp_req, unsigned int msr_offset)
cpu               734 tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c 			cpu, hwp_req->hwp_min, hwp_req->hwp_max,
cpu               745 tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c 	put_msr(cpu, msr_offset, msr);
cpu               748 tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c int print_cpu_msrs(int cpu)
cpu               755 tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c 		get_msr(cpu, MSR_IA32_ENERGY_PERF_BIAS, &msr);
cpu               757 tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c 		printf("cpu%d: EPB %u\n", cpu, (unsigned int) msr);
cpu               763 tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c 	read_hwp_request(cpu, &req, MSR_HWP_REQUEST);
cpu               764 tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c 	print_hwp_request(cpu, &req, "");
cpu               766 tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c 	read_hwp_cap(cpu, &cap, MSR_HWP_CAPABILITIES);
cpu               767 tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c 	print_hwp_cap(cpu, &cap, "");
cpu               817 tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c void update_cpufreq_scaling_freq(int is_max, int cpu, unsigned int ratio)
cpu               825 tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c 		cpu, is_max ? "max" : "min");
cpu               851 tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c int update_sysfs(int cpu)
cpu               863 tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c 		update_cpufreq_scaling_freq(0, cpu, req_update.hwp_min);
cpu               866 tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c 		update_cpufreq_scaling_freq(1, cpu, req_update.hwp_max);
cpu               871 tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c int verify_hwp_req_self_consistency(int cpu, struct msr_hwp_request *req)
cpu               876 tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c 			cpu, req->hwp_min, req->hwp_max);
cpu               882 tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c 			cpu, req->hwp_desired, req->hwp_max);
cpu               887 tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c 			cpu, req->hwp_desired, req->hwp_min);
cpu               893 tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c int check_hwp_request_v_hwp_capabilities(int cpu, struct msr_hwp_request *req, struct msr_hwp_cap *cap)
cpu               898 tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c 				cpu, req->hwp_max, cap->highest);
cpu               901 tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c 				cpu, req->hwp_max, cap->lowest);
cpu               907 tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c 				cpu, req->hwp_min, cap->highest);
cpu               910 tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c 				cpu, req->hwp_min, cap->lowest);
cpu               915 tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c 			cpu, req->hwp_min, req->hwp_max);
cpu               920 tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c 				cpu, req->hwp_desired, req->hwp_max);
cpu               923 tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c 				cpu, req->hwp_desired, req->hwp_min);
cpu               926 tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c 				cpu, req->hwp_desired, cap->lowest);
cpu               929 tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c 				cpu, req->hwp_desired, cap->highest);
cpu               935 tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c int update_hwp_request(int cpu)
cpu               942 tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c 	read_hwp_request(cpu, &req, msr_offset);
cpu               944 tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c 		print_hwp_request(cpu, &req, "old: ");
cpu               963 tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c 	read_hwp_cap(cpu, &cap, MSR_HWP_CAPABILITIES);
cpu               965 tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c 		print_hwp_cap(cpu, &cap, "");
cpu               968 tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c 		check_hwp_request_v_hwp_capabilities(cpu, &req, &cap);
cpu               970 tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c 	verify_hwp_req_self_consistency(cpu, &req);
cpu               972 tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c 	write_hwp_request(cpu, &req, msr_offset);
cpu               975 tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c 		read_hwp_request(cpu, &req, msr_offset);
cpu               976 tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c 		print_hwp_request(cpu, &req, "new: ");
cpu               984 tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c 	int cpu = first_cpu_in_pkg[pkg];
cpu               988 tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c 	read_hwp_request(cpu, &req, msr_offset);
cpu              1007 tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c 	read_hwp_cap(cpu, &cap, MSR_HWP_CAPABILITIES);
cpu              1009 tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c 		print_hwp_cap(cpu, &cap, "");
cpu              1012 tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c 		check_hwp_request_v_hwp_capabilities(cpu, &req, &cap);
cpu              1014 tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c 	verify_hwp_req_self_consistency(cpu, &req);
cpu              1016 tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c 	write_hwp_request(cpu, &req, msr_offset);
cpu              1019 tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c 		read_hwp_request(cpu, &req, msr_offset);
cpu              1025 tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c int enable_hwp_on_cpu(int cpu)
cpu              1029 tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c 	get_msr(cpu, MSR_PM_ENABLE, &msr);
cpu              1030 tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c 	put_msr(cpu, MSR_PM_ENABLE, 1);
cpu              1033 tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c 		printf("cpu%d: MSR_PM_ENABLE old: %d new: %d\n", cpu, (unsigned int) msr, 1);
cpu              1038 tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c int update_cpu_msrs(int cpu)
cpu              1044 tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c 		get_msr(cpu, MSR_IA32_ENERGY_PERF_BIAS, &msr);
cpu              1045 tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c 		put_msr(cpu, MSR_IA32_ENERGY_PERF_BIAS, new_epb);
cpu              1049 tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c 				cpu, (unsigned int) msr, (unsigned int) new_epb);
cpu              1055 tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c 		get_msr(cpu, MSR_IA32_MISC_ENABLE, &msr);
cpu              1062 tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c 				put_msr(cpu, MSR_IA32_MISC_ENABLE, msr);
cpu              1064 tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c 					printf("cpu%d: turbo ENABLE\n", cpu);
cpu              1073 tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c 			put_msr(cpu, MSR_IA32_MISC_ENABLE, msr);
cpu              1075 tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c 				printf("cpu%d: turbo DISABLE\n", cpu);
cpu              1085 tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c 	update_hwp_request(cpu);
cpu              1101 tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c unsigned int get_pkg_num(int cpu)
cpu              1108 tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c 	sprintf(pathname, "/sys/devices/system/cpu/cpu%d/topology/physical_package_id", cpu);
cpu              1117 tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c int set_max_cpu_pkg_num(int cpu)
cpu              1121 tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c 	if (max_cpu_num < cpu)
cpu              1122 tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c 		max_cpu_num = cpu;
cpu              1124 tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c 	pkg = get_pkg_num(cpu);
cpu              1127 tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c 		errx(1, "cpu%d: %d >= MAX_PACKAGES (%d)", cpu, pkg, MAX_PACKAGES);
cpu              1134 tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c 		first_cpu_in_pkg[pkg] = cpu;
cpu              1139 tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c int mark_cpu_present(int cpu)
cpu              1141 tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c 	CPU_SET_S(cpu, cpu_setsize, cpu_present_set);
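
The x86_energy_perf_policy entries above end with the topology helpers (get_pkg_num(), set_max_cpu_pkg_num(), mark_cpu_present()) that map each CPU to its package via sysfs. A minimal, hedged sketch of the sysfs read behind get_pkg_num(); read_package_id() is an illustrative name, not the tool's own function:

    #include <stdio.h>

    /* Return the physical package id of one CPU, or -1 on error. */
    static int read_package_id(int cpu)
    {
            char path[128];
            FILE *fp;
            int pkg;

            snprintf(path, sizeof(path),
                     "/sys/devices/system/cpu/cpu%d/topology/physical_package_id", cpu);
            fp = fopen(path, "r");
            if (!fp)
                    return -1;      /* CPU offline or sysfs entry missing */
            if (fscanf(fp, "%d", &pkg) != 1)
                    pkg = -1;
            fclose(fp);
            return pkg;
    }
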
cpu                10 tools/testing/radix-tree/linux/percpu.h #define per_cpu_ptr(ptr, cpu)   ({ (void)(cpu); (ptr); })
cpu                11 tools/testing/radix-tree/linux/percpu.h #define per_cpu(var, cpu)	(*per_cpu_ptr(&(var), cpu))
cpu                58 tools/testing/radix-tree/test.h int radix_tree_cpu_dead(unsigned int cpu);
cpu                28 tools/testing/selftests/bpf/bpf_util.h #define bpf_percpu(name, cpu) name[(cpu)].v
cpu                23 tools/testing/selftests/bpf/prog_tests/get_stack_raw_tp.c static void get_stack_print_output(void *ctx, int cpu, void *data, __u32 size)
cpu                 9 tools/testing/selftests/bpf/prog_tests/perf_buffer.c static void on_sample(void *ctx, int cpu, void *data, __u32 size)
cpu                14 tools/testing/selftests/bpf/prog_tests/perf_buffer.c 	if (cpu_data != cpu)
cpu                15 tools/testing/selftests/bpf/prog_tests/perf_buffer.c 		CHECK(cpu_data != cpu, "check_cpu_data",
cpu                16 tools/testing/selftests/bpf/prog_tests/perf_buffer.c 		      "cpu_data %d != cpu %d\n", cpu_data, cpu);
cpu                18 tools/testing/selftests/bpf/prog_tests/perf_buffer.c 	CPU_SET(cpu, cpu_seen);
cpu                17 tools/testing/selftests/bpf/progs/test_perf_buffer.c 	int cpu = bpf_get_smp_processor_id();
cpu                20 tools/testing/selftests/bpf/progs/test_perf_buffer.c 			      &cpu, sizeof(cpu));
cpu               125 tools/testing/selftests/bpf/progs/test_tcp_estats.c 	int cpu;
cpu              4644 tools/testing/selftests/bpf/test_btf.c 	int cpu;
cpu              4649 tools/testing/selftests/bpf/test_btf.c 		for (cpu = 0; cpu < num_cpus; cpu++) {
cpu              4650 tools/testing/selftests/bpf/test_btf.c 			v->ui32 = i + cpu;
cpu              4659 tools/testing/selftests/bpf/test_btf.c 			v->si8_4[0][0] = (cpu + i) & 0xff;
cpu              4660 tools/testing/selftests/bpf/test_btf.c 			v->si8_4[0][1] = (cpu + i + 1) & 0xff;
cpu              4661 tools/testing/selftests/bpf/test_btf.c 			v->si8_4[1][0] = (cpu + i + 2) & 0xff;
cpu              4662 tools/testing/selftests/bpf/test_btf.c 			v->si8_4[1][1] = (cpu + i + 3) & 0xff;
cpu              4671 tools/testing/selftests/bpf/test_btf.c 		for (cpu = 0; cpu < num_cpus; cpu++) {
cpu              4686 tools/testing/selftests/bpf/test_btf.c 				 int cpu, void *mapv)
cpu              4698 tools/testing/selftests/bpf/test_btf.c 					  percpu_map ? cpu : next_key,
cpu              4723 tools/testing/selftests/bpf/test_btf.c 					  percpu_map ? cpu : next_key,
cpu              4858 tools/testing/selftests/bpf/test_btf.c 		int cpu;
cpu              4864 tools/testing/selftests/bpf/test_btf.c 		for (cpu = 0; cpu < num_cpus; cpu++) {
cpu              4876 tools/testing/selftests/bpf/test_btf.c 				if (cpu == 0) {
cpu              4897 tools/testing/selftests/bpf/test_btf.c 								  cpu, cmapv);
cpu                44 tools/testing/selftests/bpf/test_cgroup_storage.c 	int cpu, nproc;
cpu               110 tools/testing/selftests/bpf/test_cgroup_storage.c 	for (cpu = 0; cpu < nproc; cpu++)
cpu               111 tools/testing/selftests/bpf/test_cgroup_storage.c 		percpu_value[cpu] = 1000;
cpu               159 tools/testing/selftests/bpf/test_cgroup_storage.c 	for (cpu = 0; cpu < nproc; cpu++)
cpu               160 tools/testing/selftests/bpf/test_cgroup_storage.c 		percpu_value[cpu] = 0;
cpu               168 tools/testing/selftests/bpf/test_cgroup_storage.c 	for (cpu = 0; cpu < nproc; cpu++)
cpu               169 tools/testing/selftests/bpf/test_cgroup_storage.c 		value += percpu_value[cpu];
cpu                45 tools/testing/selftests/bpf/test_netcnt.c 	int cpu, nproc;
cpu               127 tools/testing/selftests/bpf/test_netcnt.c 	for (cpu = 0; cpu < nproc; cpu++) {
cpu               128 tools/testing/selftests/bpf/test_netcnt.c 		if (percpu_netcnt[cpu].packets > MAX_PERCPU_PACKETS) {
cpu               130 tools/testing/selftests/bpf/test_netcnt.c 			       percpu_netcnt[cpu].packets);
cpu               134 tools/testing/selftests/bpf/test_netcnt.c 		packets += percpu_netcnt[cpu].packets;
cpu               135 tools/testing/selftests/bpf/test_netcnt.c 		bytes += percpu_netcnt[cpu].bytes;
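
test_cgroup_storage and test_netcnt both fold per-CPU slots into one total after looking up a per-CPU BPF map. A hedged sketch of that aggregation step; map_fd, key, and the struct layout are assumptions standing in for the tests' real definitions, and the CPU count (here get_nprocs_conf()) only has to be large enough to hold one slot per possible CPU:

    #include <bpf/bpf.h>
    #include <sys/sysinfo.h>

    struct percpu_net_cnt {
            __u64 packets;
            __u64 bytes;
    };

    static int sum_percpu_counters(int map_fd, void *key, __u64 *packets, __u64 *bytes)
    {
            int cpu, nproc = get_nprocs_conf();
            struct percpu_net_cnt values[nproc];

            /* For BPF_MAP_TYPE_PERCPU_* maps, one lookup fills one slot per CPU. */
            if (bpf_map_lookup_elem(map_fd, key, values))
                    return -1;

            *packets = *bytes = 0;
            for (cpu = 0; cpu < nproc; cpu++) {
                    *packets += values[cpu].packets;
                    *bytes += values[cpu].bytes;
            }
            return 0;
    }
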
cpu                34 tools/testing/selftests/bpf/test_tcpnotify_user.c static void dummyfn(void *ctx, int cpu, void *data, __u32 size)
cpu                24 tools/testing/selftests/breakpoints/step_after_suspend_test.c void child(int cpu)
cpu                29 tools/testing/selftests/breakpoints/step_after_suspend_test.c 	CPU_SET(cpu, &set);
cpu                50 tools/testing/selftests/breakpoints/step_after_suspend_test.c bool run_test(int cpu)
cpu                61 tools/testing/selftests/breakpoints/step_after_suspend_test.c 		child(cpu);
cpu               170 tools/testing/selftests/breakpoints/step_after_suspend_test.c 	int cpu;
cpu               186 tools/testing/selftests/breakpoints/step_after_suspend_test.c 	for (cpu = 0; cpu < CPU_SETSIZE; cpu++) {
cpu               187 tools/testing/selftests/breakpoints/step_after_suspend_test.c 		if (!CPU_ISSET(cpu, &available_cpus))
cpu               200 tools/testing/selftests/breakpoints/step_after_suspend_test.c 	for (cpu = 0; cpu < CPU_SETSIZE; cpu++) {
cpu               203 tools/testing/selftests/breakpoints/step_after_suspend_test.c 		if (!CPU_ISSET(cpu, &available_cpus))
cpu               206 tools/testing/selftests/breakpoints/step_after_suspend_test.c 		test_success = run_test(cpu);
cpu               208 tools/testing/selftests/breakpoints/step_after_suspend_test.c 			ksft_test_result_pass("CPU %d\n", cpu);
cpu               210 tools/testing/selftests/breakpoints/step_after_suspend_test.c 			ksft_test_result_fail("CPU %d\n", cpu);
cpu                20 tools/testing/selftests/intel_pstate/aperf.c 	unsigned int i, cpu, fd;
cpu                35 tools/testing/selftests/intel_pstate/aperf.c 	cpu = strtol(argv[1], (char **) NULL, 10);
cpu                42 tools/testing/selftests/intel_pstate/aperf.c 	sprintf(msr_file_name, "/dev/cpu/%d/msr", cpu);
cpu                46 tools/testing/selftests/intel_pstate/aperf.c 		printf("/dev/cpu/%d/msr: %s\n", cpu, strerror(errno));
cpu                51 tools/testing/selftests/intel_pstate/aperf.c 	CPU_SET(cpu, &cpuset);
cpu                15 tools/testing/selftests/intel_pstate/msr.c 	int cpu, fd;
cpu                23 tools/testing/selftests/intel_pstate/msr.c 	cpu = strtol(argv[1], (char **) NULL, 10);
cpu                28 tools/testing/selftests/intel_pstate/msr.c 	sprintf(msr_file_name, "/dev/cpu/%d/msr", cpu);
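
Both intel_pstate selftests above open /dev/cpu/N/msr directly. A minimal sketch of reading one MSR that way, assuming root and the msr driver; the register number passed in is the caller's choice (e.g. 0xE8 for IA32_APERF as in aperf.c):

    #include <fcntl.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <unistd.h>

    static int read_msr(int cpu, uint32_t reg, uint64_t *val)
    {
            char path[64];
            int fd;

            snprintf(path, sizeof(path), "/dev/cpu/%d/msr", cpu);
            fd = open(path, O_RDONLY);
            if (fd < 0)
                    return -1;      /* needs root and the msr driver loaded */
            /* The MSR number is simply the file offset. */
            if (pread(fd, val, sizeof(*val), reg) != sizeof(*val)) {
                    close(fd);
                    return -1;
            }
            close(fd);
            return 0;
    }
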
cpu               542 tools/testing/selftests/mqueue/mq_perf_tests.c 	int i, cpu, rc;
cpu               575 tools/testing/selftests/mqueue/mq_perf_tests.c 				cpu = atoi(option);
cpu               576 tools/testing/selftests/mqueue/mq_perf_tests.c 				if (cpu >= cpus_online)
cpu               579 tools/testing/selftests/mqueue/mq_perf_tests.c 						cpu);
cpu               581 tools/testing/selftests/mqueue/mq_perf_tests.c 					cpus_to_pin[num_cpus_to_pin++] = cpu;
cpu               587 tools/testing/selftests/mqueue/mq_perf_tests.c 			for (cpu = 0; cpu < num_cpus_to_pin; cpu++) {
cpu               588 tools/testing/selftests/mqueue/mq_perf_tests.c 				if (CPU_ISSET_S(cpus_to_pin[cpu], cpu_set_size,
cpu               594 tools/testing/selftests/mqueue/mq_perf_tests.c 					CPU_SET_S(cpus_to_pin[cpu],
cpu               684 tools/testing/selftests/mqueue/mq_perf_tests.c 	for (cpu = 1; cpu < num_cpus_to_pin; cpu++)
cpu               685 tools/testing/selftests/mqueue/mq_perf_tests.c 			printf(",%d", cpus_to_pin[cpu]);
cpu               121 tools/testing/selftests/net/msg_zerocopy.c static int do_setcpu(int cpu)
cpu               126 tools/testing/selftests/net/msg_zerocopy.c 	CPU_SET(cpu, &mask);
cpu               128 tools/testing/selftests/net/msg_zerocopy.c 		error(1, 0, "setaffinity %d", cpu);
cpu               131 tools/testing/selftests/net/msg_zerocopy.c 		fprintf(stderr, "cpu: %u\n", cpu);
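
do_setcpu() in msg_zerocopy.c, like set_cpu() in udpgso_bench_tx.c and the powerpc benchmarks further down, uses the same affinity idiom. Shown standalone as a minimal sketch (pin_to_cpu() is an illustrative name):

    #define _GNU_SOURCE
    #include <sched.h>
    #include <stdio.h>

    static int pin_to_cpu(int cpu)
    {
            cpu_set_t mask;

            CPU_ZERO(&mask);
            CPU_SET(cpu, &mask);
            /* pid 0 means the calling thread. */
            if (sched_setaffinity(0, sizeof(mask), &mask)) {
                    perror("sched_setaffinity");
                    return -1;
            }
            return 0;
    }
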
cpu               187 tools/testing/selftests/net/reuseport_bpf_cpu.c 	int epfd, cpu;
cpu               195 tools/testing/selftests/net/reuseport_bpf_cpu.c 	for (cpu = 0; cpu < len; ++cpu) {
cpu               197 tools/testing/selftests/net/reuseport_bpf_cpu.c 		ev.data.fd = rcv_fd[cpu];
cpu               198 tools/testing/selftests/net/reuseport_bpf_cpu.c 		if (epoll_ctl(epfd, EPOLL_CTL_ADD, rcv_fd[cpu], &ev))
cpu               203 tools/testing/selftests/net/reuseport_bpf_cpu.c 	for (cpu = 0; cpu < len; ++cpu) {
cpu               204 tools/testing/selftests/net/reuseport_bpf_cpu.c 		send_from_cpu(cpu, family, proto);
cpu               205 tools/testing/selftests/net/reuseport_bpf_cpu.c 		receive_on_cpu(rcv_fd, len, epfd, cpu, proto);
cpu               209 tools/testing/selftests/net/reuseport_bpf_cpu.c 	for (cpu = len - 1; cpu >= 0; --cpu) {
cpu               210 tools/testing/selftests/net/reuseport_bpf_cpu.c 		send_from_cpu(cpu, family, proto);
cpu               211 tools/testing/selftests/net/reuseport_bpf_cpu.c 		receive_on_cpu(rcv_fd, len, epfd, cpu, proto);
cpu               215 tools/testing/selftests/net/reuseport_bpf_cpu.c 	for (cpu = 0; cpu < len; cpu += 2) {
cpu               216 tools/testing/selftests/net/reuseport_bpf_cpu.c 		send_from_cpu(cpu, family, proto);
cpu               217 tools/testing/selftests/net/reuseport_bpf_cpu.c 		receive_on_cpu(rcv_fd, len, epfd, cpu, proto);
cpu               221 tools/testing/selftests/net/reuseport_bpf_cpu.c 	for (cpu = 1; cpu < len; cpu += 2) {
cpu               222 tools/testing/selftests/net/reuseport_bpf_cpu.c 		send_from_cpu(cpu, family, proto);
cpu               223 tools/testing/selftests/net/reuseport_bpf_cpu.c 		receive_on_cpu(rcv_fd, len, epfd, cpu, proto);
cpu               227 tools/testing/selftests/net/reuseport_bpf_cpu.c 	for (cpu = 0; cpu < len; ++cpu)
cpu               228 tools/testing/selftests/net/reuseport_bpf_cpu.c 		close(rcv_fd[cpu]);
cpu               103 tools/testing/selftests/net/udpgso_bench_tx.c static int set_cpu(int cpu)
cpu               108 tools/testing/selftests/net/udpgso_bench_tx.c 	CPU_SET(cpu, &mask);
cpu               110 tools/testing/selftests/net/udpgso_bench_tx.c 		error(1, 0, "setaffinity %d", cpu);
cpu                73 tools/testing/selftests/powerpc/benchmarks/context_switch.c static void start_thread_on(void *(*fn)(void *), void *arg, unsigned long cpu)
cpu                81 tools/testing/selftests/powerpc/benchmarks/context_switch.c 	CPU_SET(cpu, &cpuset);
cpu               105 tools/testing/selftests/powerpc/benchmarks/context_switch.c static void start_process_on(void *(*fn)(void *), void *arg, unsigned long cpu)
cpu               120 tools/testing/selftests/powerpc/benchmarks/context_switch.c 	CPU_SET(cpu, &cpuset);
cpu               410 tools/testing/selftests/powerpc/benchmarks/context_switch.c 	static void (*start_fn)(void *(*fn)(void *), void *arg, unsigned long cpu);
cpu                30 tools/testing/selftests/powerpc/benchmarks/fork.c static void set_cpu(int cpu)
cpu                34 tools/testing/selftests/powerpc/benchmarks/fork.c 	if (cpu == -1)
cpu                38 tools/testing/selftests/powerpc/benchmarks/fork.c 	CPU_SET(cpu, &cpuset);
cpu                46 tools/testing/selftests/powerpc/benchmarks/fork.c static void start_process_on(void *(*fn)(void *), void *arg, int cpu)
cpu                59 tools/testing/selftests/powerpc/benchmarks/fork.c 	set_cpu(cpu);
cpu                66 tools/testing/selftests/powerpc/benchmarks/fork.c static int cpu;
cpu               148 tools/testing/selftests/powerpc/benchmarks/fork.c 	if (cpu != -1) {
cpu               150 tools/testing/selftests/powerpc/benchmarks/fork.c 		CPU_SET(cpu, &cpuset);
cpu               291 tools/testing/selftests/powerpc/benchmarks/fork.c 		cpu = -1;
cpu               293 tools/testing/selftests/powerpc/benchmarks/fork.c 		cpu = atoi(argv[optind++]);
cpu               299 tools/testing/selftests/powerpc/benchmarks/fork.c 	set_cpu(cpu);
cpu               312 tools/testing/selftests/powerpc/benchmarks/fork.c 	printf(" on cpu %d\n", cpu);
cpu               319 tools/testing/selftests/powerpc/benchmarks/fork.c 	start_process_on(bench_proc, NULL, cpu);
cpu                39 tools/testing/selftests/powerpc/dscr/dscr_sysfs_thread_test.c 	int cpu;
cpu                41 tools/testing/selftests/powerpc/dscr/dscr_sysfs_thread_test.c 	for (cpu = 0; cpu < CPU_SETSIZE; cpu++) {
cpu                43 tools/testing/selftests/powerpc/dscr/dscr_sysfs_thread_test.c 		CPU_SET(cpu, &mask);
cpu               398 tools/testing/selftests/powerpc/mm/tlbie_test.c static void set_pthread_cpu(pthread_t th, int cpu)
cpu               404 tools/testing/selftests/powerpc/mm/tlbie_test.c 	CPU_SET(cpu, &run_cpu_mask);
cpu               414 tools/testing/selftests/powerpc/mm/tlbie_test.c static void set_mycpu(int cpu)
cpu               420 tools/testing/selftests/powerpc/mm/tlbie_test.c 	CPU_SET(cpu, &run_cpu_mask);
cpu                22 tools/testing/selftests/powerpc/pmu/ebb/cpu_event_pinned_vs_ebb_test.c static int setup_cpu_event(struct event *event, int cpu)
cpu                33 tools/testing/selftests/powerpc/pmu/ebb/cpu_event_pinned_vs_ebb_test.c 	FAIL_IF(event_open_with_cpu(event, cpu));
cpu                43 tools/testing/selftests/powerpc/pmu/ebb/cpu_event_pinned_vs_ebb_test.c 	int cpu, rc;
cpu                48 tools/testing/selftests/powerpc/pmu/ebb/cpu_event_pinned_vs_ebb_test.c 	cpu = pick_online_cpu();
cpu                49 tools/testing/selftests/powerpc/pmu/ebb/cpu_event_pinned_vs_ebb_test.c 	FAIL_IF(cpu < 0);
cpu                50 tools/testing/selftests/powerpc/pmu/ebb/cpu_event_pinned_vs_ebb_test.c 	FAIL_IF(bind_to_cpu(cpu));
cpu                62 tools/testing/selftests/powerpc/pmu/ebb/cpu_event_pinned_vs_ebb_test.c 	rc = setup_cpu_event(&event, cpu);
cpu                22 tools/testing/selftests/powerpc/pmu/ebb/cpu_event_vs_ebb_test.c static int setup_cpu_event(struct event *event, int cpu)
cpu                31 tools/testing/selftests/powerpc/pmu/ebb/cpu_event_vs_ebb_test.c 	FAIL_IF(event_open_with_cpu(event, cpu));
cpu                41 tools/testing/selftests/powerpc/pmu/ebb/cpu_event_vs_ebb_test.c 	int cpu, rc;
cpu                46 tools/testing/selftests/powerpc/pmu/ebb/cpu_event_vs_ebb_test.c 	cpu = pick_online_cpu();
cpu                47 tools/testing/selftests/powerpc/pmu/ebb/cpu_event_vs_ebb_test.c 	FAIL_IF(cpu < 0);
cpu                48 tools/testing/selftests/powerpc/pmu/ebb/cpu_event_vs_ebb_test.c 	FAIL_IF(bind_to_cpu(cpu));
cpu                60 tools/testing/selftests/powerpc/pmu/ebb/cpu_event_vs_ebb_test.c 	rc = setup_cpu_event(&event, cpu);
cpu                22 tools/testing/selftests/powerpc/pmu/ebb/ebb_vs_cpu_event_test.c static int setup_cpu_event(struct event *event, int cpu)
cpu                31 tools/testing/selftests/powerpc/pmu/ebb/ebb_vs_cpu_event_test.c 	FAIL_IF(event_open_with_cpu(event, cpu));
cpu                41 tools/testing/selftests/powerpc/pmu/ebb/ebb_vs_cpu_event_test.c 	int cpu, rc;
cpu                46 tools/testing/selftests/powerpc/pmu/ebb/ebb_vs_cpu_event_test.c 	cpu = pick_online_cpu();
cpu                47 tools/testing/selftests/powerpc/pmu/ebb/ebb_vs_cpu_event_test.c 	FAIL_IF(cpu < 0);
cpu                48 tools/testing/selftests/powerpc/pmu/ebb/ebb_vs_cpu_event_test.c 	FAIL_IF(bind_to_cpu(cpu));
cpu                63 tools/testing/selftests/powerpc/pmu/ebb/ebb_vs_cpu_event_test.c 	rc = setup_cpu_event(&event, cpu);
cpu                80 tools/testing/selftests/powerpc/pmu/ebb/multi_ebb_procs_test.c 	int cpu, rc, i;
cpu                84 tools/testing/selftests/powerpc/pmu/ebb/multi_ebb_procs_test.c 	cpu = pick_online_cpu();
cpu                85 tools/testing/selftests/powerpc/pmu/ebb/multi_ebb_procs_test.c 	FAIL_IF(cpu < 0);
cpu                86 tools/testing/selftests/powerpc/pmu/ebb/multi_ebb_procs_test.c 	FAIL_IF(bind_to_cpu(cpu));
cpu                16 tools/testing/selftests/powerpc/pmu/event.c int perf_event_open(struct perf_event_attr *attr, pid_t pid, int cpu,
cpu                19 tools/testing/selftests/powerpc/pmu/event.c 	return syscall(__NR_perf_event_open, attr, pid, cpu,
cpu                52 tools/testing/selftests/powerpc/pmu/event.c int event_open_with_options(struct event *e, pid_t pid, int cpu, int group_fd)
cpu                54 tools/testing/selftests/powerpc/pmu/event.c 	e->fd = perf_event_open(&e->attr, pid, cpu, group_fd, 0);
cpu                73 tools/testing/selftests/powerpc/pmu/event.c int event_open_with_cpu(struct event *e, int cpu)
cpu                75 tools/testing/selftests/powerpc/pmu/event.c 	return event_open_with_options(e, PERF_NO_PID, cpu, PERF_NO_GROUP);
cpu                30 tools/testing/selftests/powerpc/pmu/event.h int event_open_with_options(struct event *e, pid_t pid, int cpu, int group_fd);
cpu                33 tools/testing/selftests/powerpc/pmu/event.h int event_open_with_cpu(struct event *e, int cpu);
cpu                18 tools/testing/selftests/powerpc/pmu/lib.c int bind_to_cpu(int cpu)
cpu                22 tools/testing/selftests/powerpc/pmu/lib.c 	printf("Binding to cpu %d\n", cpu);
cpu                25 tools/testing/selftests/powerpc/pmu/lib.c 	CPU_SET(cpu, &mask);
cpu               119 tools/testing/selftests/powerpc/pmu/lib.c 	int cpu, rc;
cpu               122 tools/testing/selftests/powerpc/pmu/lib.c 	cpu = pick_online_cpu();
cpu               123 tools/testing/selftests/powerpc/pmu/lib.c 	FAIL_IF(cpu < 0);
cpu               124 tools/testing/selftests/powerpc/pmu/lib.c 	FAIL_IF(bind_to_cpu(cpu));
cpu                22 tools/testing/selftests/powerpc/pmu/lib.h extern int bind_to_cpu(int cpu);
cpu                38 tools/testing/selftests/powerpc/ptrace/perf-hwbreak.c 				      int cpu, int group_fd,
cpu                42 tools/testing/selftests/powerpc/ptrace/perf-hwbreak.c 	return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
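
Both powerpc/pmu/event.c and ptrace/perf-hwbreak.c define the same raw wrapper, since glibc provides no stub for perf_event_open. A standalone sketch of that wrapper and of what the pid/cpu pair selects:

    #define _GNU_SOURCE
    #include <linux/perf_event.h>
    #include <sys/syscall.h>
    #include <sys/types.h>
    #include <unistd.h>

    static int sys_perf_event_open(struct perf_event_attr *attr, pid_t pid,
                                   int cpu, int group_fd, unsigned long flags)
    {
            /*
             * pid == -1 with cpu >= 0 counts all tasks on that CPU (needs
             * privilege); pid >= 0 with cpu == -1 counts one task on any CPU.
             */
            return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
    }
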
cpu                41 tools/testing/selftests/powerpc/tm/tm-tmspr.c 	int i, cpu;
cpu                46 tools/testing/selftests/powerpc/tm/tm-tmspr.c 	cpu = (unsigned long)in >> 1;
cpu                47 tools/testing/selftests/powerpc/tm/tm-tmspr.c 	CPU_SET(cpu, &cpuset);
cpu                92 tools/testing/selftests/powerpc/utils.c 	int cpu;
cpu               102 tools/testing/selftests/powerpc/utils.c 	for (cpu = 8; cpu < CPU_SETSIZE; cpu += 8)
cpu               103 tools/testing/selftests/powerpc/utils.c 		if (CPU_ISSET(cpu, &mask))
cpu               104 tools/testing/selftests/powerpc/utils.c 			return cpu;
cpu               107 tools/testing/selftests/powerpc/utils.c 	for (cpu = CPU_SETSIZE - 1; cpu >= 0; cpu--)
cpu               108 tools/testing/selftests/powerpc/utils.c 		if (CPU_ISSET(cpu, &mask))
cpu               109 tools/testing/selftests/powerpc/utils.c 			return cpu;
cpu               175 tools/testing/selftests/powerpc/utils.c 		int cpu, int group_fd, unsigned long flags)
cpu               177 tools/testing/selftests/powerpc/utils.c 	return syscall(__NR_perf_event_open, hw_event, pid, cpu,
cpu               118 tools/testing/selftests/powerpc/vphn/asm/lppaca.h #define lppaca_of(cpu)	(*paca_ptrs[cpu]->lppaca_ptr)
cpu               190 tools/testing/selftests/powerpc/vphn/asm/lppaca.h extern void register_dtl_buffer(int cpu);
cpu               192 tools/testing/selftests/powerpc/vphn/asm/lppaca.h extern long hcall_vphn(unsigned long cpu, u64 flags, __be32 *associativity);
cpu                79 tools/testing/selftests/powerpc/vphn/vphn.c long hcall_vphn(unsigned long cpu, u64 flags, __be32 *associativity)
cpu                84 tools/testing/selftests/powerpc/vphn/vphn.c 	rc = plpar_hcall9(H_HOME_NODE_ASSOCIATIVITY, retbuf, flags, cpu);
cpu                47 tools/testing/selftests/proc/proc-uptime-002.c 	unsigned int cpu;
cpu                64 tools/testing/selftests/proc/proc-uptime-002.c 	for (cpu = 0; cpu < len * 8; cpu++) {
cpu                66 tools/testing/selftests/proc/proc-uptime-002.c 		m[cpu / (8 * sizeof(unsigned long))] |= 1UL << (cpu % (8 * sizeof(unsigned long)));
cpu                28 tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/percpu.h #define per_cpu_ptr(ptr, cpu) \
cpu                29 tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/percpu.h 	((typeof(ptr)) ((char *) (ptr) + PERCPU_OFFSET * cpu))
cpu                90 tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/percpu.h #define for_each_possible_cpu(cpu) \
cpu                91 tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/percpu.h 	for ((cpu) = 0; (cpu) < NR_CPUS; ++(cpu))
cpu                45 tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/workqueues.h 	int cpu;
cpu                55 tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/workqueues.h static inline bool schedule_work_on(int cpu, struct work_struct *work)
cpu                50 tools/testing/selftests/rseq/basic_percpu_ops_test.c 	int cpu;
cpu                55 tools/testing/selftests/rseq/basic_percpu_ops_test.c 		cpu = rseq_cpu_start();
cpu                56 tools/testing/selftests/rseq/basic_percpu_ops_test.c 		ret = rseq_cmpeqv_storev(&lock->c[cpu].v,
cpu                57 tools/testing/selftests/rseq/basic_percpu_ops_test.c 					 0, 1, cpu);
cpu                67 tools/testing/selftests/rseq/basic_percpu_ops_test.c 	return cpu;
cpu                70 tools/testing/selftests/rseq/basic_percpu_ops_test.c void rseq_percpu_unlock(struct percpu_lock *lock, int cpu)
cpu                72 tools/testing/selftests/rseq/basic_percpu_ops_test.c 	assert(lock->c[cpu].v == 1);
cpu                77 tools/testing/selftests/rseq/basic_percpu_ops_test.c 	rseq_smp_store_release(&lock->c[cpu].v, 0);
cpu                83 tools/testing/selftests/rseq/basic_percpu_ops_test.c 	int i, cpu;
cpu                91 tools/testing/selftests/rseq/basic_percpu_ops_test.c 		cpu = rseq_this_cpu_lock(&data->lock);
cpu                92 tools/testing/selftests/rseq/basic_percpu_ops_test.c 		data->c[cpu].count++;
cpu                93 tools/testing/selftests/rseq/basic_percpu_ops_test.c 		rseq_percpu_unlock(&data->lock, cpu);
cpu               139 tools/testing/selftests/rseq/basic_percpu_ops_test.c 	int cpu;
cpu               145 tools/testing/selftests/rseq/basic_percpu_ops_test.c 		cpu = rseq_cpu_start();
cpu               147 tools/testing/selftests/rseq/basic_percpu_ops_test.c 		expect = (intptr_t)RSEQ_READ_ONCE(list->c[cpu].head);
cpu               149 tools/testing/selftests/rseq/basic_percpu_ops_test.c 		targetptr = (intptr_t *)&list->c[cpu].head;
cpu               151 tools/testing/selftests/rseq/basic_percpu_ops_test.c 		ret = rseq_cmpeqv_storev(targetptr, expect, newval, cpu);
cpu               157 tools/testing/selftests/rseq/basic_percpu_ops_test.c 		*_cpu = cpu;
cpu               172 tools/testing/selftests/rseq/basic_percpu_ops_test.c 		int ret, cpu;
cpu               174 tools/testing/selftests/rseq/basic_percpu_ops_test.c 		cpu = rseq_cpu_start();
cpu               175 tools/testing/selftests/rseq/basic_percpu_ops_test.c 		targetptr = (intptr_t *)&list->c[cpu].head;
cpu               180 tools/testing/selftests/rseq/basic_percpu_ops_test.c 						 offset, load, cpu);
cpu               183 tools/testing/selftests/rseq/basic_percpu_ops_test.c 				*_cpu = cpu;
cpu               196 tools/testing/selftests/rseq/basic_percpu_ops_test.c struct percpu_list_node *__percpu_list_pop(struct percpu_list *list, int cpu)
cpu               200 tools/testing/selftests/rseq/basic_percpu_ops_test.c 	node = list->c[cpu].head;
cpu               203 tools/testing/selftests/rseq/basic_percpu_ops_test.c 	list->c[cpu].head = node->next;
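
The basic_percpu_ops_test.c lines above implement a per-CPU spinlock on top of restartable sequences: the lock slot of the CPU the thread currently runs on is taken with a compare-and-store that aborts if the thread migrates. A simplified, hedged sketch of that lock/unlock pair; the struct layout approximates the test's, and the acquire barrier the real test issues after the loop is omitted here:

    #include <sched.h>
    #include <stdint.h>
    #include "rseq.h"           /* selftest header providing the rseq_* helpers */

    struct percpu_lock_entry {
            intptr_t v;
    } __attribute__((aligned(128)));    /* pad slots to avoid false sharing */

    struct percpu_lock {
            struct percpu_lock_entry c[CPU_SETSIZE];
    };

    static int this_cpu_lock(struct percpu_lock *lock)
    {
            int cpu;

            for (;;) {
                    int ret;

                    cpu = rseq_cpu_start();
                    ret = rseq_cmpeqv_storev(&lock->c[cpu].v, 0, 1, cpu);
                    if (rseq_likely(!ret))
                            break;      /* slot was 0 and is now 1: lock taken */
                    /* Retry if the comparison failed or the rseq section aborted. */
            }
            return cpu;     /* caller must unlock on this same CPU index */
    }

    static void this_cpu_unlock(struct percpu_lock *lock, int cpu)
    {
            rseq_smp_store_release(&lock->c[cpu].v, 0);
    }
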
cpu               328 tools/testing/selftests/rseq/param_test.c 	int cpu;
cpu               333 tools/testing/selftests/rseq/param_test.c 		cpu = rseq_cpu_start();
cpu               334 tools/testing/selftests/rseq/param_test.c 		ret = rseq_cmpeqv_storev(&lock->c[cpu].v,
cpu               335 tools/testing/selftests/rseq/param_test.c 					 0, 1, cpu);
cpu               345 tools/testing/selftests/rseq/param_test.c 	return cpu;
cpu               348 tools/testing/selftests/rseq/param_test.c static void rseq_percpu_unlock(struct percpu_lock *lock, int cpu)
cpu               350 tools/testing/selftests/rseq/param_test.c 	assert(lock->c[cpu].v == 1);
cpu               355 tools/testing/selftests/rseq/param_test.c 	rseq_smp_store_release(&lock->c[cpu].v, 0);
cpu               369 tools/testing/selftests/rseq/param_test.c 		int cpu = rseq_cpu_start();
cpu               371 tools/testing/selftests/rseq/param_test.c 		cpu = rseq_this_cpu_lock(&data->lock);
cpu               372 tools/testing/selftests/rseq/param_test.c 		data->c[cpu].count++;
cpu               373 tools/testing/selftests/rseq/param_test.c 		rseq_percpu_unlock(&data->lock, cpu);
cpu               451 tools/testing/selftests/rseq/param_test.c 			int cpu;
cpu               453 tools/testing/selftests/rseq/param_test.c 			cpu = rseq_cpu_start();
cpu               454 tools/testing/selftests/rseq/param_test.c 			ret = rseq_addv(&data->c[cpu].count, 1, cpu);
cpu               517 tools/testing/selftests/rseq/param_test.c 	int cpu;
cpu               523 tools/testing/selftests/rseq/param_test.c 		cpu = rseq_cpu_start();
cpu               525 tools/testing/selftests/rseq/param_test.c 		expect = (intptr_t)RSEQ_READ_ONCE(list->c[cpu].head);
cpu               527 tools/testing/selftests/rseq/param_test.c 		targetptr = (intptr_t *)&list->c[cpu].head;
cpu               529 tools/testing/selftests/rseq/param_test.c 		ret = rseq_cmpeqv_storev(targetptr, expect, newval, cpu);
cpu               535 tools/testing/selftests/rseq/param_test.c 		*_cpu = cpu;
cpu               547 tools/testing/selftests/rseq/param_test.c 	int cpu;
cpu               555 tools/testing/selftests/rseq/param_test.c 		cpu = rseq_cpu_start();
cpu               556 tools/testing/selftests/rseq/param_test.c 		targetptr = (intptr_t *)&list->c[cpu].head;
cpu               561 tools/testing/selftests/rseq/param_test.c 						   offset, load, cpu);
cpu               571 tools/testing/selftests/rseq/param_test.c 		*_cpu = cpu;
cpu               579 tools/testing/selftests/rseq/param_test.c struct percpu_list_node *__percpu_list_pop(struct percpu_list *list, int cpu)
cpu               583 tools/testing/selftests/rseq/param_test.c 	node = list->c[cpu].head;
cpu               586 tools/testing/selftests/rseq/param_test.c 	list->c[cpu].head = node->next;
cpu               691 tools/testing/selftests/rseq/param_test.c 	int cpu;
cpu               699 tools/testing/selftests/rseq/param_test.c 		cpu = rseq_cpu_start();
cpu               700 tools/testing/selftests/rseq/param_test.c 		offset = RSEQ_READ_ONCE(buffer->c[cpu].offset);
cpu               701 tools/testing/selftests/rseq/param_test.c 		if (offset == buffer->c[cpu].buflen)
cpu               704 tools/testing/selftests/rseq/param_test.c 		targetptr_spec = (intptr_t *)&buffer->c[cpu].array[offset];
cpu               706 tools/testing/selftests/rseq/param_test.c 		targetptr_final = &buffer->c[cpu].offset;
cpu               710 tools/testing/selftests/rseq/param_test.c 				newval_spec, newval_final, cpu);
cpu               714 tools/testing/selftests/rseq/param_test.c 				newval_final, cpu);
cpu               722 tools/testing/selftests/rseq/param_test.c 		*_cpu = cpu;
cpu               730 tools/testing/selftests/rseq/param_test.c 	int cpu;
cpu               737 tools/testing/selftests/rseq/param_test.c 		cpu = rseq_cpu_start();
cpu               739 tools/testing/selftests/rseq/param_test.c 		offset = RSEQ_READ_ONCE(buffer->c[cpu].offset);
cpu               744 tools/testing/selftests/rseq/param_test.c 		head = RSEQ_READ_ONCE(buffer->c[cpu].array[offset - 1]);
cpu               746 tools/testing/selftests/rseq/param_test.c 		targetptr = (intptr_t *)&buffer->c[cpu].offset;
cpu               748 tools/testing/selftests/rseq/param_test.c 			(intptr_t *)&buffer->c[cpu].array[offset - 1],
cpu               749 tools/testing/selftests/rseq/param_test.c 			(intptr_t)head, newval, cpu);
cpu               755 tools/testing/selftests/rseq/param_test.c 		*_cpu = cpu;
cpu               764 tools/testing/selftests/rseq/param_test.c 					       int cpu)
cpu               769 tools/testing/selftests/rseq/param_test.c 	offset = buffer->c[cpu].offset;
cpu               772 tools/testing/selftests/rseq/param_test.c 	head = buffer->c[cpu].array[offset - 1];
cpu               773 tools/testing/selftests/rseq/param_test.c 	buffer->c[cpu].offset = offset - 1;
cpu               896 tools/testing/selftests/rseq/param_test.c 	int cpu;
cpu               904 tools/testing/selftests/rseq/param_test.c 		cpu = rseq_cpu_start();
cpu               906 tools/testing/selftests/rseq/param_test.c 		offset = RSEQ_READ_ONCE(buffer->c[cpu].offset);
cpu               907 tools/testing/selftests/rseq/param_test.c 		if (offset == buffer->c[cpu].buflen)
cpu               909 tools/testing/selftests/rseq/param_test.c 		destptr = (char *)&buffer->c[cpu].array[offset];
cpu               914 tools/testing/selftests/rseq/param_test.c 		targetptr_final = &buffer->c[cpu].offset;
cpu               919 tools/testing/selftests/rseq/param_test.c 				newval_final, cpu);
cpu               923 tools/testing/selftests/rseq/param_test.c 				newval_final, cpu);
cpu               931 tools/testing/selftests/rseq/param_test.c 		*_cpu = cpu;
cpu               940 tools/testing/selftests/rseq/param_test.c 	int cpu;
cpu               948 tools/testing/selftests/rseq/param_test.c 		cpu = rseq_cpu_start();
cpu               950 tools/testing/selftests/rseq/param_test.c 		offset = RSEQ_READ_ONCE(buffer->c[cpu].offset);
cpu               954 tools/testing/selftests/rseq/param_test.c 		srcptr = (char *)&buffer->c[cpu].array[offset - 1];
cpu               958 tools/testing/selftests/rseq/param_test.c 		targetptr_final = &buffer->c[cpu].offset;
cpu               961 tools/testing/selftests/rseq/param_test.c 			newval_final, cpu);
cpu               969 tools/testing/selftests/rseq/param_test.c 		*_cpu = cpu;
cpu               979 tools/testing/selftests/rseq/param_test.c 				int cpu)
cpu               983 tools/testing/selftests/rseq/param_test.c 	offset = buffer->c[cpu].offset;
cpu               986 tools/testing/selftests/rseq/param_test.c 	memcpy(item, &buffer->c[cpu].array[offset - 1], sizeof(*item));
cpu               987 tools/testing/selftests/rseq/param_test.c 	buffer->c[cpu].offset = offset - 1;
cpu               153 tools/testing/selftests/rseq/rseq-arm.h int rseq_cmpeqv_storev(intptr_t *v, intptr_t expect, intptr_t newv, int cpu)
cpu               187 tools/testing/selftests/rseq/rseq-arm.h 		: [cpu_id]		"r" (cpu),
cpu               220 tools/testing/selftests/rseq/rseq-arm.h 			       off_t voffp, intptr_t *load, int cpu)
cpu               257 tools/testing/selftests/rseq/rseq-arm.h 		: [cpu_id]		"r" (cpu),
cpu               291 tools/testing/selftests/rseq/rseq-arm.h int rseq_addv(intptr_t *v, intptr_t count, int cpu)
cpu               318 tools/testing/selftests/rseq/rseq-arm.h 		: [cpu_id]		"r" (cpu),
cpu               346 tools/testing/selftests/rseq/rseq-arm.h 				 intptr_t newv, int cpu)
cpu               383 tools/testing/selftests/rseq/rseq-arm.h 		: [cpu_id]		"r" (cpu),
cpu               421 tools/testing/selftests/rseq/rseq-arm.h 					 intptr_t newv, int cpu)
cpu               459 tools/testing/selftests/rseq/rseq-arm.h 		: [cpu_id]		"r" (cpu),
cpu               497 tools/testing/selftests/rseq/rseq-arm.h 			      intptr_t newv, int cpu)
cpu               539 tools/testing/selftests/rseq/rseq-arm.h 		: [cpu_id]		"r" (cpu),
cpu               579 tools/testing/selftests/rseq/rseq-arm.h 				 intptr_t newv, int cpu)
cpu               659 tools/testing/selftests/rseq/rseq-arm.h 		: [cpu_id]		"r" (cpu),
cpu               703 tools/testing/selftests/rseq/rseq-arm.h 					 intptr_t newv, int cpu)
cpu               784 tools/testing/selftests/rseq/rseq-arm.h 		: [cpu_id]		"r" (cpu),
cpu               208 tools/testing/selftests/rseq/rseq-arm64.h int rseq_cmpeqv_storev(intptr_t *v, intptr_t expect, intptr_t newv, int cpu)
cpu               232 tools/testing/selftests/rseq/rseq-arm64.h 		: [cpu_id]		"r" (cpu),
cpu               262 tools/testing/selftests/rseq/rseq-arm64.h 			       off_t voffp, intptr_t *load, int cpu)
cpu               289 tools/testing/selftests/rseq/rseq-arm64.h 		: [cpu_id]		"r" (cpu),
cpu               318 tools/testing/selftests/rseq/rseq-arm64.h int rseq_addv(intptr_t *v, intptr_t count, int cpu)
cpu               339 tools/testing/selftests/rseq/rseq-arm64.h 		: [cpu_id]		"r" (cpu),
cpu               364 tools/testing/selftests/rseq/rseq-arm64.h 				 intptr_t newv, int cpu)
cpu               390 tools/testing/selftests/rseq/rseq-arm64.h 		: [cpu_id]		"r" (cpu),
cpu               423 tools/testing/selftests/rseq/rseq-arm64.h 					 intptr_t newv, int cpu)
cpu               449 tools/testing/selftests/rseq/rseq-arm64.h 		: [cpu_id]		"r" (cpu),
cpu               482 tools/testing/selftests/rseq/rseq-arm64.h 			      intptr_t newv, int cpu)
cpu               510 tools/testing/selftests/rseq/rseq-arm64.h 		: [cpu_id]		"r" (cpu),
cpu               545 tools/testing/selftests/rseq/rseq-arm64.h 				 intptr_t newv, int cpu)
cpu               571 tools/testing/selftests/rseq/rseq-arm64.h 		: [cpu_id]		"r" (cpu),
cpu               605 tools/testing/selftests/rseq/rseq-arm64.h 					 intptr_t newv, int cpu)
cpu               631 tools/testing/selftests/rseq/rseq-arm64.h 		: [cpu_id]		"r" (cpu),
cpu               160 tools/testing/selftests/rseq/rseq-mips.h int rseq_cmpeqv_storev(intptr_t *v, intptr_t expect, intptr_t newv, int cpu)
cpu               192 tools/testing/selftests/rseq/rseq-mips.h 		: [cpu_id]		"r" (cpu),
cpu               225 tools/testing/selftests/rseq/rseq-mips.h 			       off_t voffp, intptr_t *load, int cpu)
cpu               260 tools/testing/selftests/rseq/rseq-mips.h 		: [cpu_id]		"r" (cpu),
cpu               294 tools/testing/selftests/rseq/rseq-mips.h int rseq_addv(intptr_t *v, intptr_t count, int cpu)
cpu               321 tools/testing/selftests/rseq/rseq-mips.h 		: [cpu_id]		"r" (cpu),
cpu               349 tools/testing/selftests/rseq/rseq-mips.h 				 intptr_t newv, int cpu)
cpu               384 tools/testing/selftests/rseq/rseq-mips.h 		: [cpu_id]		"r" (cpu),
cpu               422 tools/testing/selftests/rseq/rseq-mips.h 					 intptr_t newv, int cpu)
cpu               458 tools/testing/selftests/rseq/rseq-mips.h 		: [cpu_id]		"r" (cpu),
cpu               496 tools/testing/selftests/rseq/rseq-mips.h 			      intptr_t newv, int cpu)
cpu               534 tools/testing/selftests/rseq/rseq-mips.h 		: [cpu_id]		"r" (cpu),
cpu               574 tools/testing/selftests/rseq/rseq-mips.h 				 intptr_t newv, int cpu)
cpu               651 tools/testing/selftests/rseq/rseq-mips.h 		: [cpu_id]		"r" (cpu),
cpu               695 tools/testing/selftests/rseq/rseq-mips.h 					 intptr_t newv, int cpu)
cpu               773 tools/testing/selftests/rseq/rseq-mips.h 		: [cpu_id]		"r" (cpu),
cpu               207 tools/testing/selftests/rseq/rseq-ppc.h int rseq_cmpeqv_storev(intptr_t *v, intptr_t expect, intptr_t newv, int cpu)
cpu               237 tools/testing/selftests/rseq/rseq-ppc.h 		: [cpu_id]		"r" (cpu),
cpu               267 tools/testing/selftests/rseq/rseq-ppc.h 			       off_t voffp, intptr_t *load, int cpu)
cpu               303 tools/testing/selftests/rseq/rseq-ppc.h 		: [cpu_id]		"r" (cpu),
cpu               334 tools/testing/selftests/rseq/rseq-ppc.h int rseq_addv(intptr_t *v, intptr_t count, int cpu)
cpu               361 tools/testing/selftests/rseq/rseq-ppc.h 		: [cpu_id]		"r" (cpu),
cpu               388 tools/testing/selftests/rseq/rseq-ppc.h 				 intptr_t newv, int cpu)
cpu               421 tools/testing/selftests/rseq/rseq-ppc.h 		: [cpu_id]		"r" (cpu),
cpu               456 tools/testing/selftests/rseq/rseq-ppc.h 					 intptr_t newv, int cpu)
cpu               491 tools/testing/selftests/rseq/rseq-ppc.h 		: [cpu_id]		"r" (cpu),
cpu               526 tools/testing/selftests/rseq/rseq-ppc.h 			      intptr_t newv, int cpu)
cpu               562 tools/testing/selftests/rseq/rseq-ppc.h 		: [cpu_id]		"r" (cpu),
cpu               599 tools/testing/selftests/rseq/rseq-ppc.h 				 intptr_t newv, int cpu)
cpu               637 tools/testing/selftests/rseq/rseq-ppc.h 		: [cpu_id]		"r" (cpu),
cpu               673 tools/testing/selftests/rseq/rseq-ppc.h 					 intptr_t newv, int cpu)
cpu               713 tools/testing/selftests/rseq/rseq-ppc.h 		: [cpu_id]		"r" (cpu),
cpu               138 tools/testing/selftests/rseq/rseq-s390.h int rseq_cmpeqv_storev(intptr_t *v, intptr_t expect, intptr_t newv, int cpu)
cpu               167 tools/testing/selftests/rseq/rseq-s390.h 		: [cpu_id]		"r" (cpu),
cpu               201 tools/testing/selftests/rseq/rseq-s390.h 			       off_t voffp, intptr_t *load, int cpu)
cpu               235 tools/testing/selftests/rseq/rseq-s390.h 		: [cpu_id]		"r" (cpu),
cpu               266 tools/testing/selftests/rseq/rseq-s390.h int rseq_addv(intptr_t *v, intptr_t count, int cpu)
cpu               290 tools/testing/selftests/rseq/rseq-s390.h 		: [cpu_id]		"r" (cpu),
cpu               317 tools/testing/selftests/rseq/rseq-s390.h 				 intptr_t newv, int cpu)
cpu               349 tools/testing/selftests/rseq/rseq-s390.h 		: [cpu_id]		"r" (cpu),
cpu               385 tools/testing/selftests/rseq/rseq-s390.h 					 intptr_t newv, int cpu)
cpu               387 tools/testing/selftests/rseq/rseq-s390.h 	return rseq_cmpeqv_trystorev_storev(v, expect, v2, newv2, newv, cpu);
cpu               393 tools/testing/selftests/rseq/rseq-s390.h 			      intptr_t newv, int cpu)
cpu               428 tools/testing/selftests/rseq/rseq-s390.h 		: [cpu_id]		"r" (cpu),
cpu               465 tools/testing/selftests/rseq/rseq-s390.h 				 intptr_t newv, int cpu)
cpu               536 tools/testing/selftests/rseq/rseq-s390.h 		: [cpu_id]		"r" (cpu),
cpu               576 tools/testing/selftests/rseq/rseq-s390.h 					 intptr_t newv, int cpu)
cpu               579 tools/testing/selftests/rseq/rseq-s390.h 					    newv, cpu);
cpu                 9 tools/testing/selftests/rseq/rseq-skip.h int rseq_cmpeqv_storev(intptr_t *v, intptr_t expect, intptr_t newv, int cpu)
cpu                16 tools/testing/selftests/rseq/rseq-skip.h 			       off_t voffp, intptr_t *load, int cpu)
cpu                22 tools/testing/selftests/rseq/rseq-skip.h int rseq_addv(intptr_t *v, intptr_t count, int cpu)
cpu                30 tools/testing/selftests/rseq/rseq-skip.h 				 intptr_t newv, int cpu)
cpu                38 tools/testing/selftests/rseq/rseq-skip.h 					 intptr_t newv, int cpu)
cpu                46 tools/testing/selftests/rseq/rseq-skip.h 			      intptr_t newv, int cpu)
cpu                54 tools/testing/selftests/rseq/rseq-skip.h 				 intptr_t newv, int cpu)
cpu                62 tools/testing/selftests/rseq/rseq-skip.h 					 intptr_t newv, int cpu)
cpu               114 tools/testing/selftests/rseq/rseq-x86.h int rseq_cmpeqv_storev(intptr_t *v, intptr_t expect, intptr_t newv, int cpu)
cpu               143 tools/testing/selftests/rseq/rseq-x86.h 		: [cpu_id]		"r" (cpu),
cpu               175 tools/testing/selftests/rseq/rseq-x86.h 			       off_t voffp, intptr_t *load, int cpu)
cpu               209 tools/testing/selftests/rseq/rseq-x86.h 		: [cpu_id]		"r" (cpu),
cpu               238 tools/testing/selftests/rseq/rseq-x86.h int rseq_addv(intptr_t *v, intptr_t count, int cpu)
cpu               260 tools/testing/selftests/rseq/rseq-x86.h 		: [cpu_id]		"r" (cpu),
cpu               285 tools/testing/selftests/rseq/rseq-x86.h 				 intptr_t newv, int cpu)
cpu               317 tools/testing/selftests/rseq/rseq-x86.h 		: [cpu_id]		"r" (cpu),
cpu               351 tools/testing/selftests/rseq/rseq-x86.h 					 intptr_t newv, int cpu)
cpu               353 tools/testing/selftests/rseq/rseq-x86.h 	return rseq_cmpeqv_trystorev_storev(v, expect, v2, newv2, newv, cpu);
cpu               359 tools/testing/selftests/rseq/rseq-x86.h 			      intptr_t newv, int cpu)
cpu               394 tools/testing/selftests/rseq/rseq-x86.h 		: [cpu_id]		"r" (cpu),
cpu               429 tools/testing/selftests/rseq/rseq-x86.h 				 intptr_t newv, int cpu)
cpu               500 tools/testing/selftests/rseq/rseq-x86.h 		: [cpu_id]		"r" (cpu),
cpu               538 tools/testing/selftests/rseq/rseq-x86.h 					 intptr_t newv, int cpu)
cpu               541 tools/testing/selftests/rseq/rseq-x86.h 					    newv, cpu);
cpu               635 tools/testing/selftests/rseq/rseq-x86.h int rseq_cmpeqv_storev(intptr_t *v, intptr_t expect, intptr_t newv, int cpu)
cpu               664 tools/testing/selftests/rseq/rseq-x86.h 		: [cpu_id]		"r" (cpu),
cpu               696 tools/testing/selftests/rseq/rseq-x86.h 			       off_t voffp, intptr_t *load, int cpu)
cpu               730 tools/testing/selftests/rseq/rseq-x86.h 		: [cpu_id]		"r" (cpu),
cpu               759 tools/testing/selftests/rseq/rseq-x86.h int rseq_addv(intptr_t *v, intptr_t count, int cpu)
cpu               781 tools/testing/selftests/rseq/rseq-x86.h 		: [cpu_id]		"r" (cpu),
cpu               806 tools/testing/selftests/rseq/rseq-x86.h 				 intptr_t newv, int cpu)
cpu               839 tools/testing/selftests/rseq/rseq-x86.h 		: [cpu_id]		"r" (cpu),
cpu               872 tools/testing/selftests/rseq/rseq-x86.h 					 intptr_t newv, int cpu)
cpu               907 tools/testing/selftests/rseq/rseq-x86.h 		: [cpu_id]		"r" (cpu),
cpu               941 tools/testing/selftests/rseq/rseq-x86.h 			      intptr_t newv, int cpu)
cpu               977 tools/testing/selftests/rseq/rseq-x86.h 		: [cpu_id]		"r" (cpu),
cpu              1013 tools/testing/selftests/rseq/rseq-x86.h 				 intptr_t newv, int cpu)
cpu              1087 tools/testing/selftests/rseq/rseq-x86.h 		: [cpu_id]		"r" (cpu),
cpu              1125 tools/testing/selftests/rseq/rseq-x86.h 					 intptr_t newv, int cpu)
cpu              1200 tools/testing/selftests/rseq/rseq-x86.h 		: [cpu_id]		"r" (cpu),
cpu               131 tools/testing/selftests/rseq/rseq.c 	int32_t cpu;
cpu               133 tools/testing/selftests/rseq/rseq.c 	cpu = sched_getcpu();
cpu               134 tools/testing/selftests/rseq/rseq.c 	if (cpu < 0) {
cpu               138 tools/testing/selftests/rseq/rseq.c 	return cpu;
cpu               132 tools/testing/selftests/rseq/rseq.h 	int32_t cpu;
cpu               134 tools/testing/selftests/rseq/rseq.h 	cpu = rseq_current_cpu_raw();
cpu               135 tools/testing/selftests/rseq/rseq.h 	if (rseq_unlikely(cpu < 0))
cpu               136 tools/testing/selftests/rseq/rseq.h 		cpu = rseq_fallback_current_cpu();
cpu               137 tools/testing/selftests/rseq/rseq.h 	return cpu;
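
rseq_current_cpu() in rseq.h prefers the cpu_id field the kernel keeps up to date in the registered struct rseq and only drops to a syscall when that raw value is negative. The fallback in rseq.c is essentially the following (condensed sketch, not the file verbatim):

    #define _GNU_SOURCE
    #include <sched.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    static int32_t fallback_current_cpu(void)
    {
            int32_t cpu = sched_getcpu();

            if (cpu < 0) {
                    perror("sched_getcpu()");
                    abort();
            }
            return cpu;
    }
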
cpu               286 tools/testing/selftests/vm/userfaultfd.c 	unsigned long cpu = (unsigned long) arg;
cpu               298 tools/testing/selftests/vm/userfaultfd.c 			seed += cpu;
cpu               306 tools/testing/selftests/vm/userfaultfd.c 			page_nr += cpu * nr_pages_per_cpu;
cpu               365 tools/testing/selftests/vm/userfaultfd.c 					page_nr, cpu, area_dst + page_nr * page_size,
cpu               491 tools/testing/selftests/vm/userfaultfd.c 	unsigned long cpu = (unsigned long) arg;
cpu               501 tools/testing/selftests/vm/userfaultfd.c 	pollfd[1].fd = pipefd[cpu*2];
cpu               572 tools/testing/selftests/vm/userfaultfd.c 	unsigned long cpu = (unsigned long) arg;
cpu               575 tools/testing/selftests/vm/userfaultfd.c 	for (page_nr = cpu * nr_pages_per_cpu;
cpu               576 tools/testing/selftests/vm/userfaultfd.c 	     page_nr < (cpu+1) * nr_pages_per_cpu;
cpu               585 tools/testing/selftests/vm/userfaultfd.c 	unsigned long cpu;
cpu               592 tools/testing/selftests/vm/userfaultfd.c 	for (cpu = 0; cpu < nr_cpus; cpu++) {
cpu               593 tools/testing/selftests/vm/userfaultfd.c 		if (pthread_create(&locking_threads[cpu], &attr,
cpu               594 tools/testing/selftests/vm/userfaultfd.c 				   locking_thread, (void *)cpu))
cpu               597 tools/testing/selftests/vm/userfaultfd.c 			if (pthread_create(&uffd_threads[cpu], &attr,
cpu               598 tools/testing/selftests/vm/userfaultfd.c 					   uffd_poll_thread, (void *)cpu))
cpu               601 tools/testing/selftests/vm/userfaultfd.c 			if (pthread_create(&uffd_threads[cpu], &attr,
cpu               603 tools/testing/selftests/vm/userfaultfd.c 					   &_userfaults[cpu]))
cpu               607 tools/testing/selftests/vm/userfaultfd.c 		if (pthread_create(&background_threads[cpu], &attr,
cpu               608 tools/testing/selftests/vm/userfaultfd.c 				   background_thread, (void *)cpu))
cpu               611 tools/testing/selftests/vm/userfaultfd.c 	for (cpu = 0; cpu < nr_cpus; cpu++)
cpu               612 tools/testing/selftests/vm/userfaultfd.c 		if (pthread_join(background_threads[cpu], NULL))
cpu               629 tools/testing/selftests/vm/userfaultfd.c 	for (cpu = 0; cpu < nr_cpus; cpu++)
cpu               630 tools/testing/selftests/vm/userfaultfd.c 		if (pthread_join(locking_threads[cpu], NULL))
cpu               633 tools/testing/selftests/vm/userfaultfd.c 	for (cpu = 0; cpu < nr_cpus; cpu++) {
cpu               636 tools/testing/selftests/vm/userfaultfd.c 			if (write(pipefd[cpu*2+1], &c, 1) != 1) {
cpu               640 tools/testing/selftests/vm/userfaultfd.c 			if (pthread_join(uffd_threads[cpu], &_userfaults[cpu]))
cpu               643 tools/testing/selftests/vm/userfaultfd.c 			if (pthread_cancel(uffd_threads[cpu]))
cpu               645 tools/testing/selftests/vm/userfaultfd.c 			if (pthread_join(uffd_threads[cpu], NULL))
cpu              1039 tools/testing/selftests/vm/userfaultfd.c 	unsigned long cpu;
cpu              1078 tools/testing/selftests/vm/userfaultfd.c 	for (cpu = 0; cpu < nr_cpus; cpu++) {
cpu              1079 tools/testing/selftests/vm/userfaultfd.c 		if (pipe2(&pipefd[cpu*2], O_CLOEXEC | O_NONBLOCK)) {
cpu              1213 tools/testing/selftests/vm/userfaultfd.c 		for (cpu = 0; cpu < nr_cpus; cpu++)
cpu              1214 tools/testing/selftests/vm/userfaultfd.c 			printf(" %lu", userfaults[cpu]);
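
The userfaultfd test fans out one set of threads per CPU and passes the CPU number through the pthread argument, using it only to partition work (pages, pipes, per-CPU fault counters), not to set affinity. A hedged skeleton of that create/join pattern; worker() and run_per_cpu_threads() are illustrative names:

    #include <pthread.h>

    static void *worker(void *arg)
    {
            unsigned long cpu = (unsigned long)arg;     /* an index, not an affinity request */

            /* ... operate on this thread's share, e.g. cpu * nr_pages_per_cpu ... */
            (void)cpu;
            return NULL;
    }

    static int run_per_cpu_threads(unsigned long nr_cpus, pthread_t *threads)
    {
            unsigned long cpu;

            for (cpu = 0; cpu < nr_cpus; cpu++)
                    if (pthread_create(&threads[cpu], NULL, worker, (void *)cpu))
                            return -1;
            for (cpu = 0; cpu < nr_cpus; cpu++)
                    if (pthread_join(threads[cpu], NULL))
                            return -1;
            return 0;
    }
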
cpu               118 tools/testing/selftests/x86/test_vdso.c static long sys_getcpu(unsigned * cpu, unsigned * node,
cpu               121 tools/testing/selftests/x86/test_vdso.c 	return syscall(__NR_getcpu, cpu, node, cache);
cpu               138 tools/testing/selftests/x86/test_vdso.c 	for (int cpu = 0; ; cpu++) {
cpu               141 tools/testing/selftests/x86/test_vdso.c 		CPU_SET(cpu, &cpuset);
cpu               164 tools/testing/selftests/x86/test_vdso.c 		if (!ret_sys && (cpu_sys != cpu || node_sys != node))
cpu               166 tools/testing/selftests/x86/test_vdso.c 		if (!ret_vdso && (cpu_vdso != cpu || node_vdso != node))
cpu               168 tools/testing/selftests/x86/test_vdso.c 		if (!ret_vsys && (cpu_vsys != cpu || node_vsys != node))
cpu               171 tools/testing/selftests/x86/test_vdso.c 		printf("[%s]\tCPU %u:", ok ? "OK" : "FAIL", cpu);
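
test_vdso.c walks every CPU, pins itself there, and checks that the syscall, vDSO, and vsyscall getcpu() entry points all report that CPU. A hedged sketch of the core check for the syscall path only (check_getcpu() is an illustrative name):

    #define _GNU_SOURCE
    #include <sched.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    /* Returns 0 on match, 1 on mismatch, -1 if the CPU cannot be used. */
    static int check_getcpu(int cpu)
    {
            cpu_set_t cpuset;
            unsigned int cpu_sys = 0, node_sys = 0;

            CPU_ZERO(&cpuset);
            CPU_SET(cpu, &cpuset);
            if (sched_setaffinity(0, sizeof(cpuset), &cpuset))
                    return -1;      /* CPU not available: skip it */

            /* Third getcpu() argument is the unused legacy cache pointer. */
            if (syscall(__NR_getcpu, &cpu_sys, &node_sys, NULL))
                    return -1;
            return cpu_sys == (unsigned int)cpu ? 0 : 1;
    }
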
cpu               174 tools/testing/selftests/x86/test_vsyscall.c static inline long sys_getcpu(unsigned * cpu, unsigned * node,
cpu               177 tools/testing/selftests/x86/test_vsyscall.c 	return syscall(SYS_getcpu, cpu, node, cache);
cpu               311 tools/testing/selftests/x86/test_vsyscall.c static int test_getcpu(int cpu)
cpu               316 tools/testing/selftests/x86/test_vsyscall.c 	printf("[RUN]\tgetcpu() on CPU %d\n", cpu);
cpu               320 tools/testing/selftests/x86/test_vsyscall.c 	CPU_SET(cpu, &cpuset);
cpu               322 tools/testing/selftests/x86/test_vsyscall.c 		printf("[SKIP]\tfailed to force CPU %d\n", cpu);
cpu               336 tools/testing/selftests/x86/test_vsyscall.c 		if (cpu_sys != cpu) {
cpu               337 tools/testing/selftests/x86/test_vsyscall.c 			printf("[FAIL]\tsyscall reported CPU %hu but should be %d\n", cpu_sys, cpu);
cpu               355 tools/testing/selftests/x86/test_vsyscall.c 			if (cpu_vdso != cpu) {
cpu               356 tools/testing/selftests/x86/test_vsyscall.c 				printf("[FAIL]\tvDSO reported CPU %hu but should be %d\n", cpu_vdso, cpu);
cpu               381 tools/testing/selftests/x86/test_vsyscall.c 			if (cpu_vsys != cpu) {
cpu               382 tools/testing/selftests/x86/test_vsyscall.c 				printf("[FAIL]\tvsyscall reported CPU %hu but should be %d\n", cpu_vsys, cpu);
cpu                81 tools/virtio/ringtest/main.c 	long int cpu;
cpu                87 tools/virtio/ringtest/main.c 	cpu = strtol(arg, &endptr, 0);
cpu                90 tools/virtio/ringtest/main.c 	assert(cpu >= 0 && cpu < CPU_SETSIZE);
cpu                94 tools/virtio/ringtest/main.c 	CPU_SET(cpu, &cpuset);
cpu                40 tools/virtio/virtio-trace/trace-agent-rw.c void *rw_thread_init(int cpu, const char *in_path, const char *out_path,
cpu                46 tools/virtio/virtio-trace/trace-agent-rw.c 	rw_ti->cpu_num = cpu;
cpu                51 tools/virtio/virtio-trace/trace-agent-rw.c 		pr_err("Could not open in_fd (CPU:%d)\n", cpu);
cpu                60 tools/virtio/virtio-trace/trace-agent-rw.c 			pr_err("Could not open out_fd (CPU:%d)\n", cpu);
cpu                68 tools/virtio/virtio-trace/trace-agent-rw.c 		pr_err("Could not create pipe in rw-thread(%d)\n", cpu);
cpu                77 tools/virtio/virtio-trace/trace-agent-rw.c 		pr_err("Could not change pipe size in rw-thread(%d)\n", cpu);
cpu               152 tools/virtio/virtio-trace/trace-agent.c 	int cpu;
cpu               157 tools/virtio/virtio-trace/trace-agent.c 	for (cpu = 0; cpu < s->cpus; cpu++) {
cpu               159 tools/virtio/virtio-trace/trace-agent.c 		in_path = make_input_path(cpu);
cpu               165 tools/virtio/virtio-trace/trace-agent.c 			out_path = make_output_path(cpu);
cpu               172 tools/virtio/virtio-trace/trace-agent.c 		rw_thread_init(cpu, in_path, out_path, s->use_stdout,
cpu               173 tools/virtio/virtio-trace/trace-agent.c 						s->pipe_size, s->rw_ti[cpu]);
cpu               220 tools/virtio/virtio-trace/trace-agent.c 	int cpu;
cpu               224 tools/virtio/virtio-trace/trace-agent.c 	for (cpu = 0; cpu < s->cpus; cpu++)
cpu               225 tools/virtio/virtio-trace/trace-agent.c 		rw_thread_per_cpu[cpu] = rw_thread_run(s->rw_ti[cpu]);
cpu               230 tools/virtio/virtio-trace/trace-agent.c 	for (cpu = 0; cpu < s->cpus; cpu++) {
cpu               233 tools/virtio/virtio-trace/trace-agent.c 		ret = pthread_join(rw_thread_per_cpu[cpu], NULL);
cpu               235 tools/virtio/virtio-trace/trace-agent.c 			pr_err("pthread_join() error:%d (cpu %d)\n", ret, cpu);
cpu                58 tools/virtio/virtio-trace/trace-agent.h extern void *rw_thread_init(int cpu, const char *in_path, const char *out_path,
cpu               884 virt/kvm/arm/arch_timer.c static int kvm_timer_starting_cpu(unsigned int cpu)
cpu               890 virt/kvm/arm/arch_timer.c static int kvm_timer_dying_cpu(unsigned int cpu)
cpu               108 virt/kvm/arm/arm.c 	int ret, cpu;
cpu               118 virt/kvm/arm/arm.c 	for_each_possible_cpu(cpu)
cpu               119 virt/kvm/arm/arm.c 		*per_cpu_ptr(kvm->arch.last_vcpu_ran, cpu) = -1;
cpu               367 virt/kvm/arm/arm.c void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
cpu               384 virt/kvm/arm/arm.c 	vcpu->cpu = cpu;
cpu               420 virt/kvm/arm/arm.c 	vcpu->cpu = -1;
cpu              1518 virt/kvm/arm/arm.c 	int cpu;
cpu              1521 virt/kvm/arm/arm.c 	for_each_possible_cpu(cpu)
cpu              1522 virt/kvm/arm/arm.c 		free_page(per_cpu(kvm_arm_hyp_stack_page, cpu));
cpu              1531 virt/kvm/arm/arm.c 	int cpu;
cpu              1544 virt/kvm/arm/arm.c 	for_each_possible_cpu(cpu) {
cpu              1553 virt/kvm/arm/arm.c 		per_cpu(kvm_arm_hyp_stack_page, cpu) = stack_page;
cpu              1589 virt/kvm/arm/arm.c 	for_each_possible_cpu(cpu) {
cpu              1590 virt/kvm/arm/arm.c 		char *stack_page = (char *)per_cpu(kvm_arm_hyp_stack_page, cpu);
cpu              1600 virt/kvm/arm/arm.c 	for_each_possible_cpu(cpu) {
cpu              1603 virt/kvm/arm/arm.c 		cpu_data = per_cpu_ptr(&kvm_host_data, cpu);
cpu              1688 virt/kvm/arm/arm.c 	int ret, cpu;
cpu              1703 virt/kvm/arm/arm.c 	for_each_online_cpu(cpu) {
cpu              1704 virt/kvm/arm/arm.c 		smp_call_function_single(cpu, check_kvm_target_cpu, &ret, 1);
cpu              1706 virt/kvm/arm/arm.c 			kvm_err("Error, CPU %d not supported!\n", cpu);
cpu               446 virt/kvm/arm/vgic/vgic-init.c static int vgic_init_cpu_starting(unsigned int cpu)
cpu               453 virt/kvm/arm/vgic/vgic-init.c static int vgic_init_cpu_dying(unsigned int cpu)
cpu               198 virt/kvm/kvm_main.c 	int cpu = get_cpu();
cpu               200 virt/kvm/kvm_main.c 	kvm_arch_vcpu_load(vcpu, cpu);
cpu               251 virt/kvm/kvm_main.c 	int i, cpu, me;
cpu               262 virt/kvm/kvm_main.c 		cpu = vcpu->cpu;
cpu               267 virt/kvm/kvm_main.c 		if (tmp != NULL && cpu != -1 && cpu != me &&
cpu               269 virt/kvm/kvm_main.c 			__cpumask_set_cpu(cpu, tmp);
cpu               330 virt/kvm/kvm_main.c 	vcpu->cpu = -1;
cpu              2560 virt/kvm/kvm_main.c 	int cpu = vcpu->cpu;
cpu              2566 virt/kvm/kvm_main.c 	if (cpu != me && (unsigned)cpu < nr_cpu_ids && cpu_online(cpu))
cpu              2568 virt/kvm/kvm_main.c 			smp_send_reschedule(cpu);
cpu              3685 virt/kvm/kvm_main.c 	int cpu = raw_smp_processor_id();
cpu              3688 virt/kvm/kvm_main.c 	if (cpumask_test_cpu(cpu, cpus_hardware_enabled))
cpu              3691 virt/kvm/kvm_main.c 	cpumask_set_cpu(cpu, cpus_hardware_enabled);
cpu              3696 virt/kvm/kvm_main.c 		cpumask_clear_cpu(cpu, cpus_hardware_enabled);
cpu              3698 virt/kvm/kvm_main.c 		pr_info("kvm: enabling virtualization on CPU%d failed\n", cpu);
cpu              3702 virt/kvm/kvm_main.c static int kvm_starting_cpu(unsigned int cpu)
cpu              3713 virt/kvm/kvm_main.c 	int cpu = raw_smp_processor_id();
cpu              3715 virt/kvm/kvm_main.c 	if (!cpumask_test_cpu(cpu, cpus_hardware_enabled))
cpu              3717 virt/kvm/kvm_main.c 	cpumask_clear_cpu(cpu, cpus_hardware_enabled);
cpu              3721 virt/kvm/kvm_main.c static int kvm_dying_cpu(unsigned int cpu)
cpu              4372 virt/kvm/kvm_main.c static void kvm_sched_in(struct preempt_notifier *pn, int cpu)
cpu              4379 virt/kvm/kvm_main.c 	kvm_arch_sched_in(vcpu, cpu);
cpu              4381 virt/kvm/kvm_main.c 	kvm_arch_vcpu_load(vcpu, cpu);
cpu              4405 virt/kvm/kvm_main.c 	int cpu;
cpu              4431 virt/kvm/kvm_main.c 	for_each_online_cpu(cpu) {
cpu              4432 virt/kvm/kvm_main.c 		smp_call_function_single(cpu, check_processor_compat, &r, 1);