/*
 * Xen SMP support
 *
 * This file implements the Xen versions of smp_ops.  SMP under Xen is
 * very straightforward.  Bringing a CPU up is simply a matter of
 * loading its initial context and setting it running.
 *
 * IPIs are handled through the Xen event mechanism.
 *
 * Because virtual CPUs can be scheduled onto any real CPU, there's no
 * useful topology information for the kernel to make use of.  As a
 * result, all CPUs are treated as if they're single-core and
 * single-threaded.
 */
#include <linux/sched.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/irq_work.h>
#include <linux/tick.h>

#include <asm/paravirt.h>
#include <asm/desc.h>
#include <asm/pgtable.h>
#include <asm/cpu.h>

#include <xen/interface/xen.h>
#include <xen/interface/vcpu.h>

#include <asm/xen/interface.h>
#include <asm/xen/hypercall.h>

#include <xen/xen.h>
#include <xen/page.h>
#include <xen/events.h>

#include <xen/hvc-console.h>
#include "xen-ops.h"
#include "mmu.h"
#include "smp.h"

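/*
 * Tracks which VCPUs have had their initial context loaded via
 * VCPUOP_initialise (see cpu_initialize_context() below).
 */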
cpumask_var_t xen_cpu_initialized_map;

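/*
 * Per-CPU bookkeeping for the event-channel IRQs backing our IPIs and
 * the debug VIRQ.  An irq of -1 means "not currently bound"; the name
 * string is kept so it can be freed when the IRQ is torn down.
 */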
struct xen_common_irq {
	int irq;
	char *name;
};
static DEFINE_PER_CPU(struct xen_common_irq, xen_resched_irq) = { .irq = -1 };
static DEFINE_PER_CPU(struct xen_common_irq, xen_callfunc_irq) = { .irq = -1 };
static DEFINE_PER_CPU(struct xen_common_irq, xen_callfuncsingle_irq) = { .irq = -1 };
static DEFINE_PER_CPU(struct xen_common_irq, xen_irq_work) = { .irq = -1 };
static DEFINE_PER_CPU(struct xen_common_irq, xen_debug_irq) = { .irq = -1 };

static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id);
static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id);
static irqreturn_t xen_irq_work_interrupt(int irq, void *dev_id);

/*
 * Reschedule callback.
 */
static irqreturn_t xen_reschedule_interrupt(int irq, void *dev_id)
{
	inc_irq_stat(irq_resched_count);
	scheduler_ipi();

	return IRQ_HANDLED;
}

static void cpu_bringup(void)
{
	int cpu;

	cpu_init();
	touch_softlockup_watchdog();
	preempt_disable();

	/* PVH runs in ring 0 and allows us to do native syscalls. Yay! */
	if (!xen_feature(XENFEAT_supervisor_mode_kernel)) {
		xen_enable_sysenter();
		xen_enable_syscall();
	}
	cpu = smp_processor_id();
	smp_store_cpu_info(cpu);
	cpu_data(cpu).x86_max_cores = 1;
	set_cpu_sibling_map(cpu);

	xen_setup_cpu_clockevents();

	notify_cpu_starting(cpu);

	set_cpu_online(cpu, true);

	cpu_set_state_online(cpu);  /* Implies full memory barrier. */

	/* We can take interrupts now: we're officially "up". */
	local_irq_enable();
}

/*
 * Note: the cpu parameter is only relevant for PVH. The reason for passing
 * it is that we can't call smp_processor_id() until the per-cpu segments
 * are loaded, for which we need the cpu number! So we pass it in rdi as
 * the first parameter.
 */
asmlinkage __visible void cpu_bringup_and_idle(int cpu)
{
#ifdef CONFIG_XEN_PVH
	if (xen_feature(XENFEAT_auto_translated_physmap) &&
	    xen_feature(XENFEAT_supervisor_mode_kernel))
		xen_pvh_secondary_vcpu_init(cpu);
#endif
	cpu_bringup();
	cpu_startup_entry(CPUHP_ONLINE);
}

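/*
 * Unbind and free whichever of this CPU's Xen IPI/VIRQ handlers are
 * currently bound.  Safe to call on a partially set-up CPU: each irq
 * field stays -1 until the corresponding bind succeeds.
 */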
static void xen_smp_intr_free(unsigned int cpu)
{
	if (per_cpu(xen_resched_irq, cpu).irq >= 0) {
		unbind_from_irqhandler(per_cpu(xen_resched_irq, cpu).irq, NULL);
		per_cpu(xen_resched_irq, cpu).irq = -1;
		kfree(per_cpu(xen_resched_irq, cpu).name);
		per_cpu(xen_resched_irq, cpu).name = NULL;
	}
	if (per_cpu(xen_callfunc_irq, cpu).irq >= 0) {
		unbind_from_irqhandler(per_cpu(xen_callfunc_irq, cpu).irq, NULL);
		per_cpu(xen_callfunc_irq, cpu).irq = -1;
		kfree(per_cpu(xen_callfunc_irq, cpu).name);
		per_cpu(xen_callfunc_irq, cpu).name = NULL;
	}
	if (per_cpu(xen_debug_irq, cpu).irq >= 0) {
		unbind_from_irqhandler(per_cpu(xen_debug_irq, cpu).irq, NULL);
		per_cpu(xen_debug_irq, cpu).irq = -1;
		kfree(per_cpu(xen_debug_irq, cpu).name);
		per_cpu(xen_debug_irq, cpu).name = NULL;
	}
	if (per_cpu(xen_callfuncsingle_irq, cpu).irq >= 0) {
		unbind_from_irqhandler(per_cpu(xen_callfuncsingle_irq, cpu).irq,
				       NULL);
		per_cpu(xen_callfuncsingle_irq, cpu).irq = -1;
		kfree(per_cpu(xen_callfuncsingle_irq, cpu).name);
		per_cpu(xen_callfuncsingle_irq, cpu).name = NULL;
	}
	if (xen_hvm_domain())
		return;

	if (per_cpu(xen_irq_work, cpu).irq >= 0) {
		unbind_from_irqhandler(per_cpu(xen_irq_work, cpu).irq, NULL);
		per_cpu(xen_irq_work, cpu).irq = -1;
		kfree(per_cpu(xen_irq_work, cpu).name);
		per_cpu(xen_irq_work, cpu).name = NULL;
	}
}
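
/*
 * Bind this CPU's IPI event channels (reschedule, call-function,
 * call-function-single and, for PV guests, irq_work) plus the VIRQ_DEBUG
 * handler.  On failure, everything bound so far is torn down again.
 */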
static int xen_smp_intr_init(unsigned int cpu)
{
	int rc;
	char *resched_name, *callfunc_name, *debug_name;

	resched_name = kasprintf(GFP_KERNEL, "resched%d", cpu);
	rc = bind_ipi_to_irqhandler(XEN_RESCHEDULE_VECTOR,
				    cpu,
				    xen_reschedule_interrupt,
				    IRQF_PERCPU|IRQF_NOBALANCING,
				    resched_name,
				    NULL);
	if (rc < 0)
		goto fail;
	per_cpu(xen_resched_irq, cpu).irq = rc;
	per_cpu(xen_resched_irq, cpu).name = resched_name;

	callfunc_name = kasprintf(GFP_KERNEL, "callfunc%d", cpu);
	rc = bind_ipi_to_irqhandler(XEN_CALL_FUNCTION_VECTOR,
				    cpu,
				    xen_call_function_interrupt,
				    IRQF_PERCPU|IRQF_NOBALANCING,
				    callfunc_name,
				    NULL);
	if (rc < 0)
		goto fail;
	per_cpu(xen_callfunc_irq, cpu).irq = rc;
	per_cpu(xen_callfunc_irq, cpu).name = callfunc_name;

	debug_name = kasprintf(GFP_KERNEL, "debug%d", cpu);
	rc = bind_virq_to_irqhandler(VIRQ_DEBUG, cpu, xen_debug_interrupt,
				     IRQF_PERCPU | IRQF_NOBALANCING,
				     debug_name, NULL);
	if (rc < 0)
		goto fail;
	per_cpu(xen_debug_irq, cpu).irq = rc;
	per_cpu(xen_debug_irq, cpu).name = debug_name;

	callfunc_name = kasprintf(GFP_KERNEL, "callfuncsingle%d", cpu);
	rc = bind_ipi_to_irqhandler(XEN_CALL_FUNCTION_SINGLE_VECTOR,
				    cpu,
				    xen_call_function_single_interrupt,
				    IRQF_PERCPU|IRQF_NOBALANCING,
				    callfunc_name,
				    NULL);
	if (rc < 0)
		goto fail;
	per_cpu(xen_callfuncsingle_irq, cpu).irq = rc;
	per_cpu(xen_callfuncsingle_irq, cpu).name = callfunc_name;

	/*
	 * The IRQ worker on PVHVM goes through the native path and uses the
	 * IPI mechanism.
	 */
	if (xen_hvm_domain())
		return 0;

	callfunc_name = kasprintf(GFP_KERNEL, "irqwork%d", cpu);
	rc = bind_ipi_to_irqhandler(XEN_IRQ_WORK_VECTOR,
				    cpu,
				    xen_irq_work_interrupt,
				    IRQF_PERCPU|IRQF_NOBALANCING,
				    callfunc_name,
				    NULL);
	if (rc < 0)
		goto fail;
	per_cpu(xen_irq_work, cpu).irq = rc;
	per_cpu(xen_irq_work, cpu).name = callfunc_name;

	return 0;

 fail:
	xen_smp_intr_free(cpu);
	return rc;
}

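/*
 * For domU, probe each possible VCPU id with VCPUOP_is_up; any id the
 * hypervisor recognises (rc >= 0, whether or not the VCPU is currently
 * up) is marked as a possible CPU.  Dom0 is handled by
 * xen_filter_cpu_maps() below instead.
 */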
static void __init xen_fill_possible_map(void)
{
	int i, rc;

	if (xen_initial_domain())
		return;

	for (i = 0; i < nr_cpu_ids; i++) {
		rc = HYPERVISOR_vcpu_op(VCPUOP_is_up, i, NULL);
		if (rc >= 0) {
			num_processors++;
			set_cpu_possible(i, true);
		}
	}
}

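/*
 * For dom0, the possible/present maps were typically built from the host
 * ACPI tables, which describe the physical machine rather than the VCPUs
 * Xen actually gives us.  Re-probe with VCPUOP_is_up and drop anything
 * that does not correspond to a real VCPU.
 */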
static void __init xen_filter_cpu_maps(void)
{
	int i, rc;
	unsigned int subtract = 0;

	if (!xen_initial_domain())
		return;

	num_processors = 0;
	disabled_cpus = 0;
	for (i = 0; i < nr_cpu_ids; i++) {
		rc = HYPERVISOR_vcpu_op(VCPUOP_is_up, i, NULL);
		if (rc >= 0) {
			num_processors++;
			set_cpu_possible(i, true);
		} else {
			set_cpu_possible(i, false);
			set_cpu_present(i, false);
			subtract++;
		}
	}
#ifdef CONFIG_HOTPLUG_CPU
	/* This is akin to using 'nr_cpus' on the Linux command line.
	 * It is OK because with 'dom0_max_vcpus=X' we can only have up
	 * to X VCPUs, while nr_cpu_ids may be greater than X. Normally
	 * that is not a problem, but with CPU hotplug there could then
	 * be more than X CPUs in the guest - which cannot work, as there
	 * is no hypercall to expand the maximum number of VCPUs of an
	 * already running guest. So cap nr_cpu_ids at X. */
	if (subtract)
		nr_cpu_ids = nr_cpu_ids - subtract;
#endif
}

static void __init xen_smp_prepare_boot_cpu(void)
{
	BUG_ON(smp_processor_id() != 0);
	native_smp_prepare_boot_cpu();

	if (xen_pv_domain()) {
		if (!xen_feature(XENFEAT_writable_page_tables))
			/* We've switched to the "real" per-cpu gdt, so make
			 * sure the old memory can be recycled. */
			make_lowmem_page_readwrite(xen_initial_gdt);

#ifdef CONFIG_X86_32
		/*
		 * Xen starts us with XEN_FLAT_RING1_DS, but linux code
		 * expects __USER_DS
		 */
		loadsegment(ds, __USER_DS);
		loadsegment(es, __USER_DS);
#endif

		xen_filter_cpu_maps();
		xen_setup_vcpu_info_placement();
	}
	/*
	 * The alternative logic (which patches the unlock/lock) runs before
	 * the SMP bootup code is activated. Hence we need to set this up
	 * before the core kernel is patched. Otherwise we would end up with
	 * only modules patched but not core code.
	 */
	xen_init_spinlocks();
}

static void __init xen_smp_prepare_cpus(unsigned int max_cpus)
{
	unsigned cpu;
	unsigned int i;

	if (skip_ioapic_setup) {
		char *m = (max_cpus == 0) ?
			"The nosmp parameter is incompatible with Xen; " \
			"use Xen dom0_max_vcpus=1 parameter" :
			"The noapic parameter is incompatible with Xen";

		xen_raw_printk(m);
		panic(m);
	}
	xen_init_lock_cpu(0);

	smp_store_boot_cpu_info();
	cpu_data(0).x86_max_cores = 1;

	for_each_possible_cpu(i) {
		zalloc_cpumask_var(&per_cpu(cpu_sibling_map, i), GFP_KERNEL);
		zalloc_cpumask_var(&per_cpu(cpu_core_map, i), GFP_KERNEL);
		zalloc_cpumask_var(&per_cpu(cpu_llc_shared_map, i), GFP_KERNEL);
	}
	set_cpu_sibling_map(0);

	if (xen_smp_intr_init(0))
		BUG();

	if (!alloc_cpumask_var(&xen_cpu_initialized_map, GFP_KERNEL))
		panic("could not allocate xen_cpu_initialized_map\n");

	cpumask_copy(xen_cpu_initialized_map, cpumask_of(0));

	/* Restrict the possible_map according to max_cpus. */
	while ((num_possible_cpus() > 1) && (num_possible_cpus() > max_cpus)) {
		for (cpu = nr_cpu_ids - 1; !cpu_possible(cpu); cpu--)
			continue;
		set_cpu_possible(cpu, false);
	}

	for_each_possible_cpu(cpu)
		set_cpu_present(cpu, true);
}

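/*
 * Build the initial vcpu_guest_context for a secondary VCPU - registers,
 * GDT frame, event/failsafe callbacks and cr3 for PV, or the early PVH
 * entry point - and hand it to Xen with VCPUOP_initialise.  This is only
 * done once per CPU; re-onlining a CPU later just needs VCPUOP_up.
 */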
static int
cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
{
	struct vcpu_guest_context *ctxt;
	struct desc_struct *gdt;
	unsigned long gdt_mfn;

	/* used to tell cpu_init() that it can proceed with initialization */
	cpumask_set_cpu(cpu, cpu_callout_mask);
	if (cpumask_test_and_set_cpu(cpu, xen_cpu_initialized_map))
		return 0;

	ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
	if (ctxt == NULL)
		return -ENOMEM;

	gdt = get_cpu_gdt_table(cpu);

#ifdef CONFIG_X86_32
	/* Note: PVH is not yet supported on x86_32. */
	ctxt->user_regs.fs = __KERNEL_PERCPU;
	ctxt->user_regs.gs = __KERNEL_STACK_CANARY;
#endif
	memset(&ctxt->fpu_ctxt, 0, sizeof(ctxt->fpu_ctxt));

	if (!xen_feature(XENFEAT_auto_translated_physmap)) {
		ctxt->user_regs.eip = (unsigned long)cpu_bringup_and_idle;
		ctxt->flags = VGCF_IN_KERNEL;
		ctxt->user_regs.eflags = 0x1000; /* IOPL_RING1 */
		ctxt->user_regs.ds = __USER_DS;
		ctxt->user_regs.es = __USER_DS;
		ctxt->user_regs.ss = __KERNEL_DS;

		xen_copy_trap_info(ctxt->trap_ctxt);

		ctxt->ldt_ents = 0;

		BUG_ON((unsigned long)gdt & ~PAGE_MASK);

		gdt_mfn = arbitrary_virt_to_mfn(gdt);
		make_lowmem_page_readonly(gdt);
		make_lowmem_page_readonly(mfn_to_virt(gdt_mfn));

		ctxt->gdt_frames[0] = gdt_mfn;
		ctxt->gdt_ents      = GDT_ENTRIES;

		ctxt->kernel_ss = __KERNEL_DS;
		ctxt->kernel_sp = idle->thread.sp0;

#ifdef CONFIG_X86_32
		ctxt->event_callback_cs     = __KERNEL_CS;
		ctxt->failsafe_callback_cs  = __KERNEL_CS;
#else
		ctxt->gs_base_kernel = per_cpu_offset(cpu);
#endif
		ctxt->event_callback_eip    =
					(unsigned long)xen_hypervisor_callback;
		ctxt->failsafe_callback_eip =
					(unsigned long)xen_failsafe_callback;
		ctxt->user_regs.cs = __KERNEL_CS;
		per_cpu(xen_cr3, cpu) = __pa(swapper_pg_dir);
	}
#ifdef CONFIG_XEN_PVH
	else {
		/*
		 * The vcpu comes on kernel page tables which have the NX pte
		 * bit set. This means before DS/SS is touched, NX in
		 * EFER must be set. Hence the following assembly glue code.
		 */
		ctxt->user_regs.eip = (unsigned long)xen_pvh_early_cpu_init;
		ctxt->user_regs.rdi = cpu;
		ctxt->user_regs.rsi = true;  /* entry == true */
	}
#endif
	ctxt->user_regs.esp = idle->thread.sp0 - sizeof(struct pt_regs);
	ctxt->ctrlreg[3] = xen_pfn_to_cr3(virt_to_mfn(swapper_pg_dir));
	if (HYPERVISOR_vcpu_op(VCPUOP_initialise, cpu, ctxt))
		BUG();

	kfree(ctxt);
	return 0;
}

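/*
 * Bring a secondary CPU up: set up its runstate/timer/spinlock state,
 * load its initial context into Xen, bind its IPI event channels, then
 * ask Xen to run it with VCPUOP_up and wait until it reports itself
 * online.
 */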
static int xen_cpu_up(unsigned int cpu, struct task_struct *idle)
{
	int rc;

	common_cpu_up(cpu, idle);

	xen_setup_runstate_info(cpu);
	xen_setup_timer(cpu);
	xen_init_lock_cpu(cpu);

	/*
	 * PV VCPUs are always successfully taken down (see 'while' loop
	 * in xen_cpu_die()), so -EBUSY is an error.
	 */
	rc = cpu_check_up_prepare(cpu);
	if (rc)
		return rc;

	/* make sure interrupts start blocked */
	per_cpu(xen_vcpu, cpu)->evtchn_upcall_mask = 1;

	rc = cpu_initialize_context(cpu, idle);
	if (rc)
		return rc;

	rc = xen_smp_intr_init(cpu);
	if (rc)
		return rc;

	rc = HYPERVISOR_vcpu_op(VCPUOP_up, cpu, NULL);
	BUG_ON(rc);

	while (cpu_report_state(cpu) != CPU_ONLINE)
		HYPERVISOR_sched_op(SCHEDOP_yield, NULL);

	return 0;
}

static void xen_smp_cpus_done(unsigned int max_cpus)
{
}

#ifdef CONFIG_HOTPLUG_CPU
static int xen_cpu_disable(void)
{
	unsigned int cpu = smp_processor_id();
	if (cpu == 0)
		return -EBUSY;

	cpu_disable_common();

	load_cr3(swapper_pg_dir);
	return 0;
}

static void xen_cpu_die(unsigned int cpu)
{
	while (xen_pv_domain() && HYPERVISOR_vcpu_op(VCPUOP_is_up, cpu, NULL)) {
		__set_current_state(TASK_UNINTERRUPTIBLE);
		schedule_timeout(HZ/10);
	}

	if (common_cpu_die(cpu) == 0) {
		xen_smp_intr_free(cpu);
		xen_uninit_lock_cpu(cpu);
		xen_teardown_timer(cpu);
	}
}

static void xen_play_dead(void) /* used only with HOTPLUG_CPU */
{
	play_dead_common();
	HYPERVISOR_vcpu_op(VCPUOP_down, smp_processor_id(), NULL);
	cpu_bringup();
	/*
	 * commit 4b0c0f294 (tick: Cleanup NOHZ per cpu data on cpu down)
	 * clears certain data that the cpu_idle loop (which called us
	 * and that we return from) expects. The only way to get that
	 * data back is to call:
	 */
	tick_nohz_idle_enter();
}

#else /* !CONFIG_HOTPLUG_CPU */
static int xen_cpu_disable(void)
{
	return -ENOSYS;
}

static void xen_cpu_die(unsigned int cpu)
{
	BUG();
}

static void xen_play_dead(void)
{
	BUG();
}

#endif
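
/*
 * Helper for xen_stop_other_cpus(): runs on each remote CPU, switches to
 * the kernel's reference page tables so nothing stays pinned, marks the
 * CPU offline and asks Xen to take the VCPU down.
 */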
static void stop_self(void *v)
{
	int cpu = smp_processor_id();

	/* make sure we're not pinning something down */
	load_cr3(swapper_pg_dir);
	/* should set up a minimal gdt */

	set_cpu_online(cpu, false);

	HYPERVISOR_vcpu_op(VCPUOP_down, cpu, NULL);
	BUG();
}

static void xen_stop_other_cpus(int wait)
{
	smp_call_function(stop_self, NULL, wait);
}

static void xen_smp_send_reschedule(int cpu)
{
	xen_send_IPI_one(cpu, XEN_RESCHEDULE_VECTOR);
}

static void __xen_send_IPI_mask(const struct cpumask *mask,
			      int vector)
{
	unsigned cpu;

	for_each_cpu_and(cpu, mask, cpu_online_mask)
		xen_send_IPI_one(cpu, vector);
}

static void xen_smp_send_call_function_ipi(const struct cpumask *mask)
{
	int cpu;

	__xen_send_IPI_mask(mask, XEN_CALL_FUNCTION_VECTOR);

	/* Make sure other vcpus get a chance to run if they need to. */
	for_each_cpu(cpu, mask) {
		if (xen_vcpu_stolen(cpu)) {
			HYPERVISOR_sched_op(SCHEDOP_yield, NULL);
			break;
		}
	}
}

static void xen_smp_send_call_function_single_ipi(int cpu)
{
	__xen_send_IPI_mask(cpumask_of(cpu),
			  XEN_CALL_FUNCTION_SINGLE_VECTOR);
}

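/*
 * Translate a native x86 IPI vector number into the Xen IPI event used
 * to deliver it.  Returns -1 for vectors we do not implement.
 */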
static inline int xen_map_vector(int vector)
{
	int xen_vector;

	switch (vector) {
	case RESCHEDULE_VECTOR:
		xen_vector = XEN_RESCHEDULE_VECTOR;
		break;
	case CALL_FUNCTION_VECTOR:
		xen_vector = XEN_CALL_FUNCTION_VECTOR;
		break;
	case CALL_FUNCTION_SINGLE_VECTOR:
		xen_vector = XEN_CALL_FUNCTION_SINGLE_VECTOR;
		break;
	case IRQ_WORK_VECTOR:
		xen_vector = XEN_IRQ_WORK_VECTOR;
		break;
#ifdef CONFIG_X86_64
	case NMI_VECTOR:
	case APIC_DM_NMI: /* Some use that instead of NMI_VECTOR */
		xen_vector = XEN_NMI_VECTOR;
		break;
#endif
	default:
		xen_vector = -1;
		printk(KERN_ERR "xen: vector 0x%x is not implemented\n",
			vector);
	}

	return xen_vector;
}

void xen_send_IPI_mask(const struct cpumask *mask,
			      int vector)
{
	int xen_vector = xen_map_vector(vector);

	if (xen_vector >= 0)
		__xen_send_IPI_mask(mask, xen_vector);
}

void xen_send_IPI_all(int vector)
{
	int xen_vector = xen_map_vector(vector);

	if (xen_vector >= 0)
		__xen_send_IPI_mask(cpu_online_mask, xen_vector);
}

void xen_send_IPI_self(int vector)
{
	int xen_vector = xen_map_vector(vector);

	if (xen_vector >= 0)
		xen_send_IPI_one(smp_processor_id(), xen_vector);
}

void xen_send_IPI_mask_allbutself(const struct cpumask *mask,
				int vector)
{
	unsigned cpu;
	unsigned int this_cpu = smp_processor_id();
	int xen_vector = xen_map_vector(vector);

	if (!(num_online_cpus() > 1) || (xen_vector < 0))
		return;

	for_each_cpu_and(cpu, mask, cpu_online_mask) {
		if (this_cpu == cpu)
			continue;

		xen_send_IPI_one(cpu, xen_vector);
	}
}

void xen_send_IPI_allbutself(int vector)
{
	xen_send_IPI_mask_allbutself(cpu_online_mask, vector);
}

static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id)
{
	irq_enter();
	generic_smp_call_function_interrupt();
	inc_irq_stat(irq_call_count);
	irq_exit();

	return IRQ_HANDLED;
}

static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id)
{
	irq_enter();
	generic_smp_call_function_single_interrupt();
	inc_irq_stat(irq_call_count);
	irq_exit();

	return IRQ_HANDLED;
}

static irqreturn_t xen_irq_work_interrupt(int irq, void *dev_id)
{
	irq_enter();
	irq_work_run();
	inc_irq_stat(apic_irq_work_irqs);
	irq_exit();

	return IRQ_HANDLED;
}

static const struct smp_ops xen_smp_ops __initconst = {
	.smp_prepare_boot_cpu = xen_smp_prepare_boot_cpu,
	.smp_prepare_cpus = xen_smp_prepare_cpus,
	.smp_cpus_done = xen_smp_cpus_done,

	.cpu_up = xen_cpu_up,
	.cpu_die = xen_cpu_die,
	.cpu_disable = xen_cpu_disable,
	.play_dead = xen_play_dead,

	.stop_other_cpus = xen_stop_other_cpus,
	.smp_send_reschedule = xen_smp_send_reschedule,

	.send_call_func_ipi = xen_smp_send_call_function_ipi,
	.send_call_func_single_ipi = xen_smp_send_call_function_single_ipi,
};

void __init xen_smp_init(void)
{
	smp_ops = xen_smp_ops;
	xen_fill_possible_map();
}

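/*
 * PVHVM guests use the native SMP bringup path; the Xen-specific IPI
 * plumbing below is layered on top and is only installed when event
 * channel vector callbacks are available (see xen_hvm_smp_init()).
 */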
static void __init xen_hvm_smp_prepare_cpus(unsigned int max_cpus)
{
	native_smp_prepare_cpus(max_cpus);
	WARN_ON(xen_smp_intr_init(0));

	xen_init_lock_cpu(0);
}

static int xen_hvm_cpu_up(unsigned int cpu, struct task_struct *tidle)
{
	int rc;

	/*
	 * This can happen if the CPU was offlined earlier and
	 * offlining timed out in common_cpu_die().
	 */
	if (cpu_report_state(cpu) == CPU_DEAD_FROZEN) {
		xen_smp_intr_free(cpu);
		xen_uninit_lock_cpu(cpu);
	}

	/*
	 * xen_smp_intr_init() needs to run before native_cpu_up()
	 * so that IPI vectors are set up on the booting CPU before
	 * it is marked online in native_cpu_up().
	 */
	rc = xen_smp_intr_init(cpu);
	WARN_ON(rc);
	if (!rc)
		rc = native_cpu_up(cpu, tidle);

	/*
	 * We must initialize the slowpath CPU kicker _after_ the native
	 * path has executed. If we initialized it before, none of the
	 * unlocker IPI kicks would reach the booting CPU, as the booting
	 * CPU had not yet set itself 'online' in cpu_online_mask. That mask
	 * is checked when IPIs are sent (on HVM at least).
	 */
	xen_init_lock_cpu(cpu);
	return rc;
}

void __init xen_hvm_smp_init(void)
{
	if (!xen_have_vector_callback)
		return;
	smp_ops.smp_prepare_cpus = xen_hvm_smp_prepare_cpus;
	smp_ops.smp_send_reschedule = xen_smp_send_reschedule;
	smp_ops.cpu_up = xen_hvm_cpu_up;
	smp_ops.cpu_die = xen_cpu_die;
	smp_ops.send_call_func_ipi = xen_smp_send_call_function_ipi;
	smp_ops.send_call_func_single_ipi = xen_smp_send_call_function_single_ipi;
	smp_ops.smp_prepare_boot_cpu = xen_smp_prepare_boot_cpu;
}