/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 *
 * Copyright IBM Corp. 2007
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 *          Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/vmalloc.h>
#include <linux/hrtimer.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/module.h>
#include <asm/cputable.h>
#include <asm/uaccess.h>
#include <asm/kvm_ppc.h>
#include <asm/tlbflush.h>
#include <asm/cputhreads.h>
#include <asm/irqflags.h>
#include "timing.h"
#include "irq.h"
#include "../mm/mmu_decl.h"

#define CREATE_TRACE_POINTS
#include "trace.h"

struct kvmppc_ops *kvmppc_hv_ops;
EXPORT_SYMBOL_GPL(kvmppc_hv_ops);
struct kvmppc_ops *kvmppc_pr_ops;
EXPORT_SYMBOL_GPL(kvmppc_pr_ops);

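/*
 * A vcpu is runnable when it has a pending exception to deliver or an
 * outstanding vcpu request to process.
 */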
int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
{
	return !!(v->arch.pending_exceptions) ||
	       v->requests;
}

int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	return 1;
}

/*
 * Common checks before entering the guest world.  Call with interrupts
 * enabled; when we are ready to go into guest state, we return with
 * interrupts hard-disabled.
 *
 * returns:
 *
 * == 1 if we're ready to go into guest state
 * <= 0 if we need to go back to the host with return value
 */
int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu)
{
	int r;

	WARN_ON(irqs_disabled());
	hard_irq_disable();

	while (true) {
		if (need_resched()) {
			local_irq_enable();
			cond_resched();
			hard_irq_disable();
			continue;
		}

		if (signal_pending(current)) {
			kvmppc_account_exit(vcpu, SIGNAL_EXITS);
			vcpu->run->exit_reason = KVM_EXIT_INTR;
			r = -EINTR;
			break;
		}

		vcpu->mode = IN_GUEST_MODE;

		/*
		 * Reading vcpu->requests must happen after setting vcpu->mode,
		 * so we don't miss a request because the requester sees
		 * OUTSIDE_GUEST_MODE and assumes we'll be checking requests
		 * before next entering the guest (and thus doesn't IPI).
		 */
		smp_mb();

		if (vcpu->requests) {
			/* Make sure we process requests preemptibly */
			local_irq_enable();
			trace_kvm_check_requests(vcpu);
			r = kvmppc_core_check_requests(vcpu);
			hard_irq_disable();
			if (r > 0)
				continue;
			break;
		}

		if (kvmppc_core_prepare_to_enter(vcpu)) {
			/*
			 * Interrupts got enabled in between, so we
			 * are back at square 1.
			 */
			continue;
		}

		kvm_guest_enter();
		return 1;
	}

	/* return to host */
	local_irq_enable();
	return r;
}
EXPORT_SYMBOL_GPL(kvmppc_prepare_to_enter);

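/*
 * The shared (magic) page is kept in guest endianness.  When a Book3S
 * guest switches endianness, byte-swap the fields already stored there
 * so they stay consistent with the guest's new byte order.
 */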
#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE)
static void kvmppc_swab_shared(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_arch_shared *shared = vcpu->arch.shared;
	int i;

	shared->sprg0 = swab64(shared->sprg0);
	shared->sprg1 = swab64(shared->sprg1);
	shared->sprg2 = swab64(shared->sprg2);
	shared->sprg3 = swab64(shared->sprg3);
	shared->srr0 = swab64(shared->srr0);
	shared->srr1 = swab64(shared->srr1);
	shared->dar = swab64(shared->dar);
	shared->msr = swab64(shared->msr);
	shared->dsisr = swab32(shared->dsisr);
	shared->int_pending = swab32(shared->int_pending);
	for (i = 0; i < ARRAY_SIZE(shared->sr); i++)
		shared->sr[i] = swab32(shared->sr[i]);
}
#endif

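/*
 * Handle a KVM paravirtual hypercall from the guest: the hypercall
 * number arrives in r11 and its arguments in r3-r6; the first return
 * value is our return code and the second is passed back in r4.
 */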
int kvmppc_kvm_pv(struct kvm_vcpu *vcpu)
{
	int nr = kvmppc_get_gpr(vcpu, 11);
	int r;
	unsigned long __maybe_unused param1 = kvmppc_get_gpr(vcpu, 3);
	unsigned long __maybe_unused param2 = kvmppc_get_gpr(vcpu, 4);
	unsigned long __maybe_unused param3 = kvmppc_get_gpr(vcpu, 5);
	unsigned long __maybe_unused param4 = kvmppc_get_gpr(vcpu, 6);
	unsigned long r2 = 0;

	if (!(kvmppc_get_msr(vcpu) & MSR_SF)) {
		/* 32 bit mode */
		param1 &= 0xffffffff;
		param2 &= 0xffffffff;
		param3 &= 0xffffffff;
		param4 &= 0xffffffff;
	}

	switch (nr) {
	case KVM_HCALL_TOKEN(KVM_HC_PPC_MAP_MAGIC_PAGE):
	{
#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE)
		/* Book3S can be little endian, find it out here */
		int shared_big_endian = true;
		if (vcpu->arch.intr_msr & MSR_LE)
			shared_big_endian = false;
		if (shared_big_endian != vcpu->arch.shared_big_endian)
			kvmppc_swab_shared(vcpu);
		vcpu->arch.shared_big_endian = shared_big_endian;
#endif

		if (!(param2 & MAGIC_PAGE_FLAG_NOT_MAPPED_NX)) {
			/*
			 * Older versions of the Linux magic page code had
			 * a bug where they would map their trampoline code
			 * NX. If that's the case, remove !PR NX capability.
			 */
			vcpu->arch.disable_kernel_nx = true;
			kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
		}

		vcpu->arch.magic_page_pa = param1 & ~0xfffULL;
		vcpu->arch.magic_page_ea = param2 & ~0xfffULL;

#ifdef CONFIG_PPC_64K_PAGES
		/*
		 * Make sure our 4k magic page is in the same window of a 64k
		 * page within the guest and within the host's page.
		 */
		if ((vcpu->arch.magic_page_pa & 0xf000) !=
		    ((ulong)vcpu->arch.shared & 0xf000)) {
			void *old_shared = vcpu->arch.shared;
			ulong shared = (ulong)vcpu->arch.shared;
			void *new_shared;

			shared &= PAGE_MASK;
			shared |= vcpu->arch.magic_page_pa & 0xf000;
			new_shared = (void *)shared;
			memcpy(new_shared, old_shared, 0x1000);
			vcpu->arch.shared = new_shared;
		}
#endif

		r2 = KVM_MAGIC_FEAT_SR | KVM_MAGIC_FEAT_MAS0_TO_SPRG7;

		r = EV_SUCCESS;
		break;
	}
	case KVM_HCALL_TOKEN(KVM_HC_FEATURES):
		r = EV_SUCCESS;
#if defined(CONFIG_PPC_BOOK3S) || defined(CONFIG_KVM_E500V2)
		r2 |= (1 << KVM_FEATURE_MAGIC_PAGE);
#endif

		/* Second return value is in r4 */
		break;
	case EV_HCALL_TOKEN(EV_IDLE):
		r = EV_SUCCESS;
		kvm_vcpu_block(vcpu);
		clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
		break;
	default:
		r = EV_UNIMPLEMENTED;
		break;
	}

	kvmppc_set_gpr(vcpu, 4, r2);

	return r;
}
EXPORT_SYMBOL_GPL(kvmppc_kvm_pv);

int kvmppc_sanity_check(struct kvm_vcpu *vcpu)
{
	int r = false;

	/* We have to know what CPU to virtualize */
	if (!vcpu->arch.pvr)
		goto out;

	/* PAPR only works with book3s_64 */
	if ((vcpu->arch.cpu_type != KVM_CPU_3S_64) && vcpu->arch.papr_enabled)
		goto out;

	/* HV KVM can only do PAPR mode for now */
	if (!vcpu->arch.papr_enabled && is_kvmppc_hv_enabled(vcpu->kvm))
		goto out;

#ifdef CONFIG_KVM_BOOKE_HV
	if (!cpu_has_feature(CPU_FTR_EMB_HV))
		goto out;
#endif

	r = true;

out:
	vcpu->arch.sane = r;
	return r ? 0 : -EINVAL;
}
EXPORT_SYMBOL_GPL(kvmppc_sanity_check);

int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	enum emulation_result er;
	int r;

	er = kvmppc_emulate_loadstore(vcpu);
	switch (er) {
	case EMULATE_DONE:
		/*
		 * Future optimization: only reload non-volatiles if they
		 * were actually modified.
		 */
		r = RESUME_GUEST_NV;
		break;
	case EMULATE_AGAIN:
		r = RESUME_GUEST;
		break;
	case EMULATE_DO_MMIO:
		run->exit_reason = KVM_EXIT_MMIO;
		/*
		 * We must reload nonvolatiles because "update" load/store
		 * instructions modify register state.  Future optimization:
		 * only reload non-volatiles if they were actually modified.
		 */
		r = RESUME_HOST_NV;
		break;
	case EMULATE_FAIL:
	{
		u32 last_inst;

		kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst);
		/* XXX Deliver Program interrupt to guest. */
		pr_emerg("%s: emulation failed (%08x)\n", __func__, last_inst);
		r = RESUME_HOST;
		break;
	}
	default:
		WARN_ON(1);
		r = RESUME_GUEST;
	}

	return r;
}
EXPORT_SYMBOL_GPL(kvmppc_emulate_mmio);

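/*
 * Write 'size' bytes from 'ptr' to guest effective address *eaddr,
 * translating the address first.  Stores that hit the magic page are
 * serviced from the shared page rather than through guest memory.
 */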
int kvmppc_st(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
	      bool data)
{
	ulong mp_pa = vcpu->arch.magic_page_pa & KVM_PAM & PAGE_MASK;
	struct kvmppc_pte pte;
	int r;

	vcpu->stat.st++;

	r = kvmppc_xlate(vcpu, *eaddr, data ? XLATE_DATA : XLATE_INST,
			 XLATE_WRITE, &pte);
	if (r < 0)
		return r;

	*eaddr = pte.raddr;

	if (!pte.may_write)
		return -EPERM;

	/* Magic page override */
	if (kvmppc_supports_magic_page(vcpu) && mp_pa &&
	    ((pte.raddr & KVM_PAM & PAGE_MASK) == mp_pa) &&
	    !(kvmppc_get_msr(vcpu) & MSR_PR)) {
		void *magic = vcpu->arch.shared;
		magic += pte.eaddr & 0xfff;
		memcpy(magic, ptr, size);
		return EMULATE_DONE;
	}

	if (kvm_write_guest(vcpu->kvm, pte.raddr, ptr, size))
		return EMULATE_DO_MMIO;

	return EMULATE_DONE;
}
EXPORT_SYMBOL_GPL(kvmppc_st);

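/*
 * Read 'size' bytes into 'ptr' from guest effective address *eaddr,
 * the counterpart of kvmppc_st(); reads of the magic page are likewise
 * redirected to the shared page.
 */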
int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
	      bool data)
{
	ulong mp_pa = vcpu->arch.magic_page_pa & KVM_PAM & PAGE_MASK;
	struct kvmppc_pte pte;
	int rc;

	vcpu->stat.ld++;

	rc = kvmppc_xlate(vcpu, *eaddr, data ? XLATE_DATA : XLATE_INST,
			  XLATE_READ, &pte);
	if (rc)
		return rc;

	*eaddr = pte.raddr;

	if (!pte.may_read)
		return -EPERM;

	if (!data && !pte.may_execute)
		return -ENOEXEC;

	/* Magic page override */
	if (kvmppc_supports_magic_page(vcpu) && mp_pa &&
	    ((pte.raddr & KVM_PAM & PAGE_MASK) == mp_pa) &&
	    !(kvmppc_get_msr(vcpu) & MSR_PR)) {
		void *magic = vcpu->arch.shared;
		magic += pte.eaddr & 0xfff;
		memcpy(ptr, magic, size);
		return EMULATE_DONE;
	}

	if (kvm_read_guest(vcpu->kvm, pte.raddr, ptr, size))
		return EMULATE_DO_MMIO;

	return EMULATE_DONE;
}
EXPORT_SYMBOL_GPL(kvmppc_ld);

int kvm_arch_hardware_enable(void)
{
	return 0;
}

int kvm_arch_hardware_setup(void)
{
	return 0;
}

void kvm_arch_check_processor_compat(void *rtn)
{
	*(int *)rtn = kvmppc_core_check_processor_compat();
}

int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
	struct kvmppc_ops *kvm_ops = NULL;

	/*
	 * If we have both HV and PR enabled, default is HV.
	 */
	if (type == 0) {
		if (kvmppc_hv_ops)
			kvm_ops = kvmppc_hv_ops;
		else
			kvm_ops = kvmppc_pr_ops;
		if (!kvm_ops)
			goto err_out;
	} else if (type == KVM_VM_PPC_HV) {
		if (!kvmppc_hv_ops)
			goto err_out;
		kvm_ops = kvmppc_hv_ops;
	} else if (type == KVM_VM_PPC_PR) {
		if (!kvmppc_pr_ops)
			goto err_out;
		kvm_ops = kvmppc_pr_ops;
	} else
		goto err_out;

	if (kvm_ops->owner && !try_module_get(kvm_ops->owner))
		return -ENOENT;

	kvm->arch.kvm_ops = kvm_ops;
	return kvmppc_core_init_vm(kvm);
err_out:
	return -EINVAL;
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_arch_vcpu_free(vcpu);

	mutex_lock(&kvm->lock);
	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
		kvm->vcpus[i] = NULL;

	atomic_set(&kvm->online_vcpus, 0);

	kvmppc_core_destroy_vm(kvm);

	mutex_unlock(&kvm->lock);

	/* drop the module reference */
	module_put(kvm->arch.kvm_ops->owner);
}

int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
	int r;
	/* Assume we're using HV mode when the HV module is loaded */
	int hv_enabled = kvmppc_hv_ops ? 1 : 0;

	if (kvm) {
		/*
		 * Hooray - we know which VM type we're running on. Depend on
		 * that rather than the guess above.
		 */
		hv_enabled = is_kvmppc_hv_enabled(kvm);
	}

	switch (ext) {
#ifdef CONFIG_BOOKE
	case KVM_CAP_PPC_BOOKE_SREGS:
	case KVM_CAP_PPC_BOOKE_WATCHDOG:
	case KVM_CAP_PPC_EPR:
#else
	case KVM_CAP_PPC_SEGSTATE:
	case KVM_CAP_PPC_HIOR:
	case KVM_CAP_PPC_PAPR:
#endif
	case KVM_CAP_PPC_UNSET_IRQ:
	case KVM_CAP_PPC_IRQ_LEVEL:
	case KVM_CAP_ENABLE_CAP:
	case KVM_CAP_ENABLE_CAP_VM:
	case KVM_CAP_ONE_REG:
	case KVM_CAP_IOEVENTFD:
	case KVM_CAP_DEVICE_CTRL:
		r = 1;
		break;
	case KVM_CAP_PPC_PAIRED_SINGLES:
	case KVM_CAP_PPC_OSI:
	case KVM_CAP_PPC_GET_PVINFO:
#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
	case KVM_CAP_SW_TLB:
#endif
		/* We support this only for PR */
		r = !hv_enabled;
		break;
#ifdef CONFIG_KVM_MMIO
	case KVM_CAP_COALESCED_MMIO:
		r = KVM_COALESCED_MMIO_PAGE_OFFSET;
		break;
#endif
#ifdef CONFIG_KVM_MPIC
	case KVM_CAP_IRQ_MPIC:
		r = 1;
		break;
#endif

#ifdef CONFIG_PPC_BOOK3S_64
	case KVM_CAP_SPAPR_TCE:
	case KVM_CAP_PPC_ALLOC_HTAB:
	case KVM_CAP_PPC_RTAS:
	case KVM_CAP_PPC_FIXUP_HCALL:
	case KVM_CAP_PPC_ENABLE_HCALL:
#ifdef CONFIG_KVM_XICS
	case KVM_CAP_IRQ_XICS:
#endif
		r = 1;
		break;
#endif /* CONFIG_PPC_BOOK3S_64 */
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	case KVM_CAP_PPC_SMT:
		if (hv_enabled)
			r = threads_per_subcore;
		else
			r = 0;
		break;
	case KVM_CAP_PPC_RMA:
		r = 0;
		break;
	case KVM_CAP_PPC_HWRNG:
		r = kvmppc_hwrng_present();
		break;
#endif
	case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
		r = hv_enabled;
#elif defined(KVM_ARCH_WANT_MMU_NOTIFIER)
		r = 1;
#else
		r = 0;
#endif
		break;
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	case KVM_CAP_PPC_HTAB_FD:
		r = hv_enabled;
		break;
#endif
	case KVM_CAP_NR_VCPUS:
		/*
		 * Recommending a number of CPUs is somewhat arbitrary; we
		 * return the number of present CPUs for -HV (since a host
		 * will have secondary threads "offline"), and for other KVM
		 * implementations just count online CPUs.
		 */
		if (hv_enabled)
			r = num_present_cpus();
		else
			r = num_online_cpus();
		break;
	case KVM_CAP_MAX_VCPUS:
		r = KVM_MAX_VCPUS;
		break;
#ifdef CONFIG_PPC_BOOK3S_64
	case KVM_CAP_PPC_GET_SMMU_INFO:
		r = 1;
		break;
#endif
	default:
		r = 0;
		break;
	}
	return r;
}

long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	return -EINVAL;
}

void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
			   struct kvm_memory_slot *dont)
{
	kvmppc_core_free_memslot(kvm, free, dont);
}

int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
			    unsigned long npages)
{
	return kvmppc_core_create_memslot(kvm, slot, npages);
}

int kvm_arch_prepare_memory_region(struct kvm *kvm,
				   struct kvm_memory_slot *memslot,
				   struct kvm_userspace_memory_region *mem,
				   enum kvm_mr_change change)
{
	return kvmppc_core_prepare_memory_region(kvm, memslot, mem);
}

void kvm_arch_commit_memory_region(struct kvm *kvm,
				   struct kvm_userspace_memory_region *mem,
				   const struct kvm_memory_slot *old,
				   enum kvm_mr_change change)
{
	kvmppc_core_commit_memory_region(kvm, mem, old);
}

void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
				   struct kvm_memory_slot *slot)
{
	kvmppc_core_flush_memslot(kvm, slot);
}

struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
{
	struct kvm_vcpu *vcpu;

	vcpu = kvmppc_core_vcpu_create(kvm, id);
	if (!IS_ERR(vcpu)) {
		vcpu->arch.wqp = &vcpu->wq;
		kvmppc_create_vcpu_debugfs(vcpu, id);
	}
	return vcpu;
}

void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
}

void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
{
	/* Make sure we're not using the vcpu anymore */
	hrtimer_cancel(&vcpu->arch.dec_timer);

	kvmppc_remove_vcpu_debugfs(vcpu);

	switch (vcpu->arch.irq_type) {
	case KVMPPC_IRQ_MPIC:
		kvmppc_mpic_disconnect_vcpu(vcpu->arch.mpic, vcpu);
		break;
	case KVMPPC_IRQ_XICS:
		kvmppc_xics_free_icp(vcpu);
		break;
	}

	kvmppc_core_vcpu_free(vcpu);
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	kvm_arch_vcpu_free(vcpu);
}

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
	return kvmppc_core_pending_dec(vcpu);
}

enum hrtimer_restart kvmppc_decrementer_wakeup(struct hrtimer *timer)
{
	struct kvm_vcpu *vcpu;

	vcpu = container_of(timer, struct kvm_vcpu, arch.dec_timer);
	kvmppc_decrementer_func(vcpu);

	return HRTIMER_NORESTART;
}

int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	int ret;

	hrtimer_init(&vcpu->arch.dec_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
	vcpu->arch.dec_timer.function = kvmppc_decrementer_wakeup;
	vcpu->arch.dec_expires = ~(u64)0;

#ifdef CONFIG_KVM_EXIT_TIMING
	mutex_init(&vcpu->arch.exit_timing_lock);
#endif
	ret = kvmppc_subarch_vcpu_init(vcpu);
	return ret;
}

void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	kvmppc_mmu_destroy(vcpu);
	kvmppc_subarch_vcpu_uninit(vcpu);
}

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
#ifdef CONFIG_BOOKE
	/*
	 * vrsave (formerly usprg0) isn't used by Linux, but may
	 * be used by the guest.
	 *
	 * On non-booke this is associated with Altivec and
	 * is handled by code in book3s.c.
	 */
	mtspr(SPRN_VRSAVE, vcpu->arch.vrsave);
#endif
	kvmppc_core_vcpu_load(vcpu, cpu);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	kvmppc_core_vcpu_put(vcpu);
#ifdef CONFIG_BOOKE
	vcpu->arch.vrsave = mfspr(SPRN_VRSAVE);
#endif
}

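/*
 * Complete an MMIO load: copy the bytes the host just read into the
 * destination register, byte-swapping and sign-extending them as the
 * emulated load instruction requires.
 */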
static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu,
				      struct kvm_run *run)
{
	u64 uninitialized_var(gpr);

	if (run->mmio.len > sizeof(gpr)) {
		printk(KERN_ERR "bad MMIO length: %d\n", run->mmio.len);
		return;
	}

	if (!vcpu->arch.mmio_host_swabbed) {
		switch (run->mmio.len) {
		case 8: gpr = *(u64 *)run->mmio.data; break;
		case 4: gpr = *(u32 *)run->mmio.data; break;
		case 2: gpr = *(u16 *)run->mmio.data; break;
		case 1: gpr = *(u8 *)run->mmio.data; break;
		}
	} else {
		switch (run->mmio.len) {
		case 8: gpr = swab64(*(u64 *)run->mmio.data); break;
		case 4: gpr = swab32(*(u32 *)run->mmio.data); break;
		case 2: gpr = swab16(*(u16 *)run->mmio.data); break;
		case 1: gpr = *(u8 *)run->mmio.data; break;
		}
	}

	if (vcpu->arch.mmio_sign_extend) {
		switch (run->mmio.len) {
#ifdef CONFIG_PPC64
		case 4:
			gpr = (s64)(s32)gpr;
			break;
#endif
		case 2:
			gpr = (s64)(s16)gpr;
			break;
		case 1:
			gpr = (s64)(s8)gpr;
			break;
		}
	}

	/*
	 * io_gpr carries extension bits selecting the destination register
	 * file, so only write a GPR in the KVM_MMIO_REG_GPR case below.
	 */
	switch (vcpu->arch.io_gpr & KVM_MMIO_REG_EXT_MASK) {
	case KVM_MMIO_REG_GPR:
		kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr);
		break;
	case KVM_MMIO_REG_FPR:
		VCPU_FPR(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK) = gpr;
		break;
#ifdef CONFIG_PPC_BOOK3S
	case KVM_MMIO_REG_QPR:
		vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
		break;
	case KVM_MMIO_REG_FQPR:
		VCPU_FPR(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK) = gpr;
		vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
		break;
#endif
	default:
		BUG();
	}
}

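/*
 * Emulate a guest load: if an in-kernel device backs the address, the
 * access completes here (EMULATE_DONE); otherwise the access is
 * described in run->mmio and handed to userspace (EMULATE_DO_MMIO).
 */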
int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
		       unsigned int rt, unsigned int bytes,
		       int is_default_endian)
{
	int idx, ret;
	bool host_swabbed;

	/* Pity C doesn't have a logical XOR operator */
	if (kvmppc_need_byteswap(vcpu)) {
		host_swabbed = is_default_endian;
	} else {
		host_swabbed = !is_default_endian;
	}

	if (bytes > sizeof(run->mmio.data)) {
		printk(KERN_ERR "%s: bad MMIO length: %u\n", __func__,
		       bytes);
	}

	run->mmio.phys_addr = vcpu->arch.paddr_accessed;
	run->mmio.len = bytes;
	run->mmio.is_write = 0;

	vcpu->arch.io_gpr = rt;
	vcpu->arch.mmio_host_swabbed = host_swabbed;
	vcpu->mmio_needed = 1;
	vcpu->mmio_is_write = 0;
	vcpu->arch.mmio_sign_extend = 0;

	idx = srcu_read_lock(&vcpu->kvm->srcu);

	ret = kvm_io_bus_read(vcpu, KVM_MMIO_BUS, run->mmio.phys_addr,
			      bytes, &run->mmio.data);

	srcu_read_unlock(&vcpu->kvm->srcu, idx);

	if (!ret) {
		kvmppc_complete_mmio_load(vcpu, run);
		vcpu->mmio_needed = 0;
		return EMULATE_DONE;
	}

	return EMULATE_DO_MMIO;
}
EXPORT_SYMBOL_GPL(kvmppc_handle_load);

/* Same as above, but sign extends */
int kvmppc_handle_loads(struct kvm_run *run, struct kvm_vcpu *vcpu,
			unsigned int rt, unsigned int bytes,
			int is_default_endian)
{
	int r;

	vcpu->arch.mmio_sign_extend = 1;
	r = kvmppc_handle_load(run, vcpu, rt, bytes, is_default_endian);

	return r;
}

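/*
 * Emulate a guest store: the value is byte-swapped into run->mmio.data
 * as needed and either completed against an in-kernel device or handed
 * to userspace, mirroring kvmppc_handle_load().
 */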
int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
			u64 val, unsigned int bytes, int is_default_endian)
{
	void *data = run->mmio.data;
	int idx, ret;
	bool host_swabbed;

	/* Pity C doesn't have a logical XOR operator */
	if (kvmppc_need_byteswap(vcpu)) {
		host_swabbed = is_default_endian;
	} else {
		host_swabbed = !is_default_endian;
	}

	if (bytes > sizeof(run->mmio.data)) {
		printk(KERN_ERR "%s: bad MMIO length: %u\n", __func__,
		       bytes);
	}

	run->mmio.phys_addr = vcpu->arch.paddr_accessed;
	run->mmio.len = bytes;
	run->mmio.is_write = 1;
	vcpu->mmio_needed = 1;
	vcpu->mmio_is_write = 1;

	/* Store the value at the lowest bytes in 'data'. */
	if (!host_swabbed) {
		switch (bytes) {
		case 8: *(u64 *)data = val; break;
		case 4: *(u32 *)data = val; break;
		case 2: *(u16 *)data = val; break;
		case 1: *(u8  *)data = val; break;
		}
	} else {
		switch (bytes) {
		case 8: *(u64 *)data = swab64(val); break;
		case 4: *(u32 *)data = swab32(val); break;
		case 2: *(u16 *)data = swab16(val); break;
		case 1: *(u8  *)data = val; break;
		}
	}

	idx = srcu_read_lock(&vcpu->kvm->srcu);

	ret = kvm_io_bus_write(vcpu, KVM_MMIO_BUS, run->mmio.phys_addr,
			       bytes, &run->mmio.data);

	srcu_read_unlock(&vcpu->kvm->srcu, idx);

	if (!ret) {
		vcpu->mmio_needed = 0;
		return EMULATE_DONE;
	}

	return EMULATE_DO_MMIO;
}
EXPORT_SYMBOL_GPL(kvmppc_handle_store);

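/*
 * ONE_REG accessors: the subarch backend gets first chance at each
 * register id; ids it rejects with -EINVAL are the generic ones
 * (currently the Altivec set) handled here.
 */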
int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
{
	int r = 0;
	union kvmppc_one_reg val;
	int size;

	size = one_reg_size(reg->id);
	if (size > sizeof(val))
		return -EINVAL;

	r = kvmppc_get_one_reg(vcpu, reg->id, &val);
	if (r == -EINVAL) {
		r = 0;
		switch (reg->id) {
#ifdef CONFIG_ALTIVEC
		case KVM_REG_PPC_VR0 ... KVM_REG_PPC_VR31:
			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
				r = -ENXIO;
				break;
			}
			val.vval = vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0];
			break;
		case KVM_REG_PPC_VSCR:
			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
				r = -ENXIO;
				break;
			}
			val = get_reg_val(reg->id, vcpu->arch.vr.vscr.u[3]);
			break;
		case KVM_REG_PPC_VRSAVE:
			val = get_reg_val(reg->id, vcpu->arch.vrsave);
			break;
#endif /* CONFIG_ALTIVEC */
		default:
			r = -EINVAL;
			break;
		}
	}

	if (r)
		return r;

	if (copy_to_user((char __user *)(unsigned long)reg->addr, &val, size))
		r = -EFAULT;

	return r;
}

int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
{
	int r;
	union kvmppc_one_reg val;
	int size;

	size = one_reg_size(reg->id);
	if (size > sizeof(val))
		return -EINVAL;

	if (copy_from_user(&val, (char __user *)(unsigned long)reg->addr, size))
		return -EFAULT;

	r = kvmppc_set_one_reg(vcpu, reg->id, &val);
	if (r == -EINVAL) {
		r = 0;
		switch (reg->id) {
#ifdef CONFIG_ALTIVEC
		case KVM_REG_PPC_VR0 ... KVM_REG_PPC_VR31:
			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
				r = -ENXIO;
				break;
			}
			vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0] = val.vval;
			break;
		case KVM_REG_PPC_VSCR:
			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
				r = -ENXIO;
				break;
			}
			vcpu->arch.vr.vscr.u[3] = set_reg_val(reg->id, val);
			break;
		case KVM_REG_PPC_VRSAVE:
			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
				r = -ENXIO;
				break;
			}
			vcpu->arch.vrsave = set_reg_val(reg->id, val);
			break;
#endif /* CONFIG_ALTIVEC */
		default:
			r = -EINVAL;
			break;
		}
	}

	return r;
}

int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	int r;
	sigset_t sigsaved;

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

	if (vcpu->mmio_needed) {
		if (!vcpu->mmio_is_write)
			kvmppc_complete_mmio_load(vcpu, run);
		vcpu->mmio_needed = 0;
	} else if (vcpu->arch.osi_needed) {
		u64 *gprs = run->osi.gprs;
		int i;

		for (i = 0; i < 32; i++)
			kvmppc_set_gpr(vcpu, i, gprs[i]);
		vcpu->arch.osi_needed = 0;
	} else if (vcpu->arch.hcall_needed) {
		int i;

		kvmppc_set_gpr(vcpu, 3, run->papr_hcall.ret);
		for (i = 0; i < 9; ++i)
			kvmppc_set_gpr(vcpu, 4 + i, run->papr_hcall.args[i]);
		vcpu->arch.hcall_needed = 0;
#ifdef CONFIG_BOOKE
	} else if (vcpu->arch.epr_needed) {
		kvmppc_set_epr(vcpu, run->epr.epr);
		vcpu->arch.epr_needed = 0;
#endif
	}

	r = kvmppc_vcpu_run(run, vcpu);

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	return r;
}

int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq)
{
	if (irq->irq == KVM_INTERRUPT_UNSET) {
		kvmppc_core_dequeue_external(vcpu);
		return 0;
	}

	kvmppc_core_queue_external(vcpu, irq);

	kvm_vcpu_kick(vcpu);

	return 0;
}

static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
				     struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_PPC_OSI:
		r = 0;
		vcpu->arch.osi_enabled = true;
		break;
	case KVM_CAP_PPC_PAPR:
		r = 0;
		vcpu->arch.papr_enabled = true;
		break;
	case KVM_CAP_PPC_EPR:
		r = 0;
		if (cap->args[0])
			vcpu->arch.epr_flags |= KVMPPC_EPR_USER;
		else
			vcpu->arch.epr_flags &= ~KVMPPC_EPR_USER;
		break;
#ifdef CONFIG_BOOKE
	case KVM_CAP_PPC_BOOKE_WATCHDOG:
		r = 0;
		vcpu->arch.watchdog_enabled = true;
		break;
#endif
#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
	case KVM_CAP_SW_TLB: {
		struct kvm_config_tlb cfg;
		void __user *user_ptr = (void __user *)(uintptr_t)cap->args[0];

		r = -EFAULT;
		if (copy_from_user(&cfg, user_ptr, sizeof(cfg)))
			break;

		r = kvm_vcpu_ioctl_config_tlb(vcpu, &cfg);
		break;
	}
#endif
#ifdef CONFIG_KVM_MPIC
	case KVM_CAP_IRQ_MPIC: {
		struct fd f;
		struct kvm_device *dev;

		r = -EBADF;
		f = fdget(cap->args[0]);
		if (!f.file)
			break;

		r = -EPERM;
		dev = kvm_device_from_filp(f.file);
		if (dev)
			r = kvmppc_mpic_connect_vcpu(dev, vcpu, cap->args[1]);

		fdput(f);
		break;
	}
#endif
#ifdef CONFIG_KVM_XICS
	case KVM_CAP_IRQ_XICS: {
		struct fd f;
		struct kvm_device *dev;

		r = -EBADF;
		f = fdget(cap->args[0]);
		if (!f.file)
			break;

		r = -EPERM;
		dev = kvm_device_from_filp(f.file);
		if (dev)
			r = kvmppc_xics_connect_vcpu(dev, vcpu, cap->args[1]);

		fdput(f);
		break;
	}
#endif /* CONFIG_KVM_XICS */
	default:
		r = -EINVAL;
		break;
	}

	if (!r)
		r = kvmppc_sanity_check(vcpu);

	return r;
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL;
}

long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	long r;

	switch (ioctl) {
	case KVM_INTERRUPT: {
		struct kvm_interrupt irq;

		r = -EFAULT;
		if (copy_from_user(&irq, argp, sizeof(irq)))
			goto out;
		r = kvm_vcpu_ioctl_interrupt(vcpu, &irq);
		goto out;
	}

	case KVM_ENABLE_CAP:
	{
		struct kvm_enable_cap cap;

		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			goto out;
		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
		break;
	}

	case KVM_SET_ONE_REG:
	case KVM_GET_ONE_REG:
	{
		struct kvm_one_reg reg;

		r = -EFAULT;
		if (copy_from_user(&reg, argp, sizeof(reg)))
			goto out;
		if (ioctl == KVM_SET_ONE_REG)
			r = kvm_vcpu_ioctl_set_one_reg(vcpu, &reg);
		else
			r = kvm_vcpu_ioctl_get_one_reg(vcpu, &reg);
		break;
	}

#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
	case KVM_DIRTY_TLB: {
		struct kvm_dirty_tlb dirty;

		r = -EFAULT;
		if (copy_from_user(&dirty, argp, sizeof(dirty)))
			goto out;
		r = kvm_vcpu_ioctl_dirty_tlb(vcpu, &dirty);
		break;
	}
#endif
	default:
		r = -EINVAL;
	}

out:
	return r;
}

int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
	return VM_FAULT_SIGBUS;
}

static int kvm_vm_ioctl_get_pvinfo(struct kvm_ppc_pvinfo *pvinfo)
{
	u32 inst_nop = 0x60000000;
#ifdef CONFIG_KVM_BOOKE_HV
	u32 inst_sc1 = 0x44000022;

	pvinfo->hcall[0] = cpu_to_be32(inst_sc1);
	pvinfo->hcall[1] = cpu_to_be32(inst_nop);
	pvinfo->hcall[2] = cpu_to_be32(inst_nop);
	pvinfo->hcall[3] = cpu_to_be32(inst_nop);
#else
	u32 inst_lis = 0x3c000000;
	u32 inst_ori = 0x60000000;
	u32 inst_sc = 0x44000002;
	u32 inst_imm_mask = 0xffff;

	/*
	 * The hypercall to get into KVM from within guest context is as
	 * follows:
	 *
	 *    lis r0, KVM_SC_MAGIC_R0@h
	 *    ori r0, r0, KVM_SC_MAGIC_R0@l
	 *    sc
	 *    nop
	 */
	pvinfo->hcall[0] = cpu_to_be32(inst_lis | ((KVM_SC_MAGIC_R0 >> 16) & inst_imm_mask));
	pvinfo->hcall[1] = cpu_to_be32(inst_ori | (KVM_SC_MAGIC_R0 & inst_imm_mask));
	pvinfo->hcall[2] = cpu_to_be32(inst_sc);
	pvinfo->hcall[3] = cpu_to_be32(inst_nop);
#endif

	pvinfo->flags = KVM_PPC_PVINFO_FLAGS_EV_IDLE;

	return 0;
}

int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_event,
			  bool line_status)
{
	if (!irqchip_in_kernel(kvm))
		return -ENXIO;

	irq_event->status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID,
					irq_event->irq, irq_event->level,
					line_status);
	return 0;
}

static int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
				   struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
	case KVM_CAP_PPC_ENABLE_HCALL: {
		unsigned long hcall = cap->args[0];

		r = -EINVAL;
		if (hcall > MAX_HCALL_OPCODE || (hcall & 3) ||
		    cap->args[1] > 1)
			break;
		if (!kvmppc_book3s_hcall_implemented(kvm, hcall))
			break;
		if (cap->args[1])
			set_bit(hcall / 4, kvm->arch.enabled_hcalls);
		else
			clear_bit(hcall / 4, kvm->arch.enabled_hcalls);
		r = 0;
		break;
	}
#endif
	default:
		r = -EINVAL;
		break;
	}

	return r;
}

long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm __maybe_unused = filp->private_data;
	void __user *argp = (void __user *)arg;
	long r;

	switch (ioctl) {
	case KVM_PPC_GET_PVINFO: {
		struct kvm_ppc_pvinfo pvinfo;

		memset(&pvinfo, 0, sizeof(pvinfo));
		r = kvm_vm_ioctl_get_pvinfo(&pvinfo);
		if (copy_to_user(argp, &pvinfo, sizeof(pvinfo))) {
			r = -EFAULT;
			goto out;
		}

		break;
	}
	case KVM_ENABLE_CAP:
	{
		struct kvm_enable_cap cap;

		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			goto out;
		r = kvm_vm_ioctl_enable_cap(kvm, &cap);
		break;
	}
#ifdef CONFIG_PPC_BOOK3S_64
	case KVM_CREATE_SPAPR_TCE: {
		struct kvm_create_spapr_tce create_tce;

		r = -EFAULT;
		if (copy_from_user(&create_tce, argp, sizeof(create_tce)))
			goto out;
		r = kvm_vm_ioctl_create_spapr_tce(kvm, &create_tce);
		goto out;
	}
	case KVM_PPC_GET_SMMU_INFO: {
		struct kvm_ppc_smmu_info info;
		struct kvm *kvm = filp->private_data;

		memset(&info, 0, sizeof(info));
		r = kvm->arch.kvm_ops->get_smmu_info(kvm, &info);
		if (r >= 0 && copy_to_user(argp, &info, sizeof(info)))
			r = -EFAULT;
		break;
	}
	case KVM_PPC_RTAS_DEFINE_TOKEN: {
		struct kvm *kvm = filp->private_data;

		r = kvm_vm_ioctl_rtas_define_token(kvm, argp);
		break;
	}
	default: {
		struct kvm *kvm = filp->private_data;

		r = kvm->arch.kvm_ops->arch_vm_ioctl(filp, ioctl, arg);
	}
#else /* CONFIG_PPC_BOOK3S_64 */
	default:
		r = -ENOTTY;
#endif
	}
out:
	return r;
}

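/*
 * Simple bitmap allocator for hardware LPID (logical partition ID)
 * values.  kvmppc_init_lpid() sets how many IDs are usable;
 * kvmppc_claim_lpid() reserves an ID that was assigned elsewhere.
 */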
static unsigned long lpid_inuse[BITS_TO_LONGS(KVMPPC_NR_LPIDS)];
static unsigned long nr_lpids;

long kvmppc_alloc_lpid(void)
{
	long lpid;

	do {
		lpid = find_first_zero_bit(lpid_inuse, KVMPPC_NR_LPIDS);
		if (lpid >= nr_lpids) {
			pr_err("%s: No LPIDs free\n", __func__);
			return -ENOMEM;
		}
	} while (test_and_set_bit(lpid, lpid_inuse));

	return lpid;
}
EXPORT_SYMBOL_GPL(kvmppc_alloc_lpid);

void kvmppc_claim_lpid(long lpid)
{
	set_bit(lpid, lpid_inuse);
}
EXPORT_SYMBOL_GPL(kvmppc_claim_lpid);

void kvmppc_free_lpid(long lpid)
{
	clear_bit(lpid, lpid_inuse);
}
EXPORT_SYMBOL_GPL(kvmppc_free_lpid);

void kvmppc_init_lpid(unsigned long nr_lpids_param)
{
	nr_lpids = min_t(unsigned long, KVMPPC_NR_LPIDS, nr_lpids_param);
	memset(lpid_inuse, 0, sizeof(lpid_inuse));
}
EXPORT_SYMBOL_GPL(kvmppc_init_lpid);

int kvm_arch_init(void *opaque)
{
	return 0;
}

EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_ppc_instr);