1/*
2* This file is subject to the terms and conditions of the GNU General Public
3* License.  See the file "COPYING" in the main directory of this archive
4* for more details.
5*
6* Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
7* Authors: Sanjay Lal <sanjayl@kymasys.com>
8*/
9
10#ifndef __MIPS_KVM_HOST_H__
11#define __MIPS_KVM_HOST_H__
12
13#include <linux/mutex.h>
14#include <linux/hrtimer.h>
15#include <linux/interrupt.h>
16#include <linux/types.h>
17#include <linux/kvm.h>
18#include <linux/kvm_types.h>
19#include <linux/threads.h>
20#include <linux/spinlock.h>
21
22/* MIPS KVM register ids */
23#define MIPS_CP0_32(_R, _S)					\
24	(KVM_REG_MIPS_CP0 | KVM_REG_SIZE_U32 | (8 * (_R) + (_S)))
25
26#define MIPS_CP0_64(_R, _S)					\
27	(KVM_REG_MIPS_CP0 | KVM_REG_SIZE_U64 | (8 * (_R) + (_S)))
28
29#define KVM_REG_MIPS_CP0_INDEX		MIPS_CP0_32(0, 0)
30#define KVM_REG_MIPS_CP0_ENTRYLO0	MIPS_CP0_64(2, 0)
31#define KVM_REG_MIPS_CP0_ENTRYLO1	MIPS_CP0_64(3, 0)
32#define KVM_REG_MIPS_CP0_CONTEXT	MIPS_CP0_64(4, 0)
33#define KVM_REG_MIPS_CP0_USERLOCAL	MIPS_CP0_64(4, 2)
34#define KVM_REG_MIPS_CP0_PAGEMASK	MIPS_CP0_32(5, 0)
35#define KVM_REG_MIPS_CP0_PAGEGRAIN	MIPS_CP0_32(5, 1)
36#define KVM_REG_MIPS_CP0_WIRED		MIPS_CP0_32(6, 0)
37#define KVM_REG_MIPS_CP0_HWRENA		MIPS_CP0_32(7, 0)
38#define KVM_REG_MIPS_CP0_BADVADDR	MIPS_CP0_64(8, 0)
39#define KVM_REG_MIPS_CP0_COUNT		MIPS_CP0_32(9, 0)
40#define KVM_REG_MIPS_CP0_ENTRYHI	MIPS_CP0_64(10, 0)
41#define KVM_REG_MIPS_CP0_COMPARE	MIPS_CP0_32(11, 0)
42#define KVM_REG_MIPS_CP0_STATUS		MIPS_CP0_32(12, 0)
43#define KVM_REG_MIPS_CP0_CAUSE		MIPS_CP0_32(13, 0)
44#define KVM_REG_MIPS_CP0_EPC		MIPS_CP0_64(14, 0)
45#define KVM_REG_MIPS_CP0_PRID		MIPS_CP0_32(15, 0)
46#define KVM_REG_MIPS_CP0_EBASE		MIPS_CP0_64(15, 1)
47#define KVM_REG_MIPS_CP0_CONFIG		MIPS_CP0_32(16, 0)
48#define KVM_REG_MIPS_CP0_CONFIG1	MIPS_CP0_32(16, 1)
49#define KVM_REG_MIPS_CP0_CONFIG2	MIPS_CP0_32(16, 2)
50#define KVM_REG_MIPS_CP0_CONFIG3	MIPS_CP0_32(16, 3)
51#define KVM_REG_MIPS_CP0_CONFIG4	MIPS_CP0_32(16, 4)
52#define KVM_REG_MIPS_CP0_CONFIG5	MIPS_CP0_32(16, 5)
53#define KVM_REG_MIPS_CP0_CONFIG7	MIPS_CP0_32(16, 7)
54#define KVM_REG_MIPS_CP0_XCONTEXT	MIPS_CP0_64(20, 0)
55#define KVM_REG_MIPS_CP0_ERROREPC	MIPS_CP0_64(30, 0)
56
57
58#define KVM_MAX_VCPUS		1
59#define KVM_USER_MEM_SLOTS	8
/* memory slots that are not exposed to userspace */
61#define KVM_PRIVATE_MEM_SLOTS 	0
62
63#define KVM_COALESCED_MMIO_PAGE_OFFSET 1
64
65
66
67/* Special address that contains the comm page, used for reducing # of traps */
68#define KVM_GUEST_COMMPAGE_ADDR		0x0
69
70#define KVM_GUEST_KERNEL_MODE(vcpu)	((kvm_read_c0_guest_status(vcpu->arch.cop0) & (ST0_EXL | ST0_ERL)) || \
71					((kvm_read_c0_guest_status(vcpu->arch.cop0) & KSU_USER) == 0))
72
73#define KVM_GUEST_KUSEG			0x00000000UL
74#define KVM_GUEST_KSEG0			0x40000000UL
75#define KVM_GUEST_KSEG23		0x60000000UL
76#define KVM_GUEST_KSEGX(a)		((_ACAST32_(a)) & 0x60000000)
77#define KVM_GUEST_CPHYSADDR(a)		((_ACAST32_(a)) & 0x1fffffff)
78
/*
 * Compressed-KSEG variants: fold a guest address down to its physical
 * portion, then tag it with the compressed guest segment base.
 *
 * NOTE(review): KVM_GUEST_KSEG1 is never defined in this header (the guest
 * segment map only defines KUSEG/KSEG0/KSEG23 above), so the *KSEG1ADDR()
 * macros will fail to compile if ever expanded — confirm they are dead code.
 */
#define KVM_GUEST_CKSEG0ADDR(a)		(KVM_GUEST_CPHYSADDR(a) | KVM_GUEST_KSEG0)
#define KVM_GUEST_CKSEG1ADDR(a)		(KVM_GUEST_CPHYSADDR(a) | KVM_GUEST_KSEG1)
#define KVM_GUEST_CKSEG23ADDR(a)	(KVM_GUEST_CPHYSADDR(a) | KVM_GUEST_KSEG23)

/*
 * Map an address to a certain kernel segment
 */
#define KVM_GUEST_KSEG0ADDR(a)		(KVM_GUEST_CPHYSADDR(a) | KVM_GUEST_KSEG0)
#define KVM_GUEST_KSEG1ADDR(a)		(KVM_GUEST_CPHYSADDR(a) | KVM_GUEST_KSEG1)
#define KVM_GUEST_KSEG23ADDR(a)		(KVM_GUEST_CPHYSADDR(a) | KVM_GUEST_KSEG23)
89
90#define KVM_INVALID_PAGE		0xdeadbeef
91#define KVM_INVALID_INST		0xdeadbeef
92#define KVM_INVALID_ADDR		0xdeadbeef
93
94#define KVM_MALTA_GUEST_RTC_ADDR	0xb8000070UL
95
/* Guest timer ticks per jiffy, assuming a 40 MHz guest Count rate */
#define GUEST_TICKS_PER_JIFFY		(40000000/HZ)
/*
 * Convert milliseconds to nanoseconds.  The argument is parenthesized so
 * that expressions such as MS_TO_NS(a + b) evaluate correctly (the old
 * form expanded to "a + b * 1E6L").
 */
#define MS_TO_NS(x)			((x) * 1E6L)
98
99#define CAUSEB_DC			27
100#define CAUSEF_DC			(_ULCAST_(1) << 27)
101
102extern atomic_t kvm_mips_instance;
103extern pfn_t(*kvm_mips_gfn_to_pfn) (struct kvm *kvm, gfn_t gfn);
104extern void (*kvm_mips_release_pfn_clean) (pfn_t pfn);
105extern bool(*kvm_mips_is_error_pfn) (pfn_t pfn);
106
/* VM-wide statistics counters (exported by generic KVM code). */
struct kvm_vm_stat {
	u32 remote_tlb_flush;
};
110
/*
 * Per-VCPU exit statistics.  One counter per guest exit reason; the field
 * order mirrors enum kvm_mips_exit_types below.
 */
struct kvm_vcpu_stat {
	u32 wait_exits;
	u32 cache_exits;
	u32 signal_exits;
	u32 int_exits;
	u32 cop_unusable_exits;
	u32 tlbmod_exits;
	u32 tlbmiss_ld_exits;
	u32 tlbmiss_st_exits;
	u32 addrerr_st_exits;
	u32 addrerr_ld_exits;
	u32 syscall_exits;
	u32 resvd_inst_exits;
	u32 break_inst_exits;
	u32 trap_inst_exits;
	u32 msa_fpe_exits;
	u32 fpe_exits;
	u32 msa_disabled_exits;
	u32 flush_dcache_exits;
	/* Generic KVM halt-polling counters */
	u32 halt_successful_poll;
	u32 halt_wakeup;
};
133
/*
 * Guest exit reasons.  Ordered to match the counters in
 * struct kvm_vcpu_stat above; keep the two in sync.
 */
enum kvm_mips_exit_types {
	WAIT_EXITS,
	CACHE_EXITS,
	SIGNAL_EXITS,
	INT_EXITS,
	COP_UNUSABLE_EXITS,
	TLBMOD_EXITS,
	TLBMISS_LD_EXITS,
	TLBMISS_ST_EXITS,
	ADDRERR_ST_EXITS,
	ADDRERR_LD_EXITS,
	SYSCALL_EXITS,
	RESVD_INST_EXITS,
	BREAK_INST_EXITS,
	TRAP_INST_EXITS,
	MSA_FPE_EXITS,
	FPE_EXITS,
	MSA_DISABLED_EXITS,
	FLUSH_DCACHE_EXITS,
	MAX_KVM_MIPS_EXIT_TYPES		/* number of exit types */
};
155
/* MIPS keeps no per-memslot architecture state. */
struct kvm_arch_memory_slot {
};
158
/* Per-VM architecture state. */
struct kvm_arch {
	/* Guest GVA->HPA page table */
	unsigned long *guest_pmap;
	/* Number of entries in guest_pmap */
	unsigned long guest_pmap_npages;

	/* Wired host TLB used for the commpage */
	int commpage_tlb;
};
167
168#define N_MIPS_COPROC_REGS	32
169#define N_MIPS_COPROC_SEL	8
170
/*
 * Shadow of the guest's Coprocessor 0 register file, indexed as
 * reg[register][select].
 */
struct mips_coproc {
	unsigned long reg[N_MIPS_COPROC_REGS][N_MIPS_COPROC_SEL];
#ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
	/* Debug counters, one per (register, select) pair */
	unsigned long stat[N_MIPS_COPROC_REGS][N_MIPS_COPROC_SEL];
#endif
};
177
178/*
179 * Coprocessor 0 register names
180 */
181#define MIPS_CP0_TLB_INDEX	0
182#define MIPS_CP0_TLB_RANDOM	1
183#define MIPS_CP0_TLB_LOW	2
184#define MIPS_CP0_TLB_LO0	2
185#define MIPS_CP0_TLB_LO1	3
186#define MIPS_CP0_TLB_CONTEXT	4
187#define MIPS_CP0_TLB_PG_MASK	5
188#define MIPS_CP0_TLB_WIRED	6
189#define MIPS_CP0_HWRENA		7
190#define MIPS_CP0_BAD_VADDR	8
191#define MIPS_CP0_COUNT		9
192#define MIPS_CP0_TLB_HI		10
193#define MIPS_CP0_COMPARE	11
194#define MIPS_CP0_STATUS		12
195#define MIPS_CP0_CAUSE		13
196#define MIPS_CP0_EXC_PC		14
197#define MIPS_CP0_PRID		15
198#define MIPS_CP0_CONFIG		16
199#define MIPS_CP0_LLADDR		17
200#define MIPS_CP0_WATCH_LO	18
201#define MIPS_CP0_WATCH_HI	19
202#define MIPS_CP0_TLB_XCONTEXT	20
203#define MIPS_CP0_ECC		26
204#define MIPS_CP0_CACHE_ERR	27
205#define MIPS_CP0_TAG_LO		28
206#define MIPS_CP0_TAG_HI		29
207#define MIPS_CP0_ERROR_PC	30
208#define MIPS_CP0_DEBUG		23
209#define MIPS_CP0_DEPC		24
210#define MIPS_CP0_PERFCNT	25
211#define MIPS_CP0_ERRCTL		26
212#define MIPS_CP0_DATA_LO	28
213#define MIPS_CP0_DATA_HI	29
214#define MIPS_CP0_DESAVE		31
215
216#define MIPS_CP0_CONFIG_SEL	0
217#define MIPS_CP0_CONFIG1_SEL	1
218#define MIPS_CP0_CONFIG2_SEL	2
219#define MIPS_CP0_CONFIG3_SEL	3
220#define MIPS_CP0_CONFIG4_SEL	4
221#define MIPS_CP0_CONFIG5_SEL	5
222
223/* Config0 register bits */
224#define CP0C0_M			31
225#define CP0C0_K23		28
226#define CP0C0_KU		25
227#define CP0C0_MDU		20
228#define CP0C0_MM		17
229#define CP0C0_BM		16
230#define CP0C0_BE		15
231#define CP0C0_AT		13
232#define CP0C0_AR		10
233#define CP0C0_MT		7
234#define CP0C0_VI		3
235#define CP0C0_K0		0
236
237/* Config1 register bits */
238#define CP0C1_M			31
239#define CP0C1_MMU		25
240#define CP0C1_IS		22
241#define CP0C1_IL		19
242#define CP0C1_IA		16
243#define CP0C1_DS		13
244#define CP0C1_DL		10
245#define CP0C1_DA		7
246#define CP0C1_C2		6
247#define CP0C1_MD		5
248#define CP0C1_PC		4
249#define CP0C1_WR		3
250#define CP0C1_CA		2
251#define CP0C1_EP		1
252#define CP0C1_FP		0
253
254/* Config2 Register bits */
255#define CP0C2_M			31
256#define CP0C2_TU		28
257#define CP0C2_TS		24
258#define CP0C2_TL		20
259#define CP0C2_TA		16
260#define CP0C2_SU		12
261#define CP0C2_SS		8
262#define CP0C2_SL		4
263#define CP0C2_SA		0
264
265/* Config3 Register bits */
266#define CP0C3_M			31
267#define CP0C3_ISA_ON_EXC	16
268#define CP0C3_ULRI		13
269#define CP0C3_DSPP		10
270#define CP0C3_LPA		7
271#define CP0C3_VEIC		6
272#define CP0C3_VInt		5
273#define CP0C3_SP		4
274#define CP0C3_MT		2
275#define CP0C3_SM		1
276#define CP0C3_TL		0
277
/*
 * MMU types.  The first four entries have the same encoding as the
 * CP0C0_MT field of the Config0 register.
 */
enum mips_mmu_types {
	MMU_TYPE_NONE,
	MMU_TYPE_R4000,
	MMU_TYPE_RESERVED,
	MMU_TYPE_FMT,
	MMU_TYPE_R3000,
	MMU_TYPE_R6000,
	MMU_TYPE_R8000
};
289
290/*
291 * Trap codes
292 */
293#define T_INT			0	/* Interrupt pending */
294#define T_TLB_MOD		1	/* TLB modified fault */
295#define T_TLB_LD_MISS		2	/* TLB miss on load or ifetch */
296#define T_TLB_ST_MISS		3	/* TLB miss on a store */
297#define T_ADDR_ERR_LD		4	/* Address error on a load or ifetch */
298#define T_ADDR_ERR_ST		5	/* Address error on a store */
299#define T_BUS_ERR_IFETCH	6	/* Bus error on an ifetch */
300#define T_BUS_ERR_LD_ST		7	/* Bus error on a load or store */
301#define T_SYSCALL		8	/* System call */
302#define T_BREAK			9	/* Breakpoint */
303#define T_RES_INST		10	/* Reserved instruction exception */
304#define T_COP_UNUSABLE		11	/* Coprocessor unusable */
305#define T_OVFLOW		12	/* Arithmetic overflow */
306
307/*
308 * Trap definitions added for r4000 port.
309 */
310#define T_TRAP			13	/* Trap instruction */
311#define T_VCEI			14	/* Virtual coherency exception */
312#define T_MSAFPE		14	/* MSA floating point exception */
313#define T_FPE			15	/* Floating point exception */
314#define T_MSADIS		21	/* MSA disabled exception */
315#define T_WATCH			23	/* Watch address reference */
316#define T_VCED			31	/* Virtual coherency data */
317
318/* Resume Flags */
319#define RESUME_FLAG_DR		(1<<0)	/* Reload guest nonvolatile state? */
320#define RESUME_FLAG_HOST	(1<<1)	/* Resume host? */
321
322#define RESUME_GUEST		0
323#define RESUME_GUEST_DR		RESUME_FLAG_DR
324#define RESUME_HOST		RESUME_FLAG_HOST
325
/* Result of an instruction-emulation attempt. */
enum emulation_result {
	EMULATE_DONE,		/* no further processing */
	EMULATE_DO_MMIO,	/* kvm_run filled with MMIO request */
	EMULATE_FAIL,		/* can't emulate this instruction */
	EMULATE_WAIT,		/* WAIT instruction */
	EMULATE_PRIV_FAIL,	/* privilege check failed */
};
333
334#define MIPS3_PG_G	0x00000001 /* Global; ignore ASID if in lo0 & lo1 */
335#define MIPS3_PG_V	0x00000002 /* Valid */
336#define MIPS3_PG_NV	0x00000000
337#define MIPS3_PG_D	0x00000004 /* Dirty */
338
339#define mips3_paddr_to_tlbpfn(x) \
340	(((unsigned long)(x) >> MIPS3_PG_SHIFT) & MIPS3_PG_FRAME)
341#define mips3_tlbpfn_to_paddr(x) \
342	((unsigned long)((x) & MIPS3_PG_FRAME) << MIPS3_PG_SHIFT)
343
344#define MIPS3_PG_SHIFT		6
345#define MIPS3_PG_FRAME		0x3fffffc0
346
347#define VPN2_MASK		0xffffe000
348#define TLB_IS_GLOBAL(x)	(((x).tlb_lo0 & MIPS3_PG_G) &&		\
349				 ((x).tlb_lo1 & MIPS3_PG_G))
350#define TLB_VPN2(x)		((x).tlb_hi & VPN2_MASK)
351#define TLB_ASID(x)		((x).tlb_hi & ASID_MASK)
352#define TLB_IS_VALID(x, va)	(((va) & (1 << PAGE_SHIFT))		\
353				 ? ((x).tlb_lo1 & MIPS3_PG_V)		\
354				 : ((x).tlb_lo0 & MIPS3_PG_V))
355#define TLB_HI_VPN2_HIT(x, y)	((TLB_VPN2(x) & ~(x).tlb_mask) ==	\
356				 ((y) & VPN2_MASK & ~(x).tlb_mask))
357#define TLB_HI_ASID_HIT(x, y)	(TLB_IS_GLOBAL(x) ||			\
358				 TLB_ASID(x) == ((y) & ASID_MASK))
359
/* One guest (software) TLB entry; see the TLB_* accessor macros above. */
struct kvm_mips_tlb {
	long tlb_mask;	/* PageMask */
	long tlb_hi;	/* EntryHi: VPN2 | ASID */
	long tlb_lo0;	/* EntryLo0 (even page) */
	long tlb_lo1;	/* EntryLo1 (odd page) */
};
366
367#define KVM_MIPS_FPU_FPU	0x1
368#define KVM_MIPS_FPU_MSA	0x2
369
370#define KVM_MIPS_GUEST_TLB_SIZE	64
/* Per-VCPU architecture-specific state. */
struct kvm_vcpu_arch {
	/* Host and guest exception vector bases */
	void *host_ebase, *guest_ebase;
	unsigned long host_stack;
	unsigned long host_gp;

	/* Host CP0 registers used when handling exits from guest */
	unsigned long host_cp0_badvaddr;
	unsigned long host_cp0_cause;
	unsigned long host_cp0_epc;
	unsigned long host_cp0_entryhi;
	/* Guest instruction word for the current exit — presumably fetched
	 * via kvm_get_inst(); confirm against users. */
	uint32_t guest_inst;

	/* GPRS */
	unsigned long gprs[32];
	unsigned long hi;
	unsigned long lo;
	unsigned long pc;

	/* FPU State */
	struct mips_fpu_struct fpu;
	/* Which FPU state is loaded (KVM_MIPS_FPU_*) */
	unsigned int fpu_inuse;

	/* COP0 State */
	struct mips_coproc *cop0;

	/* Host KSEG0 address of the EI/DI offset */
	void *kseg0_commpage;

	u32 io_gpr;		/* GPR used as IO source/target */

	struct hrtimer comparecount_timer;
	/* Count timer control KVM register */
	uint32_t count_ctl;
	/* Count bias from the raw time */
	uint32_t count_bias;
	/* Frequency of timer in Hz */
	uint32_t count_hz;
	/* Dynamic nanosecond bias (multiple of count_period) to avoid overflow */
	s64 count_dyn_bias;
	/* Resume time */
	ktime_t count_resume;
	/* Period of timer tick in ns */
	u64 count_period;

	/* Bitmask of exceptions that are pending */
	unsigned long pending_exceptions;

	/* Bitmask of pending exceptions to be cleared */
	unsigned long pending_exceptions_clr;

	/* NOTE(review): looks like the saved Cause of an in-flight emulated
	 * load — confirm against the MMIO completion path. */
	unsigned long pending_load_cause;

	/* Save/Restore the entryhi register when we are preempted/scheduled back in */
	unsigned long preempt_entryhi;

	/* S/W Based TLB for guest */
	struct kvm_mips_tlb guest_tlb[KVM_MIPS_GUEST_TLB_SIZE];

	/* Cached guest kernel/user ASIDs */
	uint32_t guest_user_asid[NR_CPUS];
	uint32_t guest_kernel_asid[NR_CPUS];
	struct mm_struct guest_kernel_mm, guest_user_mm;

	/* Last host CPU this VCPU was scheduled on */
	int last_sched_cpu;

	/* WAIT executed */
	int wait;

	/* Userspace enables for guest FPU / MSA */
	u8 fpu_enabled;
	u8 msa_enabled;
};
443
444
445#define kvm_read_c0_guest_index(cop0)		(cop0->reg[MIPS_CP0_TLB_INDEX][0])
446#define kvm_write_c0_guest_index(cop0, val)	(cop0->reg[MIPS_CP0_TLB_INDEX][0] = val)
447#define kvm_read_c0_guest_entrylo0(cop0)	(cop0->reg[MIPS_CP0_TLB_LO0][0])
448#define kvm_read_c0_guest_entrylo1(cop0)	(cop0->reg[MIPS_CP0_TLB_LO1][0])
449#define kvm_read_c0_guest_context(cop0)		(cop0->reg[MIPS_CP0_TLB_CONTEXT][0])
450#define kvm_write_c0_guest_context(cop0, val)	(cop0->reg[MIPS_CP0_TLB_CONTEXT][0] = (val))
451#define kvm_read_c0_guest_userlocal(cop0)	(cop0->reg[MIPS_CP0_TLB_CONTEXT][2])
452#define kvm_write_c0_guest_userlocal(cop0, val)	(cop0->reg[MIPS_CP0_TLB_CONTEXT][2] = (val))
453#define kvm_read_c0_guest_pagemask(cop0)	(cop0->reg[MIPS_CP0_TLB_PG_MASK][0])
454#define kvm_write_c0_guest_pagemask(cop0, val)	(cop0->reg[MIPS_CP0_TLB_PG_MASK][0] = (val))
455#define kvm_read_c0_guest_wired(cop0)		(cop0->reg[MIPS_CP0_TLB_WIRED][0])
456#define kvm_write_c0_guest_wired(cop0, val)	(cop0->reg[MIPS_CP0_TLB_WIRED][0] = (val))
457#define kvm_read_c0_guest_hwrena(cop0)		(cop0->reg[MIPS_CP0_HWRENA][0])
458#define kvm_write_c0_guest_hwrena(cop0, val)	(cop0->reg[MIPS_CP0_HWRENA][0] = (val))
459#define kvm_read_c0_guest_badvaddr(cop0)	(cop0->reg[MIPS_CP0_BAD_VADDR][0])
460#define kvm_write_c0_guest_badvaddr(cop0, val)	(cop0->reg[MIPS_CP0_BAD_VADDR][0] = (val))
461#define kvm_read_c0_guest_count(cop0)		(cop0->reg[MIPS_CP0_COUNT][0])
462#define kvm_write_c0_guest_count(cop0, val)	(cop0->reg[MIPS_CP0_COUNT][0] = (val))
463#define kvm_read_c0_guest_entryhi(cop0)		(cop0->reg[MIPS_CP0_TLB_HI][0])
464#define kvm_write_c0_guest_entryhi(cop0, val)	(cop0->reg[MIPS_CP0_TLB_HI][0] = (val))
465#define kvm_read_c0_guest_compare(cop0)		(cop0->reg[MIPS_CP0_COMPARE][0])
466#define kvm_write_c0_guest_compare(cop0, val)	(cop0->reg[MIPS_CP0_COMPARE][0] = (val))
467#define kvm_read_c0_guest_status(cop0)		(cop0->reg[MIPS_CP0_STATUS][0])
468#define kvm_write_c0_guest_status(cop0, val)	(cop0->reg[MIPS_CP0_STATUS][0] = (val))
469#define kvm_read_c0_guest_intctl(cop0)		(cop0->reg[MIPS_CP0_STATUS][1])
470#define kvm_write_c0_guest_intctl(cop0, val)	(cop0->reg[MIPS_CP0_STATUS][1] = (val))
471#define kvm_read_c0_guest_cause(cop0)		(cop0->reg[MIPS_CP0_CAUSE][0])
472#define kvm_write_c0_guest_cause(cop0, val)	(cop0->reg[MIPS_CP0_CAUSE][0] = (val))
473#define kvm_read_c0_guest_epc(cop0)		(cop0->reg[MIPS_CP0_EXC_PC][0])
474#define kvm_write_c0_guest_epc(cop0, val)	(cop0->reg[MIPS_CP0_EXC_PC][0] = (val))
475#define kvm_read_c0_guest_prid(cop0)		(cop0->reg[MIPS_CP0_PRID][0])
476#define kvm_write_c0_guest_prid(cop0, val)	(cop0->reg[MIPS_CP0_PRID][0] = (val))
477#define kvm_read_c0_guest_ebase(cop0)		(cop0->reg[MIPS_CP0_PRID][1])
478#define kvm_write_c0_guest_ebase(cop0, val)	(cop0->reg[MIPS_CP0_PRID][1] = (val))
479#define kvm_read_c0_guest_config(cop0)		(cop0->reg[MIPS_CP0_CONFIG][0])
480#define kvm_read_c0_guest_config1(cop0)		(cop0->reg[MIPS_CP0_CONFIG][1])
481#define kvm_read_c0_guest_config2(cop0)		(cop0->reg[MIPS_CP0_CONFIG][2])
482#define kvm_read_c0_guest_config3(cop0)		(cop0->reg[MIPS_CP0_CONFIG][3])
483#define kvm_read_c0_guest_config4(cop0)		(cop0->reg[MIPS_CP0_CONFIG][4])
484#define kvm_read_c0_guest_config5(cop0)		(cop0->reg[MIPS_CP0_CONFIG][5])
485#define kvm_read_c0_guest_config7(cop0)		(cop0->reg[MIPS_CP0_CONFIG][7])
486#define kvm_write_c0_guest_config(cop0, val)	(cop0->reg[MIPS_CP0_CONFIG][0] = (val))
487#define kvm_write_c0_guest_config1(cop0, val)	(cop0->reg[MIPS_CP0_CONFIG][1] = (val))
488#define kvm_write_c0_guest_config2(cop0, val)	(cop0->reg[MIPS_CP0_CONFIG][2] = (val))
489#define kvm_write_c0_guest_config3(cop0, val)	(cop0->reg[MIPS_CP0_CONFIG][3] = (val))
490#define kvm_write_c0_guest_config4(cop0, val)	(cop0->reg[MIPS_CP0_CONFIG][4] = (val))
491#define kvm_write_c0_guest_config5(cop0, val)	(cop0->reg[MIPS_CP0_CONFIG][5] = (val))
492#define kvm_write_c0_guest_config7(cop0, val)	(cop0->reg[MIPS_CP0_CONFIG][7] = (val))
493#define kvm_read_c0_guest_errorepc(cop0)	(cop0->reg[MIPS_CP0_ERROR_PC][0])
494#define kvm_write_c0_guest_errorepc(cop0, val)	(cop0->reg[MIPS_CP0_ERROR_PC][0] = (val))
495
496/*
497 * Some of the guest registers may be modified asynchronously (e.g. from a
498 * hrtimer callback in hard irq context) and therefore need stronger atomicity
499 * guarantees than other registers.
500 */
501
/*
 * Atomically OR @val into *@reg using an LL/SC sequence, retrying until
 * the store-conditional succeeds.  Needed because some guest CP0 shadow
 * registers are modified asynchronously (see comment above).
 */
static inline void _kvm_atomic_set_c0_guest_reg(unsigned long *reg,
						unsigned long val)
{
	unsigned long temp;
	do {
		__asm__ __volatile__(
		"	.set	mips3				\n"
		"	" __LL "%0, %1				\n"
		"	or	%0, %2				\n"
		"	" __SC	"%0, %1				\n"
		"	.set	mips0				\n"
		: "=&r" (temp), "+m" (*reg)
		: "r" (val));
	} while (unlikely(!temp));	/* SC wrote 0: lost the reservation, retry */
}
517
/*
 * Atomically clear the bits in @val from *@reg (AND with ~@val) using an
 * LL/SC sequence, retrying until the store-conditional succeeds.
 */
static inline void _kvm_atomic_clear_c0_guest_reg(unsigned long *reg,
						  unsigned long val)
{
	unsigned long temp;
	do {
		__asm__ __volatile__(
		"	.set	mips3				\n"
		"	" __LL "%0, %1				\n"
		"	and	%0, %2				\n"
		"	" __SC	"%0, %1				\n"
		"	.set	mips0				\n"
		: "=&r" (temp), "+m" (*reg)
		: "r" (~val));
	} while (unlikely(!temp));	/* SC wrote 0: lost the reservation, retry */
}
533
/*
 * Atomically replace the @change bits of *@reg with the corresponding
 * bits of @val: *reg = (*reg & ~change) | (val & change), as a single
 * LL/SC read-modify-write, retried until the store-conditional succeeds.
 */
static inline void _kvm_atomic_change_c0_guest_reg(unsigned long *reg,
						   unsigned long change,
						   unsigned long val)
{
	unsigned long temp;
	do {
		__asm__ __volatile__(
		"	.set	mips3				\n"
		"	" __LL "%0, %1				\n"
		"	and	%0, %2				\n"
		"	or	%0, %3				\n"
		"	" __SC	"%0, %1				\n"
		"	.set	mips0				\n"
		: "=&r" (temp), "+m" (*reg)
		: "r" (~change), "r" (val & change));
	} while (unlikely(!temp));	/* SC wrote 0: lost the reservation, retry */
}
551
552#define kvm_set_c0_guest_status(cop0, val)	(cop0->reg[MIPS_CP0_STATUS][0] |= (val))
553#define kvm_clear_c0_guest_status(cop0, val)	(cop0->reg[MIPS_CP0_STATUS][0] &= ~(val))
554
555/* Cause can be modified asynchronously from hardirq hrtimer callback */
556#define kvm_set_c0_guest_cause(cop0, val)				\
557	_kvm_atomic_set_c0_guest_reg(&cop0->reg[MIPS_CP0_CAUSE][0], val)
558#define kvm_clear_c0_guest_cause(cop0, val)				\
559	_kvm_atomic_clear_c0_guest_reg(&cop0->reg[MIPS_CP0_CAUSE][0], val)
560#define kvm_change_c0_guest_cause(cop0, change, val)			\
561	_kvm_atomic_change_c0_guest_reg(&cop0->reg[MIPS_CP0_CAUSE][0],	\
562					change, val)
563
564#define kvm_set_c0_guest_ebase(cop0, val)	(cop0->reg[MIPS_CP0_PRID][1] |= (val))
565#define kvm_clear_c0_guest_ebase(cop0, val)	(cop0->reg[MIPS_CP0_PRID][1] &= ~(val))
566#define kvm_change_c0_guest_ebase(cop0, change, val)			\
567{									\
568	kvm_clear_c0_guest_ebase(cop0, change);				\
569	kvm_set_c0_guest_ebase(cop0, ((val) & (change)));		\
570}
571
572/* Helpers */
573
574static inline bool kvm_mips_guest_can_have_fpu(struct kvm_vcpu_arch *vcpu)
575{
576	return (!__builtin_constant_p(cpu_has_fpu) || cpu_has_fpu) &&
577		vcpu->fpu_enabled;
578}
579
580static inline bool kvm_mips_guest_has_fpu(struct kvm_vcpu_arch *vcpu)
581{
582	return kvm_mips_guest_can_have_fpu(vcpu) &&
583		kvm_read_c0_guest_config1(vcpu->cop0) & MIPS_CONF1_FP;
584}
585
586static inline bool kvm_mips_guest_can_have_msa(struct kvm_vcpu_arch *vcpu)
587{
588	return (!__builtin_constant_p(cpu_has_msa) || cpu_has_msa) &&
589		vcpu->msa_enabled;
590}
591
592static inline bool kvm_mips_guest_has_msa(struct kvm_vcpu_arch *vcpu)
593{
594	return kvm_mips_guest_can_have_msa(vcpu) &&
595		kvm_read_c0_guest_config3(vcpu->cop0) & MIPS_CONF3_MSA;
596}
597
/*
 * Ops vector implemented by the KVM MIPS backend and installed via
 * kvm_mips_emulation_init().  handle_* hooks service the corresponding
 * guest exit causes; the remainder cover VM/VCPU setup, interrupt
 * queueing/delivery, and register access.
 */
struct kvm_mips_callbacks {
	int (*handle_cop_unusable)(struct kvm_vcpu *vcpu);
	int (*handle_tlb_mod)(struct kvm_vcpu *vcpu);
	int (*handle_tlb_ld_miss)(struct kvm_vcpu *vcpu);
	int (*handle_tlb_st_miss)(struct kvm_vcpu *vcpu);
	int (*handle_addr_err_st)(struct kvm_vcpu *vcpu);
	int (*handle_addr_err_ld)(struct kvm_vcpu *vcpu);
	int (*handle_syscall)(struct kvm_vcpu *vcpu);
	int (*handle_res_inst)(struct kvm_vcpu *vcpu);
	int (*handle_break)(struct kvm_vcpu *vcpu);
	int (*handle_trap)(struct kvm_vcpu *vcpu);
	int (*handle_msa_fpe)(struct kvm_vcpu *vcpu);
	int (*handle_fpe)(struct kvm_vcpu *vcpu);
	int (*handle_msa_disabled)(struct kvm_vcpu *vcpu);
	int (*vm_init)(struct kvm *kvm);
	int (*vcpu_init)(struct kvm_vcpu *vcpu);
	int (*vcpu_setup)(struct kvm_vcpu *vcpu);
	gpa_t (*gva_to_gpa)(gva_t gva);
	void (*queue_timer_int)(struct kvm_vcpu *vcpu);
	void (*dequeue_timer_int)(struct kvm_vcpu *vcpu);
	void (*queue_io_int)(struct kvm_vcpu *vcpu,
			     struct kvm_mips_interrupt *irq);
	void (*dequeue_io_int)(struct kvm_vcpu *vcpu,
			       struct kvm_mips_interrupt *irq);
	int (*irq_deliver)(struct kvm_vcpu *vcpu, unsigned int priority,
			   uint32_t cause);
	int (*irq_clear)(struct kvm_vcpu *vcpu, unsigned int priority,
			 uint32_t cause);
	int (*get_one_reg)(struct kvm_vcpu *vcpu,
			   const struct kvm_one_reg *reg, s64 *v);
	int (*set_one_reg)(struct kvm_vcpu *vcpu,
			   const struct kvm_one_reg *reg, s64 v);
	int (*vcpu_get_regs)(struct kvm_vcpu *vcpu);
	int (*vcpu_set_regs)(struct kvm_vcpu *vcpu);
};
633extern struct kvm_mips_callbacks *kvm_mips_callbacks;
634int kvm_mips_emulation_init(struct kvm_mips_callbacks **install_callbacks);
635
636/* Debug: dump vcpu state */
637int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu);
638
639/* Trampoline ASM routine to start running in "Guest" context */
640extern int __kvm_mips_vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu);
641
642/* FPU/MSA context management */
643void __kvm_save_fpu(struct kvm_vcpu_arch *vcpu);
644void __kvm_restore_fpu(struct kvm_vcpu_arch *vcpu);
645void __kvm_restore_fcsr(struct kvm_vcpu_arch *vcpu);
646void __kvm_save_msa(struct kvm_vcpu_arch *vcpu);
647void __kvm_restore_msa(struct kvm_vcpu_arch *vcpu);
648void __kvm_restore_msa_upper(struct kvm_vcpu_arch *vcpu);
649void __kvm_restore_msacsr(struct kvm_vcpu_arch *vcpu);
650void kvm_own_fpu(struct kvm_vcpu *vcpu);
651void kvm_own_msa(struct kvm_vcpu *vcpu);
652void kvm_drop_fpu(struct kvm_vcpu *vcpu);
653void kvm_lose_fpu(struct kvm_vcpu *vcpu);
654
655/* TLB handling */
656uint32_t kvm_get_kernel_asid(struct kvm_vcpu *vcpu);
657
658uint32_t kvm_get_user_asid(struct kvm_vcpu *vcpu);
659
660uint32_t kvm_get_commpage_asid (struct kvm_vcpu *vcpu);
661
662extern int kvm_mips_handle_kseg0_tlb_fault(unsigned long badbaddr,
663					   struct kvm_vcpu *vcpu);
664
665extern int kvm_mips_handle_commpage_tlb_fault(unsigned long badvaddr,
666					      struct kvm_vcpu *vcpu);
667
668extern int kvm_mips_handle_mapped_seg_tlb_fault(struct kvm_vcpu *vcpu,
669						struct kvm_mips_tlb *tlb,
670						unsigned long *hpa0,
671						unsigned long *hpa1);
672
673extern enum emulation_result kvm_mips_handle_tlbmiss(unsigned long cause,
674						     uint32_t *opc,
675						     struct kvm_run *run,
676						     struct kvm_vcpu *vcpu);
677
678extern enum emulation_result kvm_mips_handle_tlbmod(unsigned long cause,
679						    uint32_t *opc,
680						    struct kvm_run *run,
681						    struct kvm_vcpu *vcpu);
682
683extern void kvm_mips_dump_host_tlbs(void);
684extern void kvm_mips_dump_guest_tlbs(struct kvm_vcpu *vcpu);
685extern void kvm_mips_flush_host_tlb(int skip_kseg0);
686extern int kvm_mips_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long entryhi);
687extern int kvm_mips_host_tlb_inv_index(struct kvm_vcpu *vcpu, int index);
688
689extern int kvm_mips_guest_tlb_lookup(struct kvm_vcpu *vcpu,
690				     unsigned long entryhi);
691extern int kvm_mips_host_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long vaddr);
692extern unsigned long kvm_mips_translate_guest_kseg0_to_hpa(struct kvm_vcpu *vcpu,
693						   unsigned long gva);
694extern void kvm_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu,
695				    struct kvm_vcpu *vcpu);
696extern void kvm_local_flush_tlb_all(void);
697extern void kvm_mips_alloc_new_mmu_context(struct kvm_vcpu *vcpu);
698extern void kvm_mips_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
699extern void kvm_mips_vcpu_put(struct kvm_vcpu *vcpu);
700
701/* Emulation */
702uint32_t kvm_get_inst(uint32_t *opc, struct kvm_vcpu *vcpu);
703enum emulation_result update_pc(struct kvm_vcpu *vcpu, uint32_t cause);
704
705extern enum emulation_result kvm_mips_emulate_inst(unsigned long cause,
706						   uint32_t *opc,
707						   struct kvm_run *run,
708						   struct kvm_vcpu *vcpu);
709
710extern enum emulation_result kvm_mips_emulate_syscall(unsigned long cause,
711						      uint32_t *opc,
712						      struct kvm_run *run,
713						      struct kvm_vcpu *vcpu);
714
715extern enum emulation_result kvm_mips_emulate_tlbmiss_ld(unsigned long cause,
716							 uint32_t *opc,
717							 struct kvm_run *run,
718							 struct kvm_vcpu *vcpu);
719
720extern enum emulation_result kvm_mips_emulate_tlbinv_ld(unsigned long cause,
721							uint32_t *opc,
722							struct kvm_run *run,
723							struct kvm_vcpu *vcpu);
724
725extern enum emulation_result kvm_mips_emulate_tlbmiss_st(unsigned long cause,
726							 uint32_t *opc,
727							 struct kvm_run *run,
728							 struct kvm_vcpu *vcpu);
729
730extern enum emulation_result kvm_mips_emulate_tlbinv_st(unsigned long cause,
731							uint32_t *opc,
732							struct kvm_run *run,
733							struct kvm_vcpu *vcpu);
734
735extern enum emulation_result kvm_mips_emulate_tlbmod(unsigned long cause,
736						     uint32_t *opc,
737						     struct kvm_run *run,
738						     struct kvm_vcpu *vcpu);
739
740extern enum emulation_result kvm_mips_emulate_fpu_exc(unsigned long cause,
741						      uint32_t *opc,
742						      struct kvm_run *run,
743						      struct kvm_vcpu *vcpu);
744
745extern enum emulation_result kvm_mips_handle_ri(unsigned long cause,
746						uint32_t *opc,
747						struct kvm_run *run,
748						struct kvm_vcpu *vcpu);
749
750extern enum emulation_result kvm_mips_emulate_ri_exc(unsigned long cause,
751						     uint32_t *opc,
752						     struct kvm_run *run,
753						     struct kvm_vcpu *vcpu);
754
755extern enum emulation_result kvm_mips_emulate_bp_exc(unsigned long cause,
756						     uint32_t *opc,
757						     struct kvm_run *run,
758						     struct kvm_vcpu *vcpu);
759
760extern enum emulation_result kvm_mips_emulate_trap_exc(unsigned long cause,
761						       uint32_t *opc,
762						       struct kvm_run *run,
763						       struct kvm_vcpu *vcpu);
764
765extern enum emulation_result kvm_mips_emulate_msafpe_exc(unsigned long cause,
766							 uint32_t *opc,
767							 struct kvm_run *run,
768							 struct kvm_vcpu *vcpu);
769
770extern enum emulation_result kvm_mips_emulate_fpe_exc(unsigned long cause,
771						      uint32_t *opc,
772						      struct kvm_run *run,
773						      struct kvm_vcpu *vcpu);
774
775extern enum emulation_result kvm_mips_emulate_msadis_exc(unsigned long cause,
776							 uint32_t *opc,
777							 struct kvm_run *run,
778							 struct kvm_vcpu *vcpu);
779
780extern enum emulation_result kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu,
781							 struct kvm_run *run);
782
783uint32_t kvm_mips_read_count(struct kvm_vcpu *vcpu);
784void kvm_mips_write_count(struct kvm_vcpu *vcpu, uint32_t count);
785void kvm_mips_write_compare(struct kvm_vcpu *vcpu, uint32_t compare, bool ack);
786void kvm_mips_init_count(struct kvm_vcpu *vcpu);
787int kvm_mips_set_count_ctl(struct kvm_vcpu *vcpu, s64 count_ctl);
788int kvm_mips_set_count_resume(struct kvm_vcpu *vcpu, s64 count_resume);
789int kvm_mips_set_count_hz(struct kvm_vcpu *vcpu, s64 count_hz);
790void kvm_mips_count_enable_cause(struct kvm_vcpu *vcpu);
791void kvm_mips_count_disable_cause(struct kvm_vcpu *vcpu);
792enum hrtimer_restart kvm_mips_count_timeout(struct kvm_vcpu *vcpu);
793
794enum emulation_result kvm_mips_check_privilege(unsigned long cause,
795					       uint32_t *opc,
796					       struct kvm_run *run,
797					       struct kvm_vcpu *vcpu);
798
799enum emulation_result kvm_mips_emulate_cache(uint32_t inst,
800					     uint32_t *opc,
801					     uint32_t cause,
802					     struct kvm_run *run,
803					     struct kvm_vcpu *vcpu);
804enum emulation_result kvm_mips_emulate_CP0(uint32_t inst,
805					   uint32_t *opc,
806					   uint32_t cause,
807					   struct kvm_run *run,
808					   struct kvm_vcpu *vcpu);
809enum emulation_result kvm_mips_emulate_store(uint32_t inst,
810					     uint32_t cause,
811					     struct kvm_run *run,
812					     struct kvm_vcpu *vcpu);
813enum emulation_result kvm_mips_emulate_load(uint32_t inst,
814					    uint32_t cause,
815					    struct kvm_run *run,
816					    struct kvm_vcpu *vcpu);
817
818unsigned int kvm_mips_config1_wrmask(struct kvm_vcpu *vcpu);
819unsigned int kvm_mips_config3_wrmask(struct kvm_vcpu *vcpu);
820unsigned int kvm_mips_config4_wrmask(struct kvm_vcpu *vcpu);
821unsigned int kvm_mips_config5_wrmask(struct kvm_vcpu *vcpu);
822
823/* Dynamic binary translation */
824extern int kvm_mips_trans_cache_index(uint32_t inst, uint32_t *opc,
825				      struct kvm_vcpu *vcpu);
826extern int kvm_mips_trans_cache_va(uint32_t inst, uint32_t *opc,
827				   struct kvm_vcpu *vcpu);
828extern int kvm_mips_trans_mfc0(uint32_t inst, uint32_t *opc,
829			       struct kvm_vcpu *vcpu);
830extern int kvm_mips_trans_mtc0(uint32_t inst, uint32_t *opc,
831			       struct kvm_vcpu *vcpu);
832
833/* Misc */
834extern void kvm_mips_dump_stats(struct kvm_vcpu *vcpu);
835extern unsigned long kvm_mips_get_ramsize(struct kvm *kvm);
836
/* Empty generic-KVM arch hooks: MIPS requires no work for these events. */
static inline void kvm_arch_hardware_disable(void) {}
static inline void kvm_arch_hardware_unsetup(void) {}
static inline void kvm_arch_sync_events(struct kvm *kvm) {}
static inline void kvm_arch_free_memslot(struct kvm *kvm,
		struct kvm_memory_slot *free, struct kvm_memory_slot *dont) {}
static inline void kvm_arch_memslots_updated(struct kvm *kvm) {}
static inline void kvm_arch_flush_shadow_all(struct kvm *kvm) {}
static inline void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
		struct kvm_memory_slot *slot) {}
static inline void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) {}
static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
848
849#endif /* __MIPS_KVM_HOST_H__ */
850