/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Derived from arch/arm/include/kvm_emulate.h
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#ifndef __ARM64_KVM_EMULATE_H__
#define __ARM64_KVM_EMULATE_H__

#include <linux/kvm_host.h>

#include <asm/esr.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_mmio.h>
#include <asm/ptrace.h>
#include <asm/cputype.h>

unsigned long *vcpu_reg32(const struct kvm_vcpu *vcpu, u8 reg_num);
unsigned long *vcpu_spsr32(const struct kvm_vcpu *vcpu);

bool kvm_condition_valid32(const struct kvm_vcpu *vcpu);
void kvm_skip_instr32(struct kvm_vcpu *vcpu, bool is_wide_instr);

void kvm_inject_undefined(struct kvm_vcpu *vcpu);
void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr);
void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr);

/*
 * Reset HCR_EL2 to the guest defaults; a vcpu with the 32-bit feature
 * runs with HCR_EL2.RW clear, making EL1 AArch32.
 */
static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hcr_el2 = HCR_GUEST_FLAGS;
	if (test_bit(KVM_ARM_VCPU_EL1_32BIT, vcpu->arch.features))
		vcpu->arch.hcr_el2 &= ~HCR_RW;
}

static inline unsigned long vcpu_get_hcr(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.hcr_el2;
}

static inline void vcpu_set_hcr(struct kvm_vcpu *vcpu, unsigned long hcr)
{
	vcpu->arch.hcr_el2 = hcr;
}

static inline unsigned long *vcpu_pc(const struct kvm_vcpu *vcpu)
{
	return (unsigned long *)&vcpu_gp_regs(vcpu)->regs.pc;
}

static inline unsigned long *vcpu_elr_el1(const struct kvm_vcpu *vcpu)
{
	return (unsigned long *)&vcpu_gp_regs(vcpu)->elr_el1;
}

static inline unsigned long *vcpu_cpsr(const struct kvm_vcpu *vcpu)
{
	return (unsigned long *)&vcpu_gp_regs(vcpu)->regs.pstate;
}

static inline bool vcpu_mode_is_32bit(const struct kvm_vcpu *vcpu)
{
	return !!(*vcpu_cpsr(vcpu) & PSR_MODE32_BIT);
}

static inline bool kvm_condition_valid(const struct kvm_vcpu *vcpu)
{
	if (vcpu_mode_is_32bit(vcpu))
		return kvm_condition_valid32(vcpu);

	return true;
}

static inline void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_instr)
{
	if (vcpu_mode_is_32bit(vcpu))
		kvm_skip_instr32(vcpu, is_wide_instr);
	else
		*vcpu_pc(vcpu) += 4;
}

static inline void vcpu_set_thumb(struct kvm_vcpu *vcpu)
{
	*vcpu_cpsr(vcpu) |= COMPAT_PSR_T_BIT;
}

/*
 * vcpu_reg should always be passed a register number coming from a
 * read of ESR_EL2. Otherwise, it may give the wrong result on AArch32
 * with banked registers.
 */
static inline unsigned long *vcpu_reg(const struct kvm_vcpu *vcpu, u8 reg_num)
{
	return (unsigned long *)&vcpu_gp_regs(vcpu)->regs.regs[reg_num];
}
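
/*
 * Illustrative sketch only, not part of the original header: an Rt
 * value decoded from the ESR_EL2 ISS is safe to hand to vcpu_reg(),
 * while a mode-dependent register number (e.g. r13 on AArch32, which
 * is banked per mode) is not and should go through vcpu_reg32()
 * instead. The variables esr_el2 and data are hypothetical.
 */
#if 0	/* example only */
	int rt = (esr_el2 & ESR_ELx_SRT_MASK) >> ESR_ELx_SRT_SHIFT;

	*vcpu_reg(vcpu, rt) = data;	/* OK: Rt comes from ESR_EL2 */
	*vcpu_reg(vcpu, 13) = data;	/* wrong for an AArch32 guest */
#endif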

/* Get vcpu SPSR for current mode */
static inline unsigned long *vcpu_spsr(const struct kvm_vcpu *vcpu)
{
	if (vcpu_mode_is_32bit(vcpu))
		return vcpu_spsr32(vcpu);

	return (unsigned long *)&vcpu_gp_regs(vcpu)->spsr[KVM_SPSR_EL1];
}

static inline bool vcpu_mode_priv(const struct kvm_vcpu *vcpu)
{
	u32 mode = *vcpu_cpsr(vcpu) & PSR_MODE_MASK;

	if (vcpu_mode_is_32bit(vcpu))
		return mode > COMPAT_PSR_MODE_USR;

	return mode != PSR_MODE_EL0t;
}

/*
 * Returns ESR_EL2; the legacy "HSR" name (its AArch32 counterpart) is
 * kept for code shared with 32-bit ARM.
 */
static inline u32 kvm_vcpu_get_hsr(const struct kvm_vcpu *vcpu)
{
	return vcpu->arch.fault.esr_el2;
}

static inline unsigned long kvm_vcpu_get_hfar(const struct kvm_vcpu *vcpu)
{
	return vcpu->arch.fault.far_el2;
}

/*
 * HPFAR_EL2 holds the faulting IPA bits [47:12] in bits [39:4], so
 * shifting the masked value left by 8 yields the IPA with a zero
 * page offset.
 */
static inline phys_addr_t kvm_vcpu_get_fault_ipa(const struct kvm_vcpu *vcpu)
{
	return ((phys_addr_t)vcpu->arch.fault.hpfar_el2 & HPFAR_MASK) << 8;
}
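
/*
 * Illustrative sketch only: since HPFAR_EL2 reports the IPA with the
 * bottom 12 bits clear, a caller needing the exact faulting address
 * is expected to fill those bits in from the faulting VA in FAR_EL2,
 * roughly as below.
 */
#if 0	/* example only */
	phys_addr_t fault_ipa = kvm_vcpu_get_fault_ipa(vcpu);

	fault_ipa |= kvm_vcpu_get_hfar(vcpu) & ((1 << 12) - 1);
#endif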

static inline u32 kvm_vcpu_hvc_get_imm(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_hsr(vcpu) & ESR_ELx_xVC_IMM_MASK;
}

static inline bool kvm_vcpu_dabt_isvalid(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_ISV);
}

static inline bool kvm_vcpu_dabt_iswrite(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_WNR);
}

static inline bool kvm_vcpu_dabt_issext(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SSE);
}

static inline int kvm_vcpu_dabt_get_rd(const struct kvm_vcpu *vcpu)
{
	return (kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SRT_MASK) >> ESR_ELx_SRT_SHIFT;
}

static inline bool kvm_vcpu_dabt_isextabt(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_EA);
}

static inline bool kvm_vcpu_dabt_iss1tw(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_S1PTW);
}

/* Returns the access size in bytes (1, 2, 4 or 8) encoded in SAS */
static inline int kvm_vcpu_dabt_get_as(const struct kvm_vcpu *vcpu)
{
	return 1 << ((kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SAS) >> ESR_ELx_SAS_SHIFT);
}
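
/*
 * Illustrative sketch only: the accessors above are meant to be
 * combined when decoding a data abort for MMIO emulation, along the
 * lines of the hypothetical helper below.
 */
#if 0	/* example only */
static int decode_dabt(struct kvm_vcpu *vcpu, bool *is_write, int *rt,
		       unsigned int *len, bool *sign_extend)
{
	/* Without a valid ISS there is nothing to emulate from */
	if (!kvm_vcpu_dabt_isvalid(vcpu))
		return -EFAULT;

	*is_write = kvm_vcpu_dabt_iswrite(vcpu);
	*len = kvm_vcpu_dabt_get_as(vcpu);	/* 1, 2, 4 or 8 bytes */
	*sign_extend = kvm_vcpu_dabt_issext(vcpu);
	*rt = kvm_vcpu_dabt_get_rd(vcpu);	/* safe for vcpu_reg() */
	return 0;
}
#endif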

/* This one is not specific to Data Abort */
static inline bool kvm_vcpu_trap_il_is32bit(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_IL);
}
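
/*
 * Illustrative sketch only: a trap handler typically emulates the
 * access and then advances the guest past the trapped instruction,
 * using the IL bit to distinguish 16-bit from 32-bit encodings.
 * handle_trapped_access() is hypothetical.
 */
#if 0	/* example only */
	ret = handle_trapped_access(vcpu);
	kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
#endif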

static inline u8 kvm_vcpu_trap_get_class(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_hsr(vcpu) >> ESR_ELx_EC_SHIFT;
}

static inline bool kvm_vcpu_trap_is_iabt(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_IABT_LOW;
}

static inline u8 kvm_vcpu_trap_get_fault(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_hsr(vcpu) & ESR_ELx_FSC;
}

static inline u8 kvm_vcpu_trap_get_fault_type(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_hsr(vcpu) & ESR_ELx_FSC_TYPE;
}

static inline unsigned long kvm_vcpu_get_mpidr_aff(struct kvm_vcpu *vcpu)
{
	return vcpu_sys_reg(vcpu, MPIDR_EL1) & MPIDR_HWID_BITMASK;
}

static inline void kvm_vcpu_set_be(struct kvm_vcpu *vcpu)
{
	if (vcpu_mode_is_32bit(vcpu))
		*vcpu_cpsr(vcpu) |= COMPAT_PSR_E_BIT;
	else
		vcpu_sys_reg(vcpu, SCTLR_EL1) |= (1 << 25);	/* SCTLR_EL1.EE */
}

static inline bool kvm_vcpu_is_be(struct kvm_vcpu *vcpu)
{
	if (vcpu_mode_is_32bit(vcpu))
		return !!(*vcpu_cpsr(vcpu) & COMPAT_PSR_E_BIT);

	return !!(vcpu_sys_reg(vcpu, SCTLR_EL1) & (1 << 25));	/* SCTLR_EL1.EE */
}

static inline unsigned long vcpu_data_guest_to_host(struct kvm_vcpu *vcpu,
						    unsigned long data,
						    unsigned int len)
{
	if (kvm_vcpu_is_be(vcpu)) {
		switch (len) {
		case 1:
			return data & 0xff;
		case 2:
			return be16_to_cpu(data & 0xffff);
		case 4:
			return be32_to_cpu(data & 0xffffffff);
		default:
			return be64_to_cpu(data);
		}
	} else {
		switch (len) {
		case 1:
			return data & 0xff;
		case 2:
			return le16_to_cpu(data & 0xffff);
		case 4:
			return le32_to_cpu(data & 0xffffffff);
		default:
			return le64_to_cpu(data);
		}
	}

	return data;		/* Leave LE untouched */
}

static inline unsigned long vcpu_data_host_to_guest(struct kvm_vcpu *vcpu,
						    unsigned long data,
						    unsigned int len)
{
	if (kvm_vcpu_is_be(vcpu)) {
		switch (len) {
		case 1:
			return data & 0xff;
		case 2:
			return cpu_to_be16(data & 0xffff);
		case 4:
			return cpu_to_be32(data & 0xffffffff);
		default:
			return cpu_to_be64(data);
		}
	} else {
		switch (len) {
		case 1:
			return data & 0xff;
		case 2:
			return cpu_to_le16(data & 0xffff);
		case 4:
			return cpu_to_le32(data & 0xffffffff);
		default:
			return cpu_to_le64(data);
		}
	}

	return data;		/* Leave LE untouched */
}
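
/*
 * Illustrative sketch only: on an MMIO read completion the
 * host-ordered value is converted into the guest's current data
 * endianness before being written back to Rt, and on an MMIO write
 * the guest value is converted to host order before reaching the
 * device model. rt, len and host_data are hypothetical.
 */
#if 0	/* example only */
	/* read completion: host value -> guest register */
	*vcpu_reg(vcpu, rt) = vcpu_data_host_to_guest(vcpu, host_data, len);

	/* write: guest register -> host value for the device model */
	host_data = vcpu_data_guest_to_host(vcpu, *vcpu_reg(vcpu, rt), len);
#endif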

#endif /* __ARM64_KVM_EMULATE_H__ */