/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Derived from arch/arm/include/kvm_emulate.h
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#ifndef __ARM64_KVM_EMULATE_H__
#define __ARM64_KVM_EMULATE_H__

#include <linux/kvm_host.h>

#include <asm/esr.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_mmio.h>
#include <asm/ptrace.h>
#include <asm/cputype.h>

unsigned long *vcpu_reg32(const struct kvm_vcpu *vcpu, u8 reg_num);
unsigned long *vcpu_spsr32(const struct kvm_vcpu *vcpu);

bool kvm_condition_valid32(const struct kvm_vcpu *vcpu);
void kvm_skip_instr32(struct kvm_vcpu *vcpu, bool is_wide_instr);

void kvm_inject_undefined(struct kvm_vcpu *vcpu);
void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr);
void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr);

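/*
 * Reset HCR_EL2 to the default set of guest flags. If the vcpu has been
 * configured with a 32-bit EL1, clear HCR_EL2.RW so the guest runs in
 * AArch32 state at EL1.
 */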
static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hcr_el2 = HCR_GUEST_FLAGS;
	if (test_bit(KVM_ARM_VCPU_EL1_32BIT, vcpu->arch.features))
		vcpu->arch.hcr_el2 &= ~HCR_RW;
}

static inline unsigned long vcpu_get_hcr(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.hcr_el2;
}

static inline void vcpu_set_hcr(struct kvm_vcpu *vcpu, unsigned long hcr)
{
	vcpu->arch.hcr_el2 = hcr;
}

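/*
 * The accessors below return pointers into the vcpu's saved guest context,
 * so they can be used both to read and to modify the corresponding state.
 */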
static inline unsigned long *vcpu_pc(const struct kvm_vcpu *vcpu)
{
	return (unsigned long *)&vcpu_gp_regs(vcpu)->regs.pc;
}

static inline unsigned long *vcpu_elr_el1(const struct kvm_vcpu *vcpu)
{
	return (unsigned long *)&vcpu_gp_regs(vcpu)->elr_el1;
}

static inline unsigned long *vcpu_cpsr(const struct kvm_vcpu *vcpu)
{
	return (unsigned long *)&vcpu_gp_regs(vcpu)->regs.pstate;
}

static inline bool vcpu_mode_is_32bit(const struct kvm_vcpu *vcpu)
{
	return !!(*vcpu_cpsr(vcpu) & PSR_MODE32_BIT);
}

static inline bool kvm_condition_valid(const struct kvm_vcpu *vcpu)
{
	if (vcpu_mode_is_32bit(vcpu))
		return kvm_condition_valid32(vcpu);

	return true;
}

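/*
 * Skip the instruction that trapped. AArch64 instructions are always
 * 4 bytes wide; for AArch32 the width depends on whether a 16-bit Thumb
 * or a 32-bit encoding trapped, which is what is_wide_instr conveys.
 */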
static inline void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_instr)
{
	if (vcpu_mode_is_32bit(vcpu))
		kvm_skip_instr32(vcpu, is_wide_instr);
	else
		*vcpu_pc(vcpu) += 4;
}

static inline void vcpu_set_thumb(struct kvm_vcpu *vcpu)
{
	*vcpu_cpsr(vcpu) |= COMPAT_PSR_T_BIT;
}

/*
 * vcpu_get_reg and vcpu_set_reg should always be passed a register number
 * coming from a read of ESR_EL2. Otherwise, it may give the wrong result on
 * AArch32 with banked registers.
 */
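/*
 * Register number 31 is the zero register (XZR/WZR): reads return zero
 * and writes are discarded.
 */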
static inline unsigned long vcpu_get_reg(const struct kvm_vcpu *vcpu,
					 u8 reg_num)
{
	return (reg_num == 31) ? 0 : vcpu_gp_regs(vcpu)->regs.regs[reg_num];
}

static inline void vcpu_set_reg(struct kvm_vcpu *vcpu, u8 reg_num,
				unsigned long val)
{
	if (reg_num != 31)
		vcpu_gp_regs(vcpu)->regs.regs[reg_num] = val;
}

/* Get vcpu SPSR for current mode */
static inline unsigned long *vcpu_spsr(const struct kvm_vcpu *vcpu)
{
	if (vcpu_mode_is_32bit(vcpu))
		return vcpu_spsr32(vcpu);

	return (unsigned long *)&vcpu_gp_regs(vcpu)->spsr[KVM_SPSR_EL1];
}

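/*
 * Return true if the guest was executing in a privileged mode, i.e. in
 * anything other than EL0 (AArch64) or USR mode (AArch32).
 */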
static inline bool vcpu_mode_priv(const struct kvm_vcpu *vcpu)
{
	u32 mode = *vcpu_cpsr(vcpu) & PSR_MODE_MASK;

	if (vcpu_mode_is_32bit(vcpu))
		return mode > COMPAT_PSR_MODE_USR;

	return mode != PSR_MODE_EL0t;
}

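/*
 * Fault information captured at guest exit time from ESR_EL2, FAR_EL2
 * and HPFAR_EL2.
 */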
static inline u32 kvm_vcpu_get_hsr(const struct kvm_vcpu *vcpu)
{
	return vcpu->arch.fault.esr_el2;
}

static inline unsigned long kvm_vcpu_get_hfar(const struct kvm_vcpu *vcpu)
{
	return vcpu->arch.fault.far_el2;
}

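/*
 * HPFAR_EL2 reports the faulting IPA at page granularity; shifting the
 * masked FIPA field left by 8 rebuilds the page-aligned IPA.
 */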
static inline phys_addr_t kvm_vcpu_get_fault_ipa(const struct kvm_vcpu *vcpu)
{
	return ((phys_addr_t)vcpu->arch.fault.hpfar_el2 & HPFAR_MASK) << 8;
}

static inline u32 kvm_vcpu_hvc_get_imm(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_hsr(vcpu) & ESR_ELx_xVC_IMM_MASK;
}

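/*
 * Helpers decoding the ISS fields of a data abort syndrome: whether the
 * syndrome is valid for emulation, the access direction, sign extension,
 * the transfer register and the access size.
 */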
static inline bool kvm_vcpu_dabt_isvalid(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_ISV);
}

static inline bool kvm_vcpu_dabt_iswrite(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_WNR);
}

static inline bool kvm_vcpu_dabt_issext(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SSE);
}

static inline int kvm_vcpu_dabt_get_rd(const struct kvm_vcpu *vcpu)
{
	return (kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SRT_MASK) >> ESR_ELx_SRT_SHIFT;
}

static inline bool kvm_vcpu_dabt_isextabt(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_EA);
}

static inline bool kvm_vcpu_dabt_iss1tw(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_S1PTW);
}

static inline int kvm_vcpu_dabt_get_as(const struct kvm_vcpu *vcpu)
{
	return 1 << ((kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SAS) >> ESR_ELx_SAS_SHIFT);
}

/* This one is not specific to Data Abort */
static inline bool kvm_vcpu_trap_il_is32bit(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_IL);
}

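/* Exception class and fault status helpers, also derived from ESR_EL2 */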
static inline u8 kvm_vcpu_trap_get_class(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_hsr(vcpu) >> ESR_ELx_EC_SHIFT;
}

static inline bool kvm_vcpu_trap_is_iabt(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_IABT_LOW;
}

static inline u8 kvm_vcpu_trap_get_fault(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_hsr(vcpu) & ESR_ELx_FSC;
}

static inline u8 kvm_vcpu_trap_get_fault_type(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_hsr(vcpu) & ESR_ELx_FSC_TYPE;
}

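/* Affinity fields of the vcpu's MPIDR_EL1, as seen by the guest */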
static inline unsigned long kvm_vcpu_get_mpidr_aff(struct kvm_vcpu *vcpu)
{
	return vcpu_sys_reg(vcpu, MPIDR_EL1) & MPIDR_HWID_BITMASK;
}

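/*
 * Guest data endianness: AArch32 guests use PSTATE.E, AArch64 guests use
 * SCTLR_EL1.EE (bit 25).
 */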
static inline void kvm_vcpu_set_be(struct kvm_vcpu *vcpu)
{
	if (vcpu_mode_is_32bit(vcpu))
		*vcpu_cpsr(vcpu) |= COMPAT_PSR_E_BIT;
	else
		vcpu_sys_reg(vcpu, SCTLR_EL1) |= (1 << 25);
}

static inline bool kvm_vcpu_is_be(struct kvm_vcpu *vcpu)
{
	if (vcpu_mode_is_32bit(vcpu))
		return !!(*vcpu_cpsr(vcpu) & COMPAT_PSR_E_BIT);

	return !!(vcpu_sys_reg(vcpu, SCTLR_EL1) & (1 << 25));
}

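/*
 * Convert data written by the guest into host-endian order, taking the
 * guest's current data endianness into account. Only the low 'len' bytes
 * of 'data' are meaningful.
 */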
static inline unsigned long vcpu_data_guest_to_host(struct kvm_vcpu *vcpu,
						    unsigned long data,
						    unsigned int len)
{
	if (kvm_vcpu_is_be(vcpu)) {
		switch (len) {
		case 1:
			return data & 0xff;
		case 2:
			return be16_to_cpu(data & 0xffff);
		case 4:
			return be32_to_cpu(data & 0xffffffff);
		default:
			return be64_to_cpu(data);
		}
	} else {
		switch (len) {
		case 1:
			return data & 0xff;
		case 2:
			return le16_to_cpu(data & 0xffff);
		case 4:
			return le32_to_cpu(data & 0xffffffff);
		default:
			return le64_to_cpu(data);
		}
	}

	return data;		/* Leave LE untouched */
}

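/*
 * Convert host data into the guest's data endianness before it is made
 * visible to the guest, e.g. when completing an MMIO read.
 */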
static inline unsigned long vcpu_data_host_to_guest(struct kvm_vcpu *vcpu,
						    unsigned long data,
						    unsigned int len)
{
	if (kvm_vcpu_is_be(vcpu)) {
		switch (len) {
		case 1:
			return data & 0xff;
		case 2:
			return cpu_to_be16(data & 0xffff);
		case 4:
			return cpu_to_be32(data & 0xffffffff);
		default:
			return cpu_to_be64(data);
		}
	} else {
		switch (len) {
		case 1:
			return data & 0xff;
		case 2:
			return cpu_to_le16(data & 0xffff);
		case 4:
			return cpu_to_le32(data & 0xffffffff);
		default:
			return cpu_to_le64(data);
		}
	}

	return data;		/* Leave LE untouched */
}

#endif /* __ARM64_KVM_EMULATE_H__ */