root/arch/arm/include/asm/kvm_host.h

DEFINITIONS

This source file includes the following definitions.
  1. kvm_arm_init_sve
  2. kvm_init_host_cpu_context
  3. handle_exit_early
  4. __cpu_init_hyp_mode
  5. __cpu_init_stage2
  6. kvm_arch_vm_ioctl_check_extension
  7. kvm_arch_requires_vhe
  8. kvm_arch_hardware_unsetup
  9. kvm_arch_sync_events
  10. kvm_arch_vcpu_uninit
  11. kvm_arch_sched_in
  12. kvm_arch_vcpu_block_finish
  13. kvm_arm_init_debug
  14. kvm_arm_setup_debug
  15. kvm_arm_clear_debug
  16. kvm_arm_reset_debug_ptr
  17. kvm_arch_vcpu_load_fp
  18. kvm_arch_vcpu_ctxsync_fp
  19. kvm_arch_vcpu_put_fp
  20. kvm_vcpu_pmu_restore_guest
  21. kvm_vcpu_pmu_restore_host
  22. kvm_arm_vhe_guest_enter
  23. kvm_arm_vhe_guest_exit
  24. kvm_arm_harden_branch_predictor
  25. kvm_arm_have_ssbd
  26. kvm_vcpu_load_sysregs
  27. kvm_vcpu_put_sysregs
  28. kvm_arm_setup_stage2
  29. kvm_arm_vcpu_finalize
  30. kvm_arm_vcpu_is_finalized

/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 */

#ifndef __ARM_KVM_HOST_H__
#define __ARM_KVM_HOST_H__

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/kvm_types.h>
#include <asm/cputype.h>
#include <asm/kvm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_mmio.h>
#include <asm/fpstate.h>
#include <kvm/arm_arch_timer.h>

#define __KVM_HAVE_ARCH_INTC_INITIALIZED

#define KVM_USER_MEM_SLOTS 32
#define KVM_HAVE_ONE_REG
#define KVM_HALT_POLL_NS_DEFAULT 500000

#define KVM_VCPU_MAX_FEATURES 2

#include <kvm/arm_vgic.h>


#ifdef CONFIG_ARM_GIC_V3
#define KVM_MAX_VCPUS VGIC_V3_MAX_CPUS
#else
#define KVM_MAX_VCPUS VGIC_V2_MAX_CPUS
#endif

#define KVM_REQ_SLEEP \
        KVM_ARCH_REQ_FLAGS(0, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_IRQ_PENDING     KVM_ARCH_REQ(1)
#define KVM_REQ_VCPU_RESET      KVM_ARCH_REQ(2)
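
/*
 * Illustrative sketch (not part of this header): requests are posted and
 * consumed with the generic helpers from <linux/kvm_host.h>. A
 * hypothetical caller asking a vcpu to sleep would do roughly:
 */
#if 0
static void example_request_sleep(struct kvm_vcpu *vcpu)
{
        kvm_make_request(KVM_REQ_SLEEP, vcpu); /* mark the request pending */
        kvm_vcpu_kick(vcpu);    /* force an exit so the run loop sees it */
}
#endif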

DECLARE_STATIC_KEY_FALSE(userspace_irqchip_in_use);

static inline int kvm_arm_init_sve(void) { return 0; }

u32 *kvm_vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num, u32 mode);
int __attribute_const__ kvm_target_cpu(void);
int kvm_reset_vcpu(struct kvm_vcpu *vcpu);
void kvm_reset_coprocs(struct kvm_vcpu *vcpu);

struct kvm_vmid {
        /* The VMID generation used for the virt. memory system */
        u64    vmid_gen;
        u32    vmid;
};
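
/*
 * Sketch of how the generation is used (the real logic lives in
 * virt/kvm/arm/arm.c, where kvm_vmid_gen is a global generation counter
 * that is bumped, with a full TLB flush, whenever the hardware VMID
 * space is exhausted):
 */
#if 0
static bool example_need_new_vmid_gen(struct kvm_vmid *vmid)
{
        /* A stale generation means this VM must get a fresh VMID. */
        return READ_ONCE(vmid->vmid_gen) != atomic64_read(&kvm_vmid_gen);
}
#endif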

struct kvm_arch {
        /* The last vcpu id that ran on each physical CPU */
        int __percpu *last_vcpu_ran;

        /*
         * Anything that is not used directly from assembly code goes
         * here.
         */

        /* The VMID generation used for the virt. memory system */
        struct kvm_vmid vmid;

        /* Stage-2 page table */
        pgd_t *pgd;
        phys_addr_t pgd_phys;

        /* Interrupt controller */
        struct vgic_dist        vgic;
        int max_vcpus;

        /* Mandated version of PSCI */
        u32 psci_version;
};

#define KVM_NR_MEM_OBJS     40

/*
 * We don't want allocation failures within the mmu code, so we preallocate
 * enough memory for a single page fault in a cache.
 */
struct kvm_mmu_memory_cache {
        int nobjs;
        void *objects[KVM_NR_MEM_OBJS];
};
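
/*
 * Illustrative only: the cache is topped up before taking the mmu
 * spinlock, so allocations inside it can neither fail nor sleep. A
 * minimal pop operation looks like:
 */
#if 0
static void *example_mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc)
{
        BUG_ON(!mc->nobjs);             /* top-up must have happened earlier */
        return mc->objects[--mc->nobjs];
}
#endif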

struct kvm_vcpu_fault_info {
        u32 hsr;                /* Hyp Syndrome Register */
        u32 hxfar;              /* Hyp Data/Inst. Fault Address Register */
        u32 hpfar;              /* Hyp IPA Fault Address Register */
};

/*
 * 0 is reserved as an invalid value.
 * Order should be kept in sync with the save/restore code.
 */
enum vcpu_sysreg {
        __INVALID_SYSREG__,
        c0_MPIDR,               /* MultiProcessor ID Register */
        c0_CSSELR,              /* Cache Size Selection Register */
        c1_SCTLR,               /* System Control Register */
        c1_ACTLR,               /* Auxiliary Control Register */
        c1_CPACR,               /* Coprocessor Access Control */
        c2_TTBR0,               /* Translation Table Base Register 0 */
        c2_TTBR0_high,          /* TTBR0 top 32 bits */
        c2_TTBR1,               /* Translation Table Base Register 1 */
        c2_TTBR1_high,          /* TTBR1 top 32 bits */
        c2_TTBCR,               /* Translation Table Base Control R. */
        c3_DACR,                /* Domain Access Control Register */
        c5_DFSR,                /* Data Fault Status Register */
        c5_IFSR,                /* Instruction Fault Status Register */
        c5_ADFSR,               /* Auxiliary Data Fault Status R */
        c5_AIFSR,               /* Auxiliary Instruction Fault Status R */
        c6_DFAR,                /* Data Fault Address Register */
        c6_IFAR,                /* Instruction Fault Address Register */
        c7_PAR,                 /* Physical Address Register */
        c7_PAR_high,            /* PAR top 32 bits */
        c9_L2CTLR,              /* Cortex A15/A7 L2 Control Register */
        c10_PRRR,               /* Primary Region Remap Register */
        c10_NMRR,               /* Normal Memory Remap Register */
        c12_VBAR,               /* Vector Base Address Register */
        c13_CID,                /* Context ID Register */
        c13_TID_URW,            /* Thread ID, User R/W */
        c13_TID_URO,            /* Thread ID, User R/O */
        c13_TID_PRIV,           /* Thread ID, Privileged */
        c14_CNTKCTL,            /* Timer Control Register (PL1) */
        c10_AMAIR0,             /* Auxiliary Memory Attribute Indirection Reg0 */
        c10_AMAIR1,             /* Auxiliary Memory Attribute Indirection Reg1 */
        NR_CP15_REGS            /* Number of regs (incl. invalid) */
};

struct kvm_cpu_context {
        struct kvm_regs gp_regs;
        struct vfp_hard_struct vfp;
        u32 cp15[NR_CP15_REGS];
};

struct kvm_host_data {
        struct kvm_cpu_context host_ctxt;
};

typedef struct kvm_host_data kvm_host_data_t;

static inline void kvm_init_host_cpu_context(struct kvm_cpu_context *cpu_ctxt)
{
        /* The host's MPIDR is immutable, so let's set it up at boot time */
        cpu_ctxt->cp15[c0_MPIDR] = read_cpuid_mpidr();
}
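
/*
 * Sketch, for orientation (the real per-cpu instance and init call live
 * in virt/kvm/arm/arm.c; this is an abbreviated rendition):
 */
#if 0
static DEFINE_PER_CPU(kvm_host_data_t, kvm_host_data);

static void example_init_this_cpu_context(void)
{
        kvm_init_host_cpu_context(&this_cpu_ptr(&kvm_host_data)->host_ctxt);
}
#endif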

struct vcpu_reset_state {
        unsigned long   pc;
        unsigned long   r0;
        bool            be;
        bool            reset;
};

struct kvm_vcpu_arch {
        struct kvm_cpu_context ctxt;

        int target; /* Processor target */
        DECLARE_BITMAP(features, KVM_VCPU_MAX_FEATURES);

        /* The CPU type we expose to the VM */
        u32 midr;

        /* HYP trapping configuration */
        u32 hcr;

        /* Exception Information */
        struct kvm_vcpu_fault_info fault;

        /* Host FP context */
        struct kvm_cpu_context *host_cpu_context;

        /* VGIC state */
        struct vgic_cpu vgic_cpu;
        struct arch_timer_cpu timer_cpu;

        /*
         * Anything that is not used directly from assembly code goes
         * here.
         */

        /* vcpu power-off state */
        bool power_off;

        /* Don't run the guest (internal implementation need) */
        bool pause;

        /* IO related fields */
        struct kvm_decode mmio_decode;

        /* Cache some mmu pages needed inside spinlock regions */
        struct kvm_mmu_memory_cache mmu_page_cache;

        struct vcpu_reset_state reset_state;

        /* Detect first run of a vcpu */
        bool has_run_once;
};

struct kvm_vm_stat {
        ulong remote_tlb_flush;
};

struct kvm_vcpu_stat {
        u64 halt_successful_poll;
        u64 halt_attempted_poll;
        u64 halt_poll_invalid;
        u64 halt_wakeup;
        u64 hvc_exit_stat;
        u64 wfe_exit_stat;
        u64 wfi_exit_stat;
        u64 mmio_exit_user;
        u64 mmio_exit_kernel;
        u64 exits;
};

#define vcpu_cp15(v,r)  (v)->arch.ctxt.cp15[r]

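/*
 * Usage sketch: 64-bit cp15 registers are stored as two 32-bit halves
 * (see c2_TTBR0/c2_TTBR0_high above), so a hypothetical accessor would
 * recombine them like this:
 */
#if 0
static u64 example_read_ttbr0(struct kvm_vcpu *vcpu)
{
        return ((u64)vcpu_cp15(vcpu, c2_TTBR0_high) << 32) |
                vcpu_cp15(vcpu, c2_TTBR0);
}
#endif
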
int kvm_vcpu_preferred_target(struct kvm_vcpu_init *init);
unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu);
int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices);
int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);

unsigned long __kvm_call_hyp(void *hypfn, ...);

/*
 * The has_vhe() part doesn't get emitted, but is used for type-checking.
 */
#define kvm_call_hyp(f, ...)                                            \
        do {                                                            \
                if (has_vhe()) {                                        \
                        f(__VA_ARGS__);                                 \
                } else {                                                \
                        __kvm_call_hyp(kvm_ksym_ref(f), ##__VA_ARGS__); \
                }                                                       \
        } while (0)

#define kvm_call_hyp_ret(f, ...)                                        \
        ({                                                              \
                typeof(f(__VA_ARGS__)) ret;                             \
                                                                        \
                if (has_vhe()) {                                        \
                        ret = f(__VA_ARGS__);                           \
                } else {                                                \
                        ret = __kvm_call_hyp(kvm_ksym_ref(f),           \
                                             ##__VA_ARGS__);            \
                }                                                       \
                                                                        \
                ret;                                                    \
        })
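
/*
 * Usage sketch: __cpu_init_stage2() below uses the void form; a
 * value-returning hyp call goes through kvm_call_hyp_ret(). Hypothetical
 * caller, assuming a u64-returning hyp function from <asm/kvm_hyp.h>:
 */
#if 0
static u64 example_read_vtr(void)
{
        return kvm_call_hyp_ret(__vgic_v3_get_ich_vtr_el2);
}
#endif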

void force_vm_exit(const cpumask_t *mask);
int __kvm_arm_vcpu_get_events(struct kvm_vcpu *vcpu,
                              struct kvm_vcpu_events *events);

int __kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu,
                              struct kvm_vcpu_events *events);

#define KVM_ARCH_WANT_MMU_NOTIFIER
int kvm_unmap_hva_range(struct kvm *kvm,
                        unsigned long start, unsigned long end);
int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);

int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);

struct kvm_vcpu *kvm_arm_get_running_vcpu(void);
struct kvm_vcpu __percpu **kvm_get_running_vcpus(void);
void kvm_arm_halt_guest(struct kvm *kvm);
void kvm_arm_resume_guest(struct kvm *kvm);

int kvm_arm_copy_coproc_indices(struct kvm_vcpu *vcpu, u64 __user *uindices);
unsigned long kvm_arm_num_coproc_regs(struct kvm_vcpu *vcpu);
int kvm_arm_coproc_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *);
int kvm_arm_coproc_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *);

int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
                int exception_index);

static inline void handle_exit_early(struct kvm_vcpu *vcpu, struct kvm_run *run,
                                     int exception_index) {}

static inline void __cpu_init_hyp_mode(phys_addr_t pgd_ptr,
                                       unsigned long hyp_stack_ptr,
                                       unsigned long vector_ptr)
{
        /*
         * Call initialization code, and switch to the full blown HYP
         * code. The init code doesn't need to preserve these
         * registers as r0-r3 are already callee saved according to
         * the AAPCS.
         * Note that we slightly misuse the prototype by casting the
         * stack pointer to a void *.
         *
         * The PGDs are always passed as the third argument, in order
         * to be passed into r2-r3 to the init code (yes, this is
         * compliant with the PCS!).
         */

        __kvm_call_hyp((void *)hyp_stack_ptr, vector_ptr, pgd_ptr);
}
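
/*
 * Sketch of the caller's side (abbreviated from cpu_init_hyp_mode() in
 * virt/kvm/arm/arm.c; names here follow that file):
 */
#if 0
static void example_cpu_init_hyp_mode(void)
{
        phys_addr_t pgd_ptr = kvm_mmu_get_httbr();
        unsigned long stack_page = __this_cpu_read(kvm_arm_hyp_stack_page);
        unsigned long vector_ptr = (unsigned long)kvm_get_hyp_vector();

        /* Stacks grow down, so pass the top of the stack page. */
        __cpu_init_hyp_mode(pgd_ptr, stack_page + PAGE_SIZE, vector_ptr);
}
#endif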

static inline void __cpu_init_stage2(void)
{
        kvm_call_hyp(__init_stage2_translation);
}

static inline int kvm_arch_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
        return 0;
}

int kvm_perf_init(void);
int kvm_perf_teardown(void);

void kvm_mmu_wp_memory_region(struct kvm *kvm, int slot);

struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr);

static inline bool kvm_arch_requires_vhe(void) { return false; }
static inline void kvm_arch_hardware_unsetup(void) {}
static inline void kvm_arch_sync_events(struct kvm *kvm) {}
static inline void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) {}
static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
static inline void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu) {}

static inline void kvm_arm_init_debug(void) {}
static inline void kvm_arm_setup_debug(struct kvm_vcpu *vcpu) {}
static inline void kvm_arm_clear_debug(struct kvm_vcpu *vcpu) {}
static inline void kvm_arm_reset_debug_ptr(struct kvm_vcpu *vcpu) {}

int kvm_arm_vcpu_arch_set_attr(struct kvm_vcpu *vcpu,
                               struct kvm_device_attr *attr);
int kvm_arm_vcpu_arch_get_attr(struct kvm_vcpu *vcpu,
                               struct kvm_device_attr *attr);
int kvm_arm_vcpu_arch_has_attr(struct kvm_vcpu *vcpu,
                               struct kvm_device_attr *attr);

/*
 * VFP/NEON switching is all done by the hyp switch code, so no need to
 * coordinate with host context handling for this state:
 */
static inline void kvm_arch_vcpu_load_fp(struct kvm_vcpu *vcpu) {}
static inline void kvm_arch_vcpu_ctxsync_fp(struct kvm_vcpu *vcpu) {}
static inline void kvm_arch_vcpu_put_fp(struct kvm_vcpu *vcpu) {}

static inline void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu) {}
static inline void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu) {}

static inline void kvm_arm_vhe_guest_enter(void) {}
static inline void kvm_arm_vhe_guest_exit(void) {}

#define KVM_BP_HARDEN_UNKNOWN           -1
#define KVM_BP_HARDEN_WA_NEEDED         0
#define KVM_BP_HARDEN_NOT_REQUIRED      1

static inline int kvm_arm_harden_branch_predictor(void)
{
        switch (read_cpuid_part()) {
#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
        case ARM_CPU_PART_BRAHMA_B15:
        case ARM_CPU_PART_CORTEX_A12:
        case ARM_CPU_PART_CORTEX_A15:
        case ARM_CPU_PART_CORTEX_A17:
                return KVM_BP_HARDEN_WA_NEEDED;
#endif
        case ARM_CPU_PART_CORTEX_A7:
                return KVM_BP_HARDEN_NOT_REQUIRED;
        default:
                return KVM_BP_HARDEN_UNKNOWN;
        }
}
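
/*
 * Illustrative consumer: these values end up reported to userspace as
 * the SMCCC_ARCH_WORKAROUND_1 firmware-register state (see
 * virt/kvm/arm/psci.c). A hypothetical check:
 */
#if 0
static bool example_bp_hardening_needed(void)
{
        return kvm_arm_harden_branch_predictor() == KVM_BP_HARDEN_WA_NEEDED;
}
#endif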

#define KVM_SSBD_UNKNOWN                -1
#define KVM_SSBD_FORCE_DISABLE          0
#define KVM_SSBD_KERNEL                 1
#define KVM_SSBD_FORCE_ENABLE           2
#define KVM_SSBD_MITIGATED              3

static inline int kvm_arm_have_ssbd(void)
{
        /* No way to detect it yet, pretend it is not there. */
        return KVM_SSBD_UNKNOWN;
}
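
/*
 * Hypothetical consumer, for symmetry with the branch-predictor helper
 * above (arm64 implements real detection; 32-bit ARM cannot yet):
 */
#if 0
static bool example_ssbd_state_known(void)
{
        return kvm_arm_have_ssbd() != KVM_SSBD_UNKNOWN;
}
#endif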

static inline void kvm_vcpu_load_sysregs(struct kvm_vcpu *vcpu) {}
static inline void kvm_vcpu_put_sysregs(struct kvm_vcpu *vcpu) {}

#define __KVM_HAVE_ARCH_VM_ALLOC
struct kvm *kvm_arch_alloc_vm(void);
void kvm_arch_free_vm(struct kvm *kvm);

static inline int kvm_arm_setup_stage2(struct kvm *kvm, unsigned long type)
{
        /*
         * On 32bit ARM, VMs get a static 40bit IPA stage2 setup,
         * so any non-zero value used as type is illegal.
         */
        if (type)
                return -EINVAL;
        return 0;
}
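
/*
 * Userspace view, as a sketch: on arm64 the KVM_CREATE_VM type argument
 * may encode a machine IPA size, but on 32-bit ARM only type 0 is valid:
 */
#if 0
static int example_create_vm(int kvm_fd)
{
        /* Any non-zero type is rejected with -EINVAL by the check above. */
        return ioctl(kvm_fd, KVM_CREATE_VM, 0);
}
#endif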

static inline int kvm_arm_vcpu_finalize(struct kvm_vcpu *vcpu, int feature)
{
        return -EINVAL;
}

static inline bool kvm_arm_vcpu_is_finalized(struct kvm_vcpu *vcpu)
{
        return true;
}

#define kvm_arm_vcpu_loaded(vcpu)       (false)

#endif /* __ARM_KVM_HOST_H__ */
