root/arch/arm64/include/asm/kvm_host.h


DEFINITIONS

This source file includes the following definitions.
  1. kvm_init_host_cpu_context
  2. __cpu_init_hyp_mode
  3. kvm_arch_requires_vhe
  4. kvm_arch_hardware_unsetup
  5. kvm_arch_sync_events
  6. kvm_arch_sched_in
  7. kvm_arch_vcpu_block_finish
  8. __cpu_init_stage2
  9. kvm_pmu_counter_deferred
  10. kvm_arch_vcpu_run_pid_change
  11. kvm_set_pmu_events
  12. kvm_clr_pmu_events
  13. kvm_arm_vhe_guest_enter
  14. kvm_arm_vhe_guest_exit
  15. kvm_arm_harden_branch_predictor
  16. kvm_arm_have_ssbd

   1 /* SPDX-License-Identifier: GPL-2.0-only */
   2 /*
   3  * Copyright (C) 2012,2013 - ARM Ltd
   4  * Author: Marc Zyngier <marc.zyngier@arm.com>
   5  *
   6  * Derived from arch/arm/include/asm/kvm_host.h:
   7  * Copyright (C) 2012 - Virtual Open Systems and Columbia University
   8  * Author: Christoffer Dall <c.dall@virtualopensystems.com>
   9  */
  10 
  11 #ifndef __ARM64_KVM_HOST_H__
  12 #define __ARM64_KVM_HOST_H__
  13 
  14 #include <linux/bitmap.h>
  15 #include <linux/types.h>
  16 #include <linux/jump_label.h>
  17 #include <linux/kvm_types.h>
  18 #include <linux/percpu.h>
  19 #include <asm/arch_gicv3.h>
  20 #include <asm/barrier.h>
  21 #include <asm/cpufeature.h>
  22 #include <asm/cputype.h>
  23 #include <asm/daifflags.h>
  24 #include <asm/fpsimd.h>
  25 #include <asm/kvm.h>
  26 #include <asm/kvm_asm.h>
  27 #include <asm/kvm_mmio.h>
  28 #include <asm/thread_info.h>
  29 
  30 #define __KVM_HAVE_ARCH_INTC_INITIALIZED
  31 
  32 #define KVM_USER_MEM_SLOTS 512
  33 #define KVM_HALT_POLL_NS_DEFAULT 500000
  34 
  35 #include <kvm/arm_vgic.h>
  36 #include <kvm/arm_arch_timer.h>
  37 #include <kvm/arm_pmu.h>
  38 
  39 #define KVM_MAX_VCPUS VGIC_V3_MAX_CPUS
  40 
  41 #define KVM_VCPU_MAX_FEATURES 7
  42 
  43 #define KVM_REQ_SLEEP \
  44         KVM_ARCH_REQ_FLAGS(0, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
  45 #define KVM_REQ_IRQ_PENDING     KVM_ARCH_REQ(1)
  46 #define KVM_REQ_VCPU_RESET      KVM_ARCH_REQ(2)
  47 
  48 DECLARE_STATIC_KEY_FALSE(userspace_irqchip_in_use);
  49 
  50 extern unsigned int kvm_sve_max_vl;
  51 int kvm_arm_init_sve(void);
  52 
  53 int __attribute_const__ kvm_target_cpu(void);
  54 int kvm_reset_vcpu(struct kvm_vcpu *vcpu);
  55 void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu);
  56 int kvm_arch_vm_ioctl_check_extension(struct kvm *kvm, long ext);
  57 void __extended_idmap_trampoline(phys_addr_t boot_pgd, phys_addr_t idmap_start);
  58 
  59 struct kvm_vmid {
  60         /* The VMID generation used for the virt. memory system */
  61         u64    vmid_gen;
  62         u32    vmid;
  63 };
  64 
  65 struct kvm_arch {
  66         struct kvm_vmid vmid;
  67 
  68         /* stage2 entry level table */
  69         pgd_t *pgd;
  70         phys_addr_t pgd_phys;
  71 
  72         /* VTCR_EL2 value for this VM */
  73         u64    vtcr;
  74 
  75         /* The last vcpu id that ran on each physical CPU */
  76         int __percpu *last_vcpu_ran;
  77 
  78         /* The maximum number of vCPUs depends on the used GIC model */
  79         int max_vcpus;
  80 
  81         /* Interrupt controller */
  82         struct vgic_dist        vgic;
  83 
  84         /* Mandated version of PSCI */
  85         u32 psci_version;
  86 };
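
/*
 * Illustrative sketch (hypothetical helper, not part of this header): the
 * VMID allocator keeps a global generation counter and treats a kvm_vmid
 * whose vmid_gen lags behind it as stale, so a fresh VMID is handed out
 * before the next guest entry. A minimal staleness check, assuming the
 * caller owns the global atomic64_t generation counter, could look like:
 */
static inline bool example_vmid_gen_is_stale(struct kvm_vmid *vmid,
                                             atomic64_t *global_gen)
{
        /* A mismatch means this VM missed at least one VMID rollover. */
        return READ_ONCE(vmid->vmid_gen) != atomic64_read(global_gen);
}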
  87 
  88 #define KVM_NR_MEM_OBJS     40
  89 
  90 /*
  91  * We don't want allocation failures within the mmu code, so we preallocate
  92  * enough memory for a single page fault in a cache.
  93  */
  94 struct kvm_mmu_memory_cache {
  95         int nobjs;
  96         void *objects[KVM_NR_MEM_OBJS];
  97 };
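
/*
 * Illustrative sketch, simplified from the helpers in virt/kvm/arm/mmu.c
 * (names below are hypothetical): the cache is topped up from a sleepable
 * context before the MMU lock is taken, so that objects can then be
 * consumed under the lock with no possibility of allocation failure.
 */
static inline int example_mmu_cache_topup(struct kvm_mmu_memory_cache *mc,
                                          int min)
{
        /* May sleep: call this before taking the MMU spinlock. */
        while (mc->nobjs < min) {
                void *page = (void *)__get_free_page(GFP_KERNEL);

                if (!page)
                        return -ENOMEM;
                mc->objects[mc->nobjs++] = page;
        }
        return 0;
}

static inline void *example_mmu_cache_alloc(struct kvm_mmu_memory_cache *mc)
{
        /* Never sleeps or fails: safe with the MMU spinlock held. */
        BUG_ON(!mc->nobjs);
        return mc->objects[--mc->nobjs];
}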
  98 
  99 struct kvm_vcpu_fault_info {
  100         u32 esr_el2;            /* Hyp Syndrome Register */
 101         u64 far_el2;            /* Hyp Fault Address Register */
 102         u64 hpfar_el2;          /* Hyp IPA Fault Address Register */
 103         u64 disr_el1;           /* Deferred [SError] Status Register */
 104 };
 105 
 106 /*
 107  * 0 is reserved as an invalid value.
 108  * Order should be kept in sync with the save/restore code.
 109  */
 110 enum vcpu_sysreg {
 111         __INVALID_SYSREG__,
 112         MPIDR_EL1,      /* MultiProcessor Affinity Register */
 113         CSSELR_EL1,     /* Cache Size Selection Register */
 114         SCTLR_EL1,      /* System Control Register */
 115         ACTLR_EL1,      /* Auxiliary Control Register */
 116         CPACR_EL1,      /* Coprocessor Access Control */
 117         ZCR_EL1,        /* SVE Control */
 118         TTBR0_EL1,      /* Translation Table Base Register 0 */
 119         TTBR1_EL1,      /* Translation Table Base Register 1 */
 120         TCR_EL1,        /* Translation Control Register */
 121         ESR_EL1,        /* Exception Syndrome Register */
 122         AFSR0_EL1,      /* Auxiliary Fault Status Register 0 */
 123         AFSR1_EL1,      /* Auxiliary Fault Status Register 1 */
 124         FAR_EL1,        /* Fault Address Register */
 125         MAIR_EL1,       /* Memory Attribute Indirection Register */
 126         VBAR_EL1,       /* Vector Base Address Register */
 127         CONTEXTIDR_EL1, /* Context ID Register */
 128         TPIDR_EL0,      /* Thread ID, User R/W */
 129         TPIDRRO_EL0,    /* Thread ID, User R/O */
 130         TPIDR_EL1,      /* Thread ID, Privileged */
 131         AMAIR_EL1,      /* Aux Memory Attribute Indirection Register */
 132         CNTKCTL_EL1,    /* Timer Control Register (EL1) */
 133         PAR_EL1,        /* Physical Address Register */
 134         MDSCR_EL1,      /* Monitor Debug System Control Register */
 135         MDCCINT_EL1,    /* Monitor Debug Comms Channel Interrupt Enable Reg */
 136         DISR_EL1,       /* Deferred Interrupt Status Register */
 137 
 138         /* Performance Monitors Registers */
 139         PMCR_EL0,       /* Control Register */
 140         PMSELR_EL0,     /* Event Counter Selection Register */
 141         PMEVCNTR0_EL0,  /* Event Counter Register (0-30) */
 142         PMEVCNTR30_EL0 = PMEVCNTR0_EL0 + 30,
 143         PMCCNTR_EL0,    /* Cycle Counter Register */
 144         PMEVTYPER0_EL0, /* Event Type Register (0-30) */
 145         PMEVTYPER30_EL0 = PMEVTYPER0_EL0 + 30,
 146         PMCCFILTR_EL0,  /* Cycle Count Filter Register */
 147         PMCNTENSET_EL0, /* Count Enable Set Register */
 148         PMINTENSET_EL1, /* Interrupt Enable Set Register */
 149         PMOVSSET_EL0,   /* Overflow Flag Status Set Register */
 150         PMSWINC_EL0,    /* Software Increment Register */
 151         PMUSERENR_EL0,  /* User Enable Register */
 152 
 153         /* Pointer Authentication Registers in a strict increasing order. */
 154         APIAKEYLO_EL1,
 155         APIAKEYHI_EL1,
 156         APIBKEYLO_EL1,
 157         APIBKEYHI_EL1,
 158         APDAKEYLO_EL1,
 159         APDAKEYHI_EL1,
 160         APDBKEYLO_EL1,
 161         APDBKEYHI_EL1,
 162         APGAKEYLO_EL1,
 163         APGAKEYHI_EL1,
 164 
 165         /* 32bit specific registers. Keep them at the end of the range */
 166         DACR32_EL2,     /* Domain Access Control Register */
 167         IFSR32_EL2,     /* Instruction Fault Status Register */
 168         FPEXC32_EL2,    /* Floating-Point Exception Control Register */
 169         DBGVCR32_EL2,   /* Debug Vector Catch Register */
 170 
 171         NR_SYS_REGS     /* Nothing after this line! */
 172 };
 173 
 174 /* 32bit mapping */
 175 #define c0_MPIDR        (MPIDR_EL1 * 2) /* MultiProcessor ID Register */
 176 #define c0_CSSELR       (CSSELR_EL1 * 2)/* Cache Size Selection Register */
 177 #define c1_SCTLR        (SCTLR_EL1 * 2) /* System Control Register */
 178 #define c1_ACTLR        (ACTLR_EL1 * 2) /* Auxiliary Control Register */
 179 #define c1_CPACR        (CPACR_EL1 * 2) /* Coprocessor Access Control */
 180 #define c2_TTBR0        (TTBR0_EL1 * 2) /* Translation Table Base Register 0 */
 181 #define c2_TTBR0_high   (c2_TTBR0 + 1)  /* TTBR0 top 32 bits */
 182 #define c2_TTBR1        (TTBR1_EL1 * 2) /* Translation Table Base Register 1 */
 183 #define c2_TTBR1_high   (c2_TTBR1 + 1)  /* TTBR1 top 32 bits */
 184 #define c2_TTBCR        (TCR_EL1 * 2)   /* Translation Table Base Control R. */
 185 #define c3_DACR         (DACR32_EL2 * 2)/* Domain Access Control Register */
 186 #define c5_DFSR         (ESR_EL1 * 2)   /* Data Fault Status Register */
 187 #define c5_IFSR         (IFSR32_EL2 * 2)/* Instruction Fault Status Register */
 188 #define c5_ADFSR        (AFSR0_EL1 * 2) /* Auxiliary Data Fault Status R */
 189 #define c5_AIFSR        (AFSR1_EL1 * 2) /* Auxiliary Instr Fault Status R */
 190 #define c6_DFAR         (FAR_EL1 * 2)   /* Data Fault Address Register */
 191 #define c6_IFAR         (c6_DFAR + 1)   /* Instruction Fault Address Register */
 192 #define c7_PAR          (PAR_EL1 * 2)   /* Physical Address Register */
 193 #define c7_PAR_high     (c7_PAR + 1)    /* PAR top 32 bits */
 194 #define c10_PRRR        (MAIR_EL1 * 2)  /* Primary Region Remap Register */
 195 #define c10_NMRR        (c10_PRRR + 1)  /* Normal Memory Remap Register */
 196 #define c12_VBAR        (VBAR_EL1 * 2)  /* Vector Base Address Register */
 197 #define c13_CID         (CONTEXTIDR_EL1 * 2)    /* Context ID Register */
 198 #define c13_TID_URW     (TPIDR_EL0 * 2) /* Thread ID, User R/W */
 199 #define c13_TID_URO     (TPIDRRO_EL0 * 2)/* Thread ID, User R/O */
 200 #define c13_TID_PRIV    (TPIDR_EL1 * 2) /* Thread ID, Privileged */
 201 #define c10_AMAIR0      (AMAIR_EL1 * 2) /* Aux Memory Attr Indirection Reg */
 202 #define c10_AMAIR1      (c10_AMAIR0 + 1)/* Aux Memory Attr Indirection Reg */
 203 #define c14_CNTKCTL     (CNTKCTL_EL1 * 2) /* Timer Control Register (PL1) */
 204 
 205 #define cp14_DBGDSCRext (MDSCR_EL1 * 2)
 206 #define cp14_DBGBCR0    (DBGBCR0_EL1 * 2)
 207 #define cp14_DBGBVR0    (DBGBVR0_EL1 * 2)
 208 #define cp14_DBGBXVR0   (cp14_DBGBVR0 + 1)
 209 #define cp14_DBGWCR0    (DBGWCR0_EL1 * 2)
 210 #define cp14_DBGWVR0    (DBGWVR0_EL1 * 2)
 211 #define cp14_DBGDCCINT  (MDCCINT_EL1 * 2)
 212 
 213 #define NR_COPRO_REGS   (NR_SYS_REGS * 2)
 214 
 215 struct kvm_cpu_context {
 216         struct kvm_regs gp_regs;
 217         union {
 218                 u64 sys_regs[NR_SYS_REGS];
 219                 u32 copro[NR_COPRO_REGS];
 220         };
 221 
 222         struct kvm_vcpu *__hyp_running_vcpu;
 223 };
 224 
 225 struct kvm_pmu_events {
 226         u32 events_host;
 227         u32 events_guest;
 228 };
 229 
 230 struct kvm_host_data {
 231         struct kvm_cpu_context host_ctxt;
 232         struct kvm_pmu_events pmu_events;
 233 };
 234 
 235 typedef struct kvm_host_data kvm_host_data_t;
 236 
 237 struct vcpu_reset_state {
 238         unsigned long   pc;
 239         unsigned long   r0;
 240         bool            be;
 241         bool            reset;
 242 };
 243 
 244 struct kvm_vcpu_arch {
 245         struct kvm_cpu_context ctxt;
 246         void *sve_state;
 247         unsigned int sve_max_vl;
 248 
 249         /* HYP configuration */
 250         u64 hcr_el2;
 251         u32 mdcr_el2;
 252 
 253         /* Exception Information */
 254         struct kvm_vcpu_fault_info fault;
 255 
 256         /* State of various workarounds, see kvm_asm.h for bit assignment */
 257         u64 workaround_flags;
 258 
 259         /* Miscellaneous vcpu state flags */
 260         u64 flags;
 261 
 262         /*
 263          * We maintain more than a single set of debug registers to support
 264          * debugging the guest from the host and to maintain separate host and
  265          * guest state during world switches. vcpu_debug_state holds the debug
  266          * registers of the vcpu as the guest sees them. host_debug_state holds
  267          * the host registers, which are saved and restored during
  268          * world switches. external_debug_state holds the debug
  269          * values we want to use when debugging the guest; these are
  270          * set via the KVM_SET_GUEST_DEBUG ioctl.
 271          *
 272          * debug_ptr points to the set of debug registers that should be loaded
 273          * onto the hardware when running the guest.
 274          */
 275         struct kvm_guest_debug_arch *debug_ptr;
 276         struct kvm_guest_debug_arch vcpu_debug_state;
 277         struct kvm_guest_debug_arch external_debug_state;
 278 
 279         /* Pointer to host CPU context */
 280         struct kvm_cpu_context *host_cpu_context;
 281 
 282         struct thread_info *host_thread_info;   /* hyp VA */
 283         struct user_fpsimd_state *host_fpsimd_state;    /* hyp VA */
 284 
 285         struct {
 286                 /* {Break,watch}point registers */
 287                 struct kvm_guest_debug_arch regs;
 288                 /* Statistical profiling extension */
 289                 u64 pmscr_el1;
 290         } host_debug_state;
 291 
 292         /* VGIC state */
 293         struct vgic_cpu vgic_cpu;
 294         struct arch_timer_cpu timer_cpu;
 295         struct kvm_pmu pmu;
 296 
 297         /*
 298          * Anything that is not used directly from assembly code goes
 299          * here.
 300          */
 301 
 302         /*
 303          * Guest registers we preserve during guest debugging.
 304          *
 305          * These shadow registers are updated by the kvm_handle_sys_reg
 306          * trap handler if the guest accesses or updates them while we
 307          * are using guest debug.
 308          */
 309         struct {
 310                 u32     mdscr_el1;
 311         } guest_debug_preserved;
 312 
 313         /* vcpu power-off state */
 314         bool power_off;
 315 
 316         /* Don't run the guest (internal implementation need) */
 317         bool pause;
 318 
 319         /* IO related fields */
 320         struct kvm_decode mmio_decode;
 321 
 322         /* Cache some mmu pages needed inside spinlock regions */
 323         struct kvm_mmu_memory_cache mmu_page_cache;
 324 
 325         /* Target CPU and feature flags */
 326         int target;
 327         DECLARE_BITMAP(features, KVM_VCPU_MAX_FEATURES);
 328 
 329         /* Detect first run of a vcpu */
 330         bool has_run_once;
 331 
 332         /* Virtual SError ESR to restore when HCR_EL2.VSE is set */
 333         u64 vsesr_el2;
 334 
 335         /* Additional reset state */
 336         struct vcpu_reset_state reset_state;
 337 
 338         /* True when deferrable sysregs are loaded on the physical CPU,
 339          * see kvm_vcpu_load_sysregs and kvm_vcpu_put_sysregs. */
 340         bool sysregs_loaded_on_cpu;
 341 };
 342 
 343 /* Pointer to the vcpu's SVE FFR for sve_{save,load}_state() */
 344 #define vcpu_sve_pffr(vcpu) ((void *)((char *)((vcpu)->arch.sve_state) + \
 345                                       sve_ffr_offset((vcpu)->arch.sve_max_vl)))
 346 
 347 #define vcpu_sve_state_size(vcpu) ({                                    \
 348         size_t __size_ret;                                              \
 349         unsigned int __vcpu_vq;                                         \
 350                                                                         \
 351         if (WARN_ON(!sve_vl_valid((vcpu)->arch.sve_max_vl))) {          \
 352                 __size_ret = 0;                                         \
 353         } else {                                                        \
 354                 __vcpu_vq = sve_vq_from_vl((vcpu)->arch.sve_max_vl);    \
 355                 __size_ret = SVE_SIG_REGS_SIZE(__vcpu_vq);              \
 356         }                                                               \
 357                                                                         \
 358         __size_ret;                                                     \
 359 })
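
/*
 * Illustrative sketch (hypothetical helper; it would only compile in a .c
 * file where struct kvm_vcpu is complete): vcpu_sve_state_size() is what
 * sizes the buffer backing vcpu->arch.sve_state, so allocation during SVE
 * finalization is roughly:
 */
static inline int example_alloc_sve_state(struct kvm_vcpu *vcpu)
{
        size_t size = vcpu_sve_state_size(vcpu);

        if (!size)
                return -EINVAL;         /* sve_max_vl was never set up */

        vcpu->arch.sve_state = kzalloc(size, GFP_KERNEL);
        return vcpu->arch.sve_state ? 0 : -ENOMEM;
}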
 360 
 361 /* vcpu_arch flags field values: */
 362 #define KVM_ARM64_DEBUG_DIRTY           (1 << 0)
 363 #define KVM_ARM64_FP_ENABLED            (1 << 1) /* guest FP regs loaded */
 364 #define KVM_ARM64_FP_HOST               (1 << 2) /* host FP regs loaded */
 365 #define KVM_ARM64_HOST_SVE_IN_USE       (1 << 3) /* backup for host TIF_SVE */
 366 #define KVM_ARM64_HOST_SVE_ENABLED      (1 << 4) /* SVE enabled for EL0 */
 367 #define KVM_ARM64_GUEST_HAS_SVE         (1 << 5) /* SVE exposed to guest */
 368 #define KVM_ARM64_VCPU_SVE_FINALIZED    (1 << 6) /* SVE config completed */
 369 #define KVM_ARM64_GUEST_HAS_PTRAUTH     (1 << 7) /* PTRAUTH exposed to guest */
 370 
 371 #define vcpu_has_sve(vcpu) (system_supports_sve() && \
 372                             ((vcpu)->arch.flags & KVM_ARM64_GUEST_HAS_SVE))
 373 
 374 #define vcpu_has_ptrauth(vcpu)  ((system_supports_address_auth() || \
 375                                   system_supports_generic_auth()) && \
 376                                  ((vcpu)->arch.flags & KVM_ARM64_GUEST_HAS_PTRAUTH))
 377 
 378 #define vcpu_gp_regs(v)         (&(v)->arch.ctxt.gp_regs)
 379 
 380 /*
 381  * Only use __vcpu_sys_reg if you know you want the memory backed version of a
 382  * register, and not the one most recently accessed by a running VCPU.  For
 383  * example, for userspace access or for system registers that are never context
 384  * switched, but only emulated.
 385  */
 386 #define __vcpu_sys_reg(v,r)     ((v)->arch.ctxt.sys_regs[(r)])
 387 
 388 u64 vcpu_read_sys_reg(const struct kvm_vcpu *vcpu, int reg);
 389 void vcpu_write_sys_reg(struct kvm_vcpu *vcpu, u64 val, int reg);
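
/*
 * Illustrative sketch (hypothetical caller; real users live in .c files
 * where struct kvm_vcpu is complete): use the accessors above when the
 * register may currently be resident on the hardware, and the raw
 * __vcpu_sys_reg() slot only when the in-memory copy is known to be
 * authoritative.
 */
static inline void example_sysreg_access(struct kvm_vcpu *vcpu)
{
        u64 live = vcpu_read_sys_reg(vcpu, SCTLR_EL1);  /* may read hardware */
        u64 mem = __vcpu_sys_reg(vcpu, SCTLR_EL1);      /* in-memory copy only */

        /* Writes go through the accessor so a loaded vcpu sees them too. */
        vcpu_write_sys_reg(vcpu, live, SCTLR_EL1);
        (void)mem;
}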
 390 
 391 /*
 392  * CP14 and CP15 live in the same array, as they are backed by the
 393  * same system registers.
 394  */
 395 #define CPx_BIAS                IS_ENABLED(CONFIG_CPU_BIG_ENDIAN)
 396 
 397 #define vcpu_cp14(v,r)          ((v)->arch.ctxt.copro[(r) ^ CPx_BIAS])
 398 #define vcpu_cp15(v,r)          ((v)->arch.ctxt.copro[(r) ^ CPx_BIAS])
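
/*
 * Illustrative sketch (hypothetical accessor): each 64-bit sys_regs[] slot
 * aliases a pair of 32-bit copro[] slots, which is why the AArch32 indices
 * above are "EL1 index * 2" (plus one for the upper word of 64-bit
 * registers such as TTBR0), and CPx_BIAS selects the low word on big-endian
 * hosts. Reading the AArch32 view of SCTLR therefore touches the same
 * storage as __vcpu_sys_reg(vcpu, SCTLR_EL1):
 */
static inline u32 example_read_c1_sctlr(struct kvm_vcpu *vcpu)
{
        return vcpu_cp15(vcpu, c1_SCTLR);
}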
 399 
 400 struct kvm_vm_stat {
 401         ulong remote_tlb_flush;
 402 };
 403 
 404 struct kvm_vcpu_stat {
 405         u64 halt_successful_poll;
 406         u64 halt_attempted_poll;
 407         u64 halt_poll_invalid;
 408         u64 halt_wakeup;
 409         u64 hvc_exit_stat;
 410         u64 wfe_exit_stat;
 411         u64 wfi_exit_stat;
 412         u64 mmio_exit_user;
 413         u64 mmio_exit_kernel;
 414         u64 exits;
 415 };
 416 
 417 int kvm_vcpu_preferred_target(struct kvm_vcpu_init *init);
 418 unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu);
 419 int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices);
 420 int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
 421 int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
 422 int __kvm_arm_vcpu_get_events(struct kvm_vcpu *vcpu,
 423                               struct kvm_vcpu_events *events);
 424 
 425 int __kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu,
 426                               struct kvm_vcpu_events *events);
 427 
 428 #define KVM_ARCH_WANT_MMU_NOTIFIER
 429 int kvm_unmap_hva_range(struct kvm *kvm,
 430                         unsigned long start, unsigned long end);
 431 int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
 432 int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
 433 int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
 434 
 435 struct kvm_vcpu *kvm_arm_get_running_vcpu(void);
 436 struct kvm_vcpu * __percpu *kvm_get_running_vcpus(void);
 437 void kvm_arm_halt_guest(struct kvm *kvm);
 438 void kvm_arm_resume_guest(struct kvm *kvm);
 439 
 440 u64 __kvm_call_hyp(void *hypfn, ...);
 441 
 442 /*
 443  * The couple of isb() below are there to guarantee the same behaviour
 444  * on VHE as on !VHE, where the eret to EL1 acts as a context
 445  * synchronization event.
 446  */
 447 #define kvm_call_hyp(f, ...)                                            \
 448         do {                                                            \
 449                 if (has_vhe()) {                                        \
 450                         f(__VA_ARGS__);                                 \
 451                         isb();                                          \
 452                 } else {                                                \
 453                         __kvm_call_hyp(kvm_ksym_ref(f), ##__VA_ARGS__); \
 454                 }                                                       \
  455         } while (0)
 456 
 457 #define kvm_call_hyp_ret(f, ...)                                        \
 458         ({                                                              \
 459                 typeof(f(__VA_ARGS__)) ret;                             \
 460                                                                         \
 461                 if (has_vhe()) {                                        \
 462                         ret = f(__VA_ARGS__);                           \
 463                         isb();                                          \
 464                 } else {                                                \
 465                         ret = __kvm_call_hyp(kvm_ksym_ref(f),           \
 466                                              ##__VA_ARGS__);            \
 467                 }                                                       \
 468                                                                         \
 469                 ret;                                                    \
 470         })
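
/*
 * Illustrative sketch of how these wrappers are used elsewhere in KVM/arm64
 * (the surrounding function is hypothetical): pass the hyp function symbol
 * and its arguments, and the macro either calls it directly (VHE) or
 * bounces through the HVC trampoline (!VHE).
 */
static inline void example_call_hyp_usage(void)
{
        /* A void hyp call, and one whose return value we care about. */
        kvm_call_hyp(__kvm_flush_vm_context);
        (void)kvm_call_hyp_ret(__kvm_get_mdcr_el2);
}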
 471 
 472 void force_vm_exit(const cpumask_t *mask);
 473 void kvm_mmu_wp_memory_region(struct kvm *kvm, int slot);
 474 
 475 int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
 476                 int exception_index);
 477 void handle_exit_early(struct kvm_vcpu *vcpu, struct kvm_run *run,
 478                        int exception_index);
 479 
 480 int kvm_perf_init(void);
 481 int kvm_perf_teardown(void);
 482 
 483 void kvm_set_sei_esr(struct kvm_vcpu *vcpu, u64 syndrome);
 484 
 485 struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr);
 486 
 487 DECLARE_PER_CPU(kvm_host_data_t, kvm_host_data);
 488 
 489 static inline void kvm_init_host_cpu_context(struct kvm_cpu_context *cpu_ctxt)
 490 {
 491         /* The host's MPIDR is immutable, so let's set it up at boot time */
 492         cpu_ctxt->sys_regs[MPIDR_EL1] = read_cpuid_mpidr();
 493 }
 494 
 495 void __kvm_enable_ssbs(void);
 496 
 497 static inline void __cpu_init_hyp_mode(phys_addr_t pgd_ptr,
 498                                        unsigned long hyp_stack_ptr,
 499                                        unsigned long vector_ptr)
 500 {
 501         /*
 502          * Calculate the raw per-cpu offset without a translation from the
 503          * kernel's mapping to the linear mapping, and store it in tpidr_el2
 504          * so that we can use adr_l to access per-cpu variables in EL2.
 505          */
 506         u64 tpidr_el2 = ((u64)this_cpu_ptr(&kvm_host_data) -
 507                          (u64)kvm_ksym_ref(kvm_host_data));
 508 
 509         /*
 510          * Call initialization code, and switch to the full blown HYP code.
 511          * If the cpucaps haven't been finalized yet, something has gone very
 512          * wrong, and hyp will crash and burn when it uses any
 513          * cpus_have_const_cap() wrapper.
 514          */
 515         BUG_ON(!static_branch_likely(&arm64_const_caps_ready));
 516         __kvm_call_hyp((void *)pgd_ptr, hyp_stack_ptr, vector_ptr, tpidr_el2);
 517 
 518         /*
 519          * Disabling SSBD on a non-VHE system requires us to enable SSBS
 520          * at EL2.
 521          */
 522         if (!has_vhe() && this_cpu_has_cap(ARM64_SSBS) &&
 523             arm64_get_ssbd_state() == ARM64_SSBD_FORCE_DISABLE) {
 524                 kvm_call_hyp(__kvm_enable_ssbs);
 525         }
 526 }
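
/*
 * Illustrative sketch (hypothetical helper, only meaningful when executed
 * at EL2): the hyp code recovers this CPU's per-cpu data by adding the
 * offset stashed in tpidr_el2 back onto the symbol's hyp address, which is
 * essentially what the __hyp_this_cpu_ptr() accessor used by the
 * world-switch code boils down to.
 */
static inline struct kvm_host_data *example_hyp_host_data(void)
{
        void *ptr = hyp_symbol_addr(kvm_host_data);

        return (struct kvm_host_data *)(ptr + read_sysreg(tpidr_el2));
}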
 527 
 528 static inline bool kvm_arch_requires_vhe(void)
 529 {
 530         /*
 531          * The Arm architecture specifies that implementation of SVE
 532          * requires VHE also to be implemented.  The KVM code for arm64
 533          * relies on this when SVE is present:
 534          */
 535         if (system_supports_sve())
 536                 return true;
 537 
 538         /* Some implementations have defects that confine them to VHE */
 539         if (cpus_have_cap(ARM64_WORKAROUND_1165522))
 540                 return true;
 541 
 542         return false;
 543 }
 544 
 545 void kvm_arm_vcpu_ptrauth_trap(struct kvm_vcpu *vcpu);
 546 
 547 static inline void kvm_arch_hardware_unsetup(void) {}
 548 static inline void kvm_arch_sync_events(struct kvm *kvm) {}
 549 static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
 550 static inline void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu) {}
 551 
 552 void kvm_arm_init_debug(void);
 553 void kvm_arm_setup_debug(struct kvm_vcpu *vcpu);
 554 void kvm_arm_clear_debug(struct kvm_vcpu *vcpu);
 555 void kvm_arm_reset_debug_ptr(struct kvm_vcpu *vcpu);
 556 int kvm_arm_vcpu_arch_set_attr(struct kvm_vcpu *vcpu,
 557                                struct kvm_device_attr *attr);
 558 int kvm_arm_vcpu_arch_get_attr(struct kvm_vcpu *vcpu,
 559                                struct kvm_device_attr *attr);
 560 int kvm_arm_vcpu_arch_has_attr(struct kvm_vcpu *vcpu,
 561                                struct kvm_device_attr *attr);
 562 
 563 static inline void __cpu_init_stage2(void) {}
 564 
 565 /* Guest/host FPSIMD coordination helpers */
 566 int kvm_arch_vcpu_run_map_fp(struct kvm_vcpu *vcpu);
 567 void kvm_arch_vcpu_load_fp(struct kvm_vcpu *vcpu);
 568 void kvm_arch_vcpu_ctxsync_fp(struct kvm_vcpu *vcpu);
 569 void kvm_arch_vcpu_put_fp(struct kvm_vcpu *vcpu);
 570 
 571 static inline bool kvm_pmu_counter_deferred(struct perf_event_attr *attr)
 572 {
 573         return (!has_vhe() && attr->exclude_host);
 574 }
 575 
 576 #ifdef CONFIG_KVM /* Avoid conflicts with core headers if CONFIG_KVM=n */
 577 static inline int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu)
 578 {
 579         return kvm_arch_vcpu_run_map_fp(vcpu);
 580 }
 581 
 582 void kvm_set_pmu_events(u32 set, struct perf_event_attr *attr);
 583 void kvm_clr_pmu_events(u32 clr);
 584 
 585 void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu);
 586 void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu);
 587 #else
 588 static inline void kvm_set_pmu_events(u32 set, struct perf_event_attr *attr) {}
 589 static inline void kvm_clr_pmu_events(u32 clr) {}
 590 #endif
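
/*
 * Illustrative sketch of the intended calling pattern in the host PMU
 * driver (both functions below are hypothetical stand-ins): the event is
 * always registered with KVM, but is only programmed directly when it does
 * not have to wait for the !VHE guest-entry switch.
 */
static inline void example_hw_enable_counters(u32 mask) { }

static inline void example_enable_pmu_counter(u32 counter_mask,
                                              struct perf_event_attr *attr)
{
        kvm_set_pmu_events(counter_mask, attr); /* tell the switch code */

        if (!kvm_pmu_counter_deferred(attr))
                example_hw_enable_counters(counter_mask);
}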
 591 
 592 static inline void kvm_arm_vhe_guest_enter(void)
 593 {
 594         local_daif_mask();
 595 
 596         /*
 597          * Having IRQs masked via PMR when entering the guest means the GIC
  598          * will not signal lower-priority interrupts to the CPU, and the
 599          * only way to get out will be via guest exceptions.
 600          * Naturally, we want to avoid this.
 601          *
  602          * local_daif_mask() already sets GIC_PRIO_PSR_I_SET; we just need a
  603          * dsb to ensure that the redistributor forwards EL2 IRQs to the CPU.
 604          */
 605         if (system_uses_irq_prio_masking())
 606                 dsb(sy);
 607 }
 608 
 609 static inline void kvm_arm_vhe_guest_exit(void)
 610 {
 611         /*
 612          * local_daif_restore() takes care to properly restore PSTATE.DAIF
 613          * and the GIC PMR if the host is using IRQ priorities.
 614          */
 615         local_daif_restore(DAIF_PROCCTX_NOIRQ);
 616 
 617         /*
 618          * When we exit from the guest we change a number of CPU configuration
 619          * parameters, such as traps.  Make sure these changes take effect
 620          * before running the host or additional guests.
 621          */
 622         isb();
 623 }
 624 
 625 #define KVM_BP_HARDEN_UNKNOWN           -1
 626 #define KVM_BP_HARDEN_WA_NEEDED         0
 627 #define KVM_BP_HARDEN_NOT_REQUIRED      1
 628 
 629 static inline int kvm_arm_harden_branch_predictor(void)
 630 {
 631         switch (get_spectre_v2_workaround_state()) {
 632         case ARM64_BP_HARDEN_WA_NEEDED:
 633                 return KVM_BP_HARDEN_WA_NEEDED;
 634         case ARM64_BP_HARDEN_NOT_REQUIRED:
 635                 return KVM_BP_HARDEN_NOT_REQUIRED;
 636         case ARM64_BP_HARDEN_UNKNOWN:
 637         default:
 638                 return KVM_BP_HARDEN_UNKNOWN;
 639         }
 640 }
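
/*
 * Illustrative sketch (hypothetical helper, simplified from the SMCCC/PSCI
 * feature handling): the tri-state above lets a guest's
 * ARM_SMCCC_ARCH_WORKAROUND_1 probe be answered precisely; a minimal
 * consumer that only advertises the workaround when it is both needed and
 * implemented might do:
 */
static inline bool example_advertise_wa1(void)
{
        return kvm_arm_harden_branch_predictor() == KVM_BP_HARDEN_WA_NEEDED;
}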
 641 
 642 #define KVM_SSBD_UNKNOWN                -1
 643 #define KVM_SSBD_FORCE_DISABLE          0
  644 #define KVM_SSBD_KERNEL                 1
 645 #define KVM_SSBD_FORCE_ENABLE           2
 646 #define KVM_SSBD_MITIGATED              3
 647 
 648 static inline int kvm_arm_have_ssbd(void)
 649 {
 650         switch (arm64_get_ssbd_state()) {
 651         case ARM64_SSBD_FORCE_DISABLE:
 652                 return KVM_SSBD_FORCE_DISABLE;
 653         case ARM64_SSBD_KERNEL:
 654                 return KVM_SSBD_KERNEL;
 655         case ARM64_SSBD_FORCE_ENABLE:
 656                 return KVM_SSBD_FORCE_ENABLE;
 657         case ARM64_SSBD_MITIGATED:
 658                 return KVM_SSBD_MITIGATED;
 659         case ARM64_SSBD_UNKNOWN:
 660         default:
 661                 return KVM_SSBD_UNKNOWN;
 662         }
 663 }
 664 
 665 void kvm_vcpu_load_sysregs(struct kvm_vcpu *vcpu);
 666 void kvm_vcpu_put_sysregs(struct kvm_vcpu *vcpu);
 667 
 668 void kvm_set_ipa_limit(void);
 669 
 670 #define __KVM_HAVE_ARCH_VM_ALLOC
 671 struct kvm *kvm_arch_alloc_vm(void);
 672 void kvm_arch_free_vm(struct kvm *kvm);
 673 
 674 int kvm_arm_setup_stage2(struct kvm *kvm, unsigned long type);
 675 
 676 int kvm_arm_vcpu_finalize(struct kvm_vcpu *vcpu, int feature);
 677 bool kvm_arm_vcpu_is_finalized(struct kvm_vcpu *vcpu);
 678 
 679 #define kvm_arm_vcpu_sve_finalized(vcpu) \
 680         ((vcpu)->arch.flags & KVM_ARM64_VCPU_SVE_FINALIZED)
 681 
 682 #define kvm_arm_vcpu_loaded(vcpu)       ((vcpu)->arch.sysregs_loaded_on_cpu)
 683 
 684 #endif /* __ARM64_KVM_HOST_H__ */
