root/arch/arm64/kvm/hyp/switch.c


DEFINITIONS

This source file includes the following definitions:
  1. update_fp_enabled
  2. __fpsimd_save_fpexc32
  3. __activate_traps_fpsimd32
  4. __activate_traps_common
  5. __deactivate_traps_common
  6. activate_traps_vhe
  7. __activate_traps_nvhe
  8. __activate_traps
  9. deactivate_traps_vhe
  10. __deactivate_traps_nvhe
  11. __deactivate_traps
  12. activate_traps_vhe_load
  13. deactivate_traps_vhe_put
  14. __activate_vm
  15. __deactivate_vm
  16. __hyp_vgic_save_state
  17. __hyp_vgic_restore_state
  18. __translate_far_to_hpfar
  19. __populate_fault_info
  20. __hyp_handle_fpsimd
  21. handle_tx2_tvm
  22. fixup_guest_exit
  23. __needs_ssbd_off
  24. __set_guest_arch_workaround_state
  25. __set_host_arch_workaround_state
  26. __pmu_switch_to_guest
  27. __pmu_switch_to_host
  28. kvm_vcpu_run_vhe
  29. __kvm_vcpu_run_nvhe
  30. __hyp_call_panic_nvhe
  31. __hyp_call_panic_vhe
  32. hyp_panic

   1 // SPDX-License-Identifier: GPL-2.0-only
   2 /*
   3  * Copyright (C) 2015 - ARM Ltd
   4  * Author: Marc Zyngier <marc.zyngier@arm.com>
   5  */
   6 
   7 #include <linux/arm-smccc.h>
   8 #include <linux/kvm_host.h>
   9 #include <linux/types.h>
  10 #include <linux/jump_label.h>
  11 #include <uapi/linux/psci.h>
  12 
  13 #include <kvm/arm_psci.h>
  14 
  15 #include <asm/arch_gicv3.h>
  16 #include <asm/cpufeature.h>
  17 #include <asm/kprobes.h>
  18 #include <asm/kvm_asm.h>
  19 #include <asm/kvm_emulate.h>
  20 #include <asm/kvm_host.h>
  21 #include <asm/kvm_hyp.h>
  22 #include <asm/kvm_mmu.h>
  23 #include <asm/fpsimd.h>
  24 #include <asm/debug-monitors.h>
  25 #include <asm/processor.h>
  26 #include <asm/thread_info.h>
  27 
  28 /* Check whether the FP regs were dirtied while in the host-side run loop: */
  29 static bool __hyp_text update_fp_enabled(struct kvm_vcpu *vcpu)
  30 {
  31         /*
  32          * When the system doesn't support FP/SIMD, we cannot rely on
  33          * the _TIF_FOREIGN_FPSTATE flag. However, we always inject an
  34          * abort on the very first access to FP and thus we should never
  35          * see KVM_ARM64_FP_ENABLED. For added safety, make sure we always
  36          * trap the accesses.
  37          */
  38         if (!system_supports_fpsimd() ||
  39             vcpu->arch.host_thread_info->flags & _TIF_FOREIGN_FPSTATE)
  40                 vcpu->arch.flags &= ~(KVM_ARM64_FP_ENABLED |
  41                                       KVM_ARM64_FP_HOST);
  42 
  43         return !!(vcpu->arch.flags & KVM_ARM64_FP_ENABLED);
  44 }
  45 
  46 /* Save the 32-bit only FPSIMD system register state */
  47 static void __hyp_text __fpsimd_save_fpexc32(struct kvm_vcpu *vcpu)
  48 {
  49         if (!vcpu_el1_is_32bit(vcpu))
  50                 return;
  51 
  52         vcpu->arch.ctxt.sys_regs[FPEXC32_EL2] = read_sysreg(fpexc32_el2);
  53 }
  54 
  55 static void __hyp_text __activate_traps_fpsimd32(struct kvm_vcpu *vcpu)
  56 {
  57         /*
  58          * We are about to set CPTR_EL2.TFP to trap all floating point
  59          * register accesses to EL2, however, the ARM ARM clearly states that
  60          * traps are only taken to EL2 if the operation would not otherwise
  61          * trap to EL1.  Therefore, always make sure that for 32-bit guests,
  62          * we set FPEXC.EN to prevent traps to EL1, when setting the TFP bit.
  63          * If FP/ASIMD is not implemented, FPEXC is UNDEFINED and any access to
  64          * it will cause an exception.
  65          */
  66         if (vcpu_el1_is_32bit(vcpu) && system_supports_fpsimd()) {
  67                 write_sysreg(1 << 30, fpexc32_el2);
  68                 isb();
  69         }
  70 }
  71 
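      /* Trap configuration shared by the VHE and non-VHE world-switch paths */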
  72 static void __hyp_text __activate_traps_common(struct kvm_vcpu *vcpu)
  73 {
  74         /* Trap on AArch32 cp15 c15 (impdef sysregs) accesses (EL1 or EL0) */
  75         write_sysreg(1 << 15, hstr_el2);
  76 
  77         /*
  78          * Make sure we trap PMU access from EL0 to EL2. Also sanitize
  79          * PMSELR_EL0 to make sure it never contains the cycle
  80          * counter, which could make a PMXEVCNTR_EL0 access UNDEF at
  81          * EL1 instead of being trapped to EL2.
  82          */
  83         write_sysreg(0, pmselr_el0);
  84         write_sysreg(ARMV8_PMU_USERENR_MASK, pmuserenr_el0);
  85         write_sysreg(vcpu->arch.mdcr_el2, mdcr_el2);
  86 }
  87 
  88 static void __hyp_text __deactivate_traps_common(void)
  89 {
  90         write_sysreg(0, hstr_el2);
  91         write_sysreg(0, pmuserenr_el0);
  92 }
  93 
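      /*
       * VHE: trap trace, and trap FP/SIMD and SVE unless the guest's FP
       * state is already loaded (SVE is only enabled for SVE vcpus).
       */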
  94 static void activate_traps_vhe(struct kvm_vcpu *vcpu)
  95 {
  96         u64 val;
  97 
  98         val = read_sysreg(cpacr_el1);
  99         val |= CPACR_EL1_TTA;
 100         val &= ~CPACR_EL1_ZEN;
 101         if (update_fp_enabled(vcpu)) {
 102                 if (vcpu_has_sve(vcpu))
 103                         val |= CPACR_EL1_ZEN;
 104         } else {
 105                 val &= ~CPACR_EL1_FPEN;
 106                 __activate_traps_fpsimd32(vcpu);
 107         }
 108 
 109         write_sysreg(val, cpacr_el1);
 110 
 111         write_sysreg(kvm_get_hyp_vector(), vbar_el1);
 112 }
 113 NOKPROBE_SYMBOL(activate_traps_vhe);
 114 
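      /*
       * Non-VHE: CPTR_EL2 always traps trace and SVE; FP/SIMD is only
       * trapped while the guest's FP state isn't loaded.
       */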
 115 static void __hyp_text __activate_traps_nvhe(struct kvm_vcpu *vcpu)
 116 {
 117         u64 val;
 118 
 119         __activate_traps_common(vcpu);
 120 
 121         val = CPTR_EL2_DEFAULT;
 122         val |= CPTR_EL2_TTA | CPTR_EL2_TZ;
 123         if (!update_fp_enabled(vcpu)) {
 124                 val |= CPTR_EL2_TFP;
 125                 __activate_traps_fpsimd32(vcpu);
 126         }
 127 
 128         write_sysreg(val, cptr_el2);
 129 }
 130 
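      /*
       * Program HCR_EL2 (and VSESR_EL2 if a virtual SError is pending),
       * then apply the VHE or non-VHE specific trap configuration.
       */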
 131 static void __hyp_text __activate_traps(struct kvm_vcpu *vcpu)
 132 {
 133         u64 hcr = vcpu->arch.hcr_el2;
 134 
 135         if (cpus_have_const_cap(ARM64_WORKAROUND_CAVIUM_TX2_219_TVM))
 136                 hcr |= HCR_TVM;
 137 
 138         write_sysreg(hcr, hcr_el2);
 139 
 140         if (cpus_have_const_cap(ARM64_HAS_RAS_EXTN) && (hcr & HCR_VSE))
 141                 write_sysreg_s(vcpu->arch.vsesr_el2, SYS_VSESR_EL2);
 142 
 143         if (has_vhe())
 144                 activate_traps_vhe(vcpu);
 145         else
 146                 __activate_traps_nvhe(vcpu);
 147 }
 148 
 149 static void deactivate_traps_vhe(void)
 150 {
 151         extern char vectors[];  /* kernel exception vectors */
 152         write_sysreg(HCR_HOST_VHE_FLAGS, hcr_el2);
 153 
 154         /*
 155          * ARM erratum 1165522 requires the actual execution of the above
 156          * before we can switch to the EL2/EL0 translation regime used by
 157          * the host.
 158          */
 159         asm(ALTERNATIVE("nop", "isb", ARM64_WORKAROUND_1165522));
 160 
 161         write_sysreg(CPACR_EL1_DEFAULT, cpacr_el1);
 162         write_sysreg(vectors, vbar_el1);
 163 }
 164 NOKPROBE_SYMBOL(deactivate_traps_vhe);
 165 
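      /* Non-VHE: return MDCR_EL2, HCR_EL2 and CPTR_EL2 to the host configuration */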
 166 static void __hyp_text __deactivate_traps_nvhe(void)
 167 {
 168         u64 mdcr_el2 = read_sysreg(mdcr_el2);
 169 
 170         __deactivate_traps_common();
 171 
 172         mdcr_el2 &= MDCR_EL2_HPMN_MASK;
 173         mdcr_el2 |= MDCR_EL2_E2PB_MASK << MDCR_EL2_E2PB_SHIFT;
 174 
 175         write_sysreg(mdcr_el2, mdcr_el2);
 176         write_sysreg(HCR_HOST_NVHE_FLAGS, hcr_el2);
 177         write_sysreg(CPTR_EL2_DEFAULT, cptr_el2);
 178 }
 179 
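      /* Undo __activate_traps(), preserving any still-pending virtual SError */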
 180 static void __hyp_text __deactivate_traps(struct kvm_vcpu *vcpu)
 181 {
 182         /*
 183          * If we pended a virtual abort, preserve it until it gets
 184          * cleared. See D1.14.3 (Virtual Interrupts) for details, but
 185          * the crucial bit is "On taking a vSError interrupt,
 186          * HCR_EL2.VSE is cleared to 0."
 187          */
 188         if (vcpu->arch.hcr_el2 & HCR_VSE) {
 189                 vcpu->arch.hcr_el2 &= ~HCR_VSE;
 190                 vcpu->arch.hcr_el2 |= read_sysreg(hcr_el2) & HCR_VSE;
 191         }
 192 
 193         if (has_vhe())
 194                 deactivate_traps_vhe();
 195         else
 196                 __deactivate_traps_nvhe();
 197 }
 198 
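      /*
       * On VHE, the common trap state is switched at vcpu load/put time
       * rather than on every guest entry and exit.
       */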
 199 void activate_traps_vhe_load(struct kvm_vcpu *vcpu)
 200 {
 201         __activate_traps_common(vcpu);
 202 }
 203 
 204 void deactivate_traps_vhe_put(void)
 205 {
 206         u64 mdcr_el2 = read_sysreg(mdcr_el2);
 207 
 208         mdcr_el2 &= MDCR_EL2_HPMN_MASK |
 209                     MDCR_EL2_E2PB_MASK << MDCR_EL2_E2PB_SHIFT |
 210                     MDCR_EL2_TPMS;
 211 
 212         write_sysreg(mdcr_el2, mdcr_el2);
 213 
 214         __deactivate_traps_common();
 215 }
 216 
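      /* Install (and later tear down) the guest's stage 2 translation */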
 217 static void __hyp_text __activate_vm(struct kvm *kvm)
 218 {
 219         __load_guest_stage2(kvm);
 220 }
 221 
 222 static void __hyp_text __deactivate_vm(struct kvm_vcpu *vcpu)
 223 {
 224         write_sysreg(0, vttbr_el2);
 225 }
 226 
 227 /* Save VGICv3 state on non-VHE systems */
 228 static void __hyp_text __hyp_vgic_save_state(struct kvm_vcpu *vcpu)
 229 {
 230         if (static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif)) {
 231                 __vgic_v3_save_state(vcpu);
 232                 __vgic_v3_deactivate_traps(vcpu);
 233         }
 234 }
 235 
  236 /* Restore VGICv3 state on non-VHE systems */
 237 static void __hyp_text __hyp_vgic_restore_state(struct kvm_vcpu *vcpu)
 238 {
 239         if (static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif)) {
 240                 __vgic_v3_activate_traps(vcpu);
 241                 __vgic_v3_restore_state(vcpu);
 242         }
 243 }
 244 
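      /*
       * Recover the faulting IPA with an AT S1E1R walk when HPFAR_EL2
       * cannot be used; returns false if the translation fails.
       */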
 245 static bool __hyp_text __translate_far_to_hpfar(u64 far, u64 *hpfar)
 246 {
 247         u64 par, tmp;
 248 
 249         /*
 250          * Resolve the IPA the hard way using the guest VA.
 251          *
 252          * Stage-1 translation already validated the memory access
 253          * rights. As such, we can use the EL1 translation regime, and
 254          * don't have to distinguish between EL0 and EL1 access.
 255          *
 256          * We do need to save/restore PAR_EL1 though, as we haven't
 257          * saved the guest context yet, and we may return early...
 258          */
 259         par = read_sysreg(par_el1);
 260         asm volatile("at s1e1r, %0" : : "r" (far));
 261         isb();
 262 
 263         tmp = read_sysreg(par_el1);
 264         write_sysreg(par, par_el1);
 265 
 266         if (unlikely(tmp & SYS_PAR_EL1_F))
 267                 return false; /* Translation failed, back to guest */
 268 
 269         /* Convert PAR to HPFAR format */
 270         *hpfar = PAR_TO_HPFAR(tmp);
 271         return true;
 272 }
 273 
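      /*
       * Capture FAR/HPFAR for guest data and instruction aborts so the
       * fault can be handled once we are back in the host run loop.
       */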
 274 static bool __hyp_text __populate_fault_info(struct kvm_vcpu *vcpu)
 275 {
 276         u8 ec;
 277         u64 esr;
 278         u64 hpfar, far;
 279 
 280         esr = vcpu->arch.fault.esr_el2;
 281         ec = ESR_ELx_EC(esr);
 282 
 283         if (ec != ESR_ELx_EC_DABT_LOW && ec != ESR_ELx_EC_IABT_LOW)
 284                 return true;
 285 
 286         far = read_sysreg_el2(SYS_FAR);
 287 
 288         /*
 289          * The HPFAR can be invalid if the stage 2 fault did not
 290          * happen during a stage 1 page table walk (the ESR_EL2.S1PTW
  291          * bit is clear) and one of the two following cases is true:
 292          *   1. The fault was due to a permission fault
 293          *   2. The processor carries errata 834220
 294          *
 295          * Therefore, for all non S1PTW faults where we either have a
 296          * permission fault or the errata workaround is enabled, we
 297          * resolve the IPA using the AT instruction.
 298          */
 299         if (!(esr & ESR_ELx_S1PTW) &&
 300             (cpus_have_const_cap(ARM64_WORKAROUND_834220) ||
 301              (esr & ESR_ELx_FSC_TYPE) == FSC_PERM)) {
 302                 if (!__translate_far_to_hpfar(far, &hpfar))
 303                         return false;
 304         } else {
 305                 hpfar = read_sysreg(hpfar_el2);
 306         }
 307 
 308         vcpu->arch.fault.far_el2 = far;
 309         vcpu->arch.fault.hpfar_el2 = hpfar;
 310         return true;
 311 }
 312 
 313 /* Check for an FPSIMD/SVE trap and handle as appropriate */
 314 static bool __hyp_text __hyp_handle_fpsimd(struct kvm_vcpu *vcpu)
 315 {
 316         bool vhe, sve_guest, sve_host;
 317         u8 hsr_ec;
 318 
 319         if (!system_supports_fpsimd())
 320                 return false;
 321 
 322         if (system_supports_sve()) {
 323                 sve_guest = vcpu_has_sve(vcpu);
 324                 sve_host = vcpu->arch.flags & KVM_ARM64_HOST_SVE_IN_USE;
 325                 vhe = true;
 326         } else {
 327                 sve_guest = false;
 328                 sve_host = false;
 329                 vhe = has_vhe();
 330         }
 331 
 332         hsr_ec = kvm_vcpu_trap_get_class(vcpu);
 333         if (hsr_ec != ESR_ELx_EC_FP_ASIMD &&
 334             hsr_ec != ESR_ELx_EC_SVE)
 335                 return false;
 336 
 337         /* Don't handle SVE traps for non-SVE vcpus here: */
 338         if (!sve_guest)
 339                 if (hsr_ec != ESR_ELx_EC_FP_ASIMD)
 340                         return false;
 341 
 342         /* Valid trap.  Switch the context: */
 343 
 344         if (vhe) {
 345                 u64 reg = read_sysreg(cpacr_el1) | CPACR_EL1_FPEN;
 346 
 347                 if (sve_guest)
 348                         reg |= CPACR_EL1_ZEN;
 349 
 350                 write_sysreg(reg, cpacr_el1);
 351         } else {
 352                 write_sysreg(read_sysreg(cptr_el2) & ~(u64)CPTR_EL2_TFP,
 353                              cptr_el2);
 354         }
 355 
 356         isb();
 357 
 358         if (vcpu->arch.flags & KVM_ARM64_FP_HOST) {
 359                 /*
 360                  * In the SVE case, VHE is assumed: it is enforced by
 361                  * Kconfig and kvm_arch_init().
 362                  */
 363                 if (sve_host) {
 364                         struct thread_struct *thread = container_of(
 365                                 vcpu->arch.host_fpsimd_state,
 366                                 struct thread_struct, uw.fpsimd_state);
 367 
 368                         sve_save_state(sve_pffr(thread),
 369                                        &vcpu->arch.host_fpsimd_state->fpsr);
 370                 } else {
 371                         __fpsimd_save_state(vcpu->arch.host_fpsimd_state);
 372                 }
 373 
 374                 vcpu->arch.flags &= ~KVM_ARM64_FP_HOST;
 375         }
 376 
 377         if (sve_guest) {
 378                 sve_load_state(vcpu_sve_pffr(vcpu),
 379                                &vcpu->arch.ctxt.gp_regs.fp_regs.fpsr,
 380                                sve_vq_from_vl(vcpu->arch.sve_max_vl) - 1);
 381                 write_sysreg_s(vcpu->arch.ctxt.sys_regs[ZCR_EL1], SYS_ZCR_EL12);
 382         } else {
 383                 __fpsimd_restore_state(&vcpu->arch.ctxt.gp_regs.fp_regs);
 384         }
 385 
 386         /* Skip restoring fpexc32 for AArch64 guests */
 387         if (!(read_sysreg(hcr_el2) & HCR_RW))
 388                 write_sysreg(vcpu->arch.ctxt.sys_regs[FPEXC32_EL2],
 389                              fpexc32_el2);
 390 
 391         vcpu->arch.flags |= KVM_ARM64_FP_ENABLED;
 392 
 393         return true;
 394 }
 395 
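      /*
       * Cavium ThunderX2 erratum 219: emulate writes to the trapped
       * virtual-memory control registers when HCR_EL2.TVM was only set
       * for the workaround and not requested by the rest of KVM.
       */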
 396 static bool __hyp_text handle_tx2_tvm(struct kvm_vcpu *vcpu)
 397 {
 398         u32 sysreg = esr_sys64_to_sysreg(kvm_vcpu_get_hsr(vcpu));
 399         int rt = kvm_vcpu_sys_get_rt(vcpu);
 400         u64 val = vcpu_get_reg(vcpu, rt);
 401 
 402         /*
 403          * The normal sysreg handling code expects to see the traps,
 404          * let's not do anything here.
 405          */
 406         if (vcpu->arch.hcr_el2 & HCR_TVM)
 407                 return false;
 408 
 409         switch (sysreg) {
 410         case SYS_SCTLR_EL1:
 411                 write_sysreg_el1(val, SYS_SCTLR);
 412                 break;
 413         case SYS_TTBR0_EL1:
 414                 write_sysreg_el1(val, SYS_TTBR0);
 415                 break;
 416         case SYS_TTBR1_EL1:
 417                 write_sysreg_el1(val, SYS_TTBR1);
 418                 break;
 419         case SYS_TCR_EL1:
 420                 write_sysreg_el1(val, SYS_TCR);
 421                 break;
 422         case SYS_ESR_EL1:
 423                 write_sysreg_el1(val, SYS_ESR);
 424                 break;
 425         case SYS_FAR_EL1:
 426                 write_sysreg_el1(val, SYS_FAR);
 427                 break;
 428         case SYS_AFSR0_EL1:
 429                 write_sysreg_el1(val, SYS_AFSR0);
 430                 break;
 431         case SYS_AFSR1_EL1:
 432                 write_sysreg_el1(val, SYS_AFSR1);
 433                 break;
 434         case SYS_MAIR_EL1:
 435                 write_sysreg_el1(val, SYS_MAIR);
 436                 break;
 437         case SYS_AMAIR_EL1:
 438                 write_sysreg_el1(val, SYS_AMAIR);
 439                 break;
 440         case SYS_CONTEXTIDR_EL1:
 441                 write_sysreg_el1(val, SYS_CONTEXTIDR);
 442                 break;
 443         default:
 444                 return false;
 445         }
 446 
 447         __kvm_skip_instr(vcpu);
 448         return true;
 449 }
 450 
 451 /*
 452  * Return true when we were able to fixup the guest exit and should return to
 453  * the guest, false when we should restore the host state and return to the
 454  * main run loop.
 455  */
 456 static bool __hyp_text fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
 457 {
 458         if (ARM_EXCEPTION_CODE(*exit_code) != ARM_EXCEPTION_IRQ)
 459                 vcpu->arch.fault.esr_el2 = read_sysreg_el2(SYS_ESR);
 460 
 461         /*
 462          * We're using the raw exception code in order to only process
 463          * the trap if no SError is pending. We will come back to the
 464          * same PC once the SError has been injected, and replay the
 465          * trapping instruction.
 466          */
 467         if (*exit_code != ARM_EXCEPTION_TRAP)
 468                 goto exit;
 469 
 470         if (cpus_have_const_cap(ARM64_WORKAROUND_CAVIUM_TX2_219_TVM) &&
 471             kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_SYS64 &&
 472             handle_tx2_tvm(vcpu))
 473                 return true;
 474 
 475         /*
 476          * We trap the first access to the FP/SIMD to save the host context
 477          * and restore the guest context lazily.
 478          * If FP/SIMD is not implemented, handle the trap and inject an
 479          * undefined instruction exception to the guest.
 480          * Similarly for trapped SVE accesses.
 481          */
 482         if (__hyp_handle_fpsimd(vcpu))
 483                 return true;
 484 
 485         if (!__populate_fault_info(vcpu))
 486                 return true;
 487 
 488         if (static_branch_unlikely(&vgic_v2_cpuif_trap)) {
 489                 bool valid;
 490 
 491                 valid = kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_DABT_LOW &&
 492                         kvm_vcpu_trap_get_fault_type(vcpu) == FSC_FAULT &&
 493                         kvm_vcpu_dabt_isvalid(vcpu) &&
 494                         !kvm_vcpu_dabt_isextabt(vcpu) &&
 495                         !kvm_vcpu_dabt_iss1tw(vcpu);
 496 
 497                 if (valid) {
 498                         int ret = __vgic_v2_perform_cpuif_access(vcpu);
 499 
 500                         if (ret == 1)
 501                                 return true;
 502 
  503                         /* Promote an illegal access to an SError. */
 504                         if (ret == -1)
 505                                 *exit_code = ARM_EXCEPTION_EL1_SERROR;
 506 
 507                         goto exit;
 508                 }
 509         }
 510 
 511         if (static_branch_unlikely(&vgic_v3_cpuif_trap) &&
 512             (kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_SYS64 ||
 513              kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_CP15_32)) {
 514                 int ret = __vgic_v3_perform_cpuif_access(vcpu);
 515 
 516                 if (ret == 1)
 517                         return true;
 518         }
 519 
 520 exit:
 521         /* Return to the host kernel and handle the exit */
 522         return false;
 523 }
 524 
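      /* True if this vcpu wants the SSBD (ARCH_WORKAROUND_2) mitigation disabled */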
 525 static inline bool __hyp_text __needs_ssbd_off(struct kvm_vcpu *vcpu)
 526 {
 527         if (!cpus_have_const_cap(ARM64_SSBD))
 528                 return false;
 529 
 530         return !(vcpu->arch.workaround_flags & VCPU_WORKAROUND_2_FLAG);
 531 }
 532 
 533 static void __hyp_text __set_guest_arch_workaround_state(struct kvm_vcpu *vcpu)
 534 {
 535 #ifdef CONFIG_ARM64_SSBD
 536         /*
 537          * The host runs with the workaround always present. If the
 538          * guest wants it disabled, so be it...
 539          */
 540         if (__needs_ssbd_off(vcpu) &&
 541             __hyp_this_cpu_read(arm64_ssbd_callback_required))
 542                 arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_2, 0, NULL);
 543 #endif
 544 }
 545 
 546 static void __hyp_text __set_host_arch_workaround_state(struct kvm_vcpu *vcpu)
 547 {
 548 #ifdef CONFIG_ARM64_SSBD
 549         /*
 550          * If the guest has disabled the workaround, bring it back on.
 551          */
 552         if (__needs_ssbd_off(vcpu) &&
 553             __hyp_this_cpu_read(arm64_ssbd_callback_required))
 554                 arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_2, 1, NULL);
 555 #endif
 556 }
 557 
 558 /**
 559  * Disable host events, enable guest events
 560  */
 561 static bool __hyp_text __pmu_switch_to_guest(struct kvm_cpu_context *host_ctxt)
 562 {
 563         struct kvm_host_data *host;
 564         struct kvm_pmu_events *pmu;
 565 
 566         host = container_of(host_ctxt, struct kvm_host_data, host_ctxt);
 567         pmu = &host->pmu_events;
 568 
 569         if (pmu->events_host)
 570                 write_sysreg(pmu->events_host, pmcntenclr_el0);
 571 
 572         if (pmu->events_guest)
 573                 write_sysreg(pmu->events_guest, pmcntenset_el0);
 574 
 575         return (pmu->events_host || pmu->events_guest);
 576 }
 577 
 578 /**
 579  * Disable guest events, enable host events
 580  */
 581 static void __hyp_text __pmu_switch_to_host(struct kvm_cpu_context *host_ctxt)
 582 {
 583         struct kvm_host_data *host;
 584         struct kvm_pmu_events *pmu;
 585 
 586         host = container_of(host_ctxt, struct kvm_host_data, host_ctxt);
 587         pmu = &host->pmu_events;
 588 
 589         if (pmu->events_guest)
 590                 write_sysreg(pmu->events_guest, pmcntenclr_el0);
 591 
 592         if (pmu->events_host)
 593                 write_sysreg(pmu->events_host, pmcntenset_el0);
 594 }
 595 
 596 /* Switch to the guest for VHE systems running in EL2 */
 597 int kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu)
 598 {
 599         struct kvm_cpu_context *host_ctxt;
 600         struct kvm_cpu_context *guest_ctxt;
 601         u64 exit_code;
 602 
 603         host_ctxt = vcpu->arch.host_cpu_context;
 604         host_ctxt->__hyp_running_vcpu = vcpu;
 605         guest_ctxt = &vcpu->arch.ctxt;
 606 
 607         sysreg_save_host_state_vhe(host_ctxt);
 608 
 609         /*
 610          * ARM erratum 1165522 requires us to configure both stage 1 and
 611          * stage 2 translation for the guest context before we clear
 612          * HCR_EL2.TGE.
 613          *
 614          * We have already configured the guest's stage 1 translation in
 615          * kvm_vcpu_load_sysregs above.  We must now call __activate_vm
 616          * before __activate_traps, because __activate_vm configures
  617          * stage 2 translation, and __activate_traps clears HCR_EL2.TGE
 618          * (among other things).
 619          */
 620         __activate_vm(vcpu->kvm);
 621         __activate_traps(vcpu);
 622 
 623         sysreg_restore_guest_state_vhe(guest_ctxt);
 624         __debug_switch_to_guest(vcpu);
 625 
 626         __set_guest_arch_workaround_state(vcpu);
 627 
 628         do {
 629                 /* Jump in the fire! */
 630                 exit_code = __guest_enter(vcpu, host_ctxt);
 631 
 632                 /* And we're baaack! */
 633         } while (fixup_guest_exit(vcpu, &exit_code));
 634 
 635         __set_host_arch_workaround_state(vcpu);
 636 
 637         sysreg_save_guest_state_vhe(guest_ctxt);
 638 
 639         __deactivate_traps(vcpu);
 640 
 641         sysreg_restore_host_state_vhe(host_ctxt);
 642 
 643         if (vcpu->arch.flags & KVM_ARM64_FP_ENABLED)
 644                 __fpsimd_save_fpexc32(vcpu);
 645 
 646         __debug_switch_to_host(vcpu);
 647 
 648         return exit_code;
 649 }
 650 NOKPROBE_SYMBOL(kvm_vcpu_run_vhe);
 651 
 652 /* Switch to the guest for legacy non-VHE systems */
 653 int __hyp_text __kvm_vcpu_run_nvhe(struct kvm_vcpu *vcpu)
 654 {
 655         struct kvm_cpu_context *host_ctxt;
 656         struct kvm_cpu_context *guest_ctxt;
 657         bool pmu_switch_needed;
 658         u64 exit_code;
 659 
 660         /*
 661          * Having IRQs masked via PMR when entering the guest means the GIC
 662          * will not signal the CPU of interrupts of lower priority, and the
 663          * only way to get out will be via guest exceptions.
 664          * Naturally, we want to avoid this.
 665          */
 666         if (system_uses_irq_prio_masking()) {
 667                 gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET);
 668                 dsb(sy);
 669         }
 670 
 671         vcpu = kern_hyp_va(vcpu);
 672 
 673         host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context);
 674         host_ctxt->__hyp_running_vcpu = vcpu;
 675         guest_ctxt = &vcpu->arch.ctxt;
 676 
 677         pmu_switch_needed = __pmu_switch_to_guest(host_ctxt);
 678 
 679         __sysreg_save_state_nvhe(host_ctxt);
 680 
 681         __activate_vm(kern_hyp_va(vcpu->kvm));
 682         __activate_traps(vcpu);
 683 
 684         __hyp_vgic_restore_state(vcpu);
 685         __timer_enable_traps(vcpu);
 686 
 687         /*
 688          * We must restore the 32-bit state before the sysregs, thanks
 689          * to erratum #852523 (Cortex-A57) or #853709 (Cortex-A72).
 690          */
 691         __sysreg32_restore_state(vcpu);
 692         __sysreg_restore_state_nvhe(guest_ctxt);
 693         __debug_switch_to_guest(vcpu);
 694 
 695         __set_guest_arch_workaround_state(vcpu);
 696 
 697         do {
 698                 /* Jump in the fire! */
 699                 exit_code = __guest_enter(vcpu, host_ctxt);
 700 
 701                 /* And we're baaack! */
 702         } while (fixup_guest_exit(vcpu, &exit_code));
 703 
 704         __set_host_arch_workaround_state(vcpu);
 705 
 706         __sysreg_save_state_nvhe(guest_ctxt);
 707         __sysreg32_save_state(vcpu);
 708         __timer_disable_traps(vcpu);
 709         __hyp_vgic_save_state(vcpu);
 710 
 711         __deactivate_traps(vcpu);
 712         __deactivate_vm(vcpu);
 713 
 714         __sysreg_restore_state_nvhe(host_ctxt);
 715 
 716         if (vcpu->arch.flags & KVM_ARM64_FP_ENABLED)
 717                 __fpsimd_save_fpexc32(vcpu);
 718 
 719         /*
 720          * This must come after restoring the host sysregs, since a non-VHE
 721          * system may enable SPE here and make use of the TTBRs.
 722          */
 723         __debug_switch_to_host(vcpu);
 724 
 725         if (pmu_switch_needed)
 726                 __pmu_switch_to_host(host_ctxt);
 727 
 728         /* Returning to host will clear PSR.I, remask PMR if needed */
 729         if (system_uses_irq_prio_masking())
 730                 gic_write_pmr(GIC_PRIO_IRQOFF);
 731 
 732         return exit_code;
 733 }
 734 
 735 static const char __hyp_panic_string[] = "HYP panic:\nPS:%08llx PC:%016llx ESR:%08llx\nFAR:%016llx HPFAR:%016llx PAR:%016llx\nVCPU:%p\n";
 736 
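      /*
       * If a guest was running (VTTBR_EL2 is set), unwind its context
       * before triggering the panic from non-VHE hyp.
       */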
 737 static void __hyp_text __hyp_call_panic_nvhe(u64 spsr, u64 elr, u64 par,
 738                                              struct kvm_cpu_context *__host_ctxt)
 739 {
 740         struct kvm_vcpu *vcpu;
 741         unsigned long str_va;
 742 
 743         vcpu = __host_ctxt->__hyp_running_vcpu;
 744 
 745         if (read_sysreg(vttbr_el2)) {
 746                 __timer_disable_traps(vcpu);
 747                 __deactivate_traps(vcpu);
 748                 __deactivate_vm(vcpu);
 749                 __sysreg_restore_state_nvhe(__host_ctxt);
 750         }
 751 
 752         /*
 753          * Force the panic string to be loaded from the literal pool,
 754          * making sure it is a kernel address and not a PC-relative
 755          * reference.
 756          */
 757         asm volatile("ldr %0, =__hyp_panic_string" : "=r" (str_va));
 758 
 759         __hyp_do_panic(str_va,
 760                        spsr, elr,
 761                        read_sysreg(esr_el2), read_sysreg_el2(SYS_FAR),
 762                        read_sysreg(hpfar_el2), par, vcpu);
 763 }
 764 
 765 static void __hyp_call_panic_vhe(u64 spsr, u64 elr, u64 par,
 766                                  struct kvm_cpu_context *host_ctxt)
 767 {
 768         struct kvm_vcpu *vcpu;
 769         vcpu = host_ctxt->__hyp_running_vcpu;
 770 
 771         __deactivate_traps(vcpu);
 772         sysreg_restore_host_state_vhe(host_ctxt);
 773 
 774         panic(__hyp_panic_string,
 775               spsr,  elr,
 776               read_sysreg_el2(SYS_ESR),   read_sysreg_el2(SYS_FAR),
 777               read_sysreg(hpfar_el2), par, vcpu);
 778 }
 779 NOKPROBE_SYMBOL(__hyp_call_panic_vhe);
 780 
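      /*
       * Capture SPSR/ELR/PAR at the point of failure, then hand over to
       * the VHE or non-VHE panic helper.
       */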
 781 void __hyp_text __noreturn hyp_panic(struct kvm_cpu_context *host_ctxt)
 782 {
 783         u64 spsr = read_sysreg_el2(SYS_SPSR);
 784         u64 elr = read_sysreg_el2(SYS_ELR);
 785         u64 par = read_sysreg(par_el1);
 786 
 787         if (!has_vhe())
 788                 __hyp_call_panic_nvhe(spsr, elr, par, host_ctxt);
 789         else
 790                 __hyp_call_panic_vhe(spsr, elr, par, host_ctxt);
 791 
 792         unreachable();
 793 }
