arch/x86/kvm/vmx/ops.h


DEFINITIONS

This source file includes the following definitions:
  1. vmcs_check16
  2. vmcs_check32
  3. vmcs_check64
  4. vmcs_checkl
  5. __vmcs_readl
  6. vmcs_read16
  7. vmcs_read32
  8. vmcs_read64
  9. vmcs_readl
  10. __vmcs_writel
  11. vmcs_write16
  12. vmcs_write32
  13. vmcs_write64
  14. vmcs_writel
  15. vmcs_clear_bits
  16. vmcs_set_bits
  17. vmcs_clear
  18. vmcs_load
  19. __invvpid
  20. __invept
  21. vpid_sync_vcpu_addr
  22. vpid_sync_vcpu_single
  23. vpid_sync_vcpu_global
  24. vpid_sync_context
  25. ept_sync_global
  26. ept_sync_context

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __KVM_X86_VMX_INSN_H
#define __KVM_X86_VMX_INSN_H

#include <linux/nospec.h>

#include <asm/kvm_host.h>
#include <asm/vmx.h>

#include "evmcs.h"
#include "vmcs.h"

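/*
 * __ex() wraps an instruction with __kvm_handle_fault_on_reboot() so
 * that a fault which races with VMX being disabled on reboot is routed
 * to kvm_spurious_fault() rather than left unhandled.
 */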
#define __ex(x) __kvm_handle_fault_on_reboot(x)

asmlinkage void vmread_error(unsigned long field, bool fault);
__attribute__((regparm(0))) void vmread_error_trampoline(unsigned long field,
                                                         bool fault);
void vmwrite_error(unsigned long field, unsigned long value);
void vmclear_error(struct vmcs *vmcs, u64 phys_addr);
void vmptrld_error(struct vmcs *vmcs, u64 phys_addr);
void invvpid_error(unsigned long ext, u16 vpid, gva_t gva);
void invept_error(unsigned long ext, u64 eptp, gpa_t gpa);

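/*
 * VMCS field encodings (Intel SDM, Appendix B): bits 14:13 give the
 * field width (0 = 16-bit, 1 = 64-bit, 2 = 32-bit, 3 = natural width)
 * and bit 0 set selects the high 32 bits of a 64-bit field.  The
 * checks below fire at build time if an accessor of the wrong width
 * is used with a compile-time constant field encoding.
 */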
static __always_inline void vmcs_check16(unsigned long field)
{
        BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2000,
                         "16-bit accessor invalid for 64-bit field");
        BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2001,
                         "16-bit accessor invalid for 64-bit high field");
        BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x4000,
                         "16-bit accessor invalid for 32-bit high field");
        BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x6000,
                         "16-bit accessor invalid for natural width field");
}

static __always_inline void vmcs_check32(unsigned long field)
{
        BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0,
                         "32-bit accessor invalid for 16-bit field");
        BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x6000,
                         "32-bit accessor invalid for natural width field");
}

static __always_inline void vmcs_check64(unsigned long field)
{
        BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0,
                         "64-bit accessor invalid for 16-bit field");
        BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2001,
                         "64-bit accessor invalid for 64-bit high field");
        BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x4000,
                         "64-bit accessor invalid for 32-bit field");
        BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x6000,
                         "64-bit accessor invalid for natural width field");
}

static __always_inline void vmcs_checkl(unsigned long field)
{
        BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0,
                         "Natural width accessor invalid for 16-bit field");
        BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2000,
                         "Natural width accessor invalid for 64-bit field");
        BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2001,
                         "Natural width accessor invalid for 64-bit high field");
        BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x4000,
                         "Natural width accessor invalid for 32-bit field");
}

static __always_inline unsigned long __vmcs_readl(unsigned long field)
{
        unsigned long value;

        asm volatile("1: vmread %2, %1\n\t"
                     ".byte 0x3e\n\t" /* branch taken hint */
                     "ja 3f\n\t"

                     /*
                      * VMREAD failed.  Push '0' for @fault, push the failing
                      * @field, and bounce through the trampoline to preserve
                      * volatile registers.
                      */
                     "push $0\n\t"
                     "push %2\n\t"
                     "2:call vmread_error_trampoline\n\t"

                     /*
                      * Unwind the stack.  Note, the trampoline zeros out the
                      * memory for @fault so that the result is '0' on error.
                      */
                     "pop %2\n\t"
                     "pop %1\n\t"
                     "3:\n\t"

                     /* VMREAD faulted.  As above, except push '1' for @fault. */
                     ".pushsection .fixup, \"ax\"\n\t"
                     "4: push $1\n\t"
                     "push %2\n\t"
                     "jmp 2b\n\t"
                     ".popsection\n\t"
                     _ASM_EXTABLE(1b, 4b)
                     : ASM_CALL_CONSTRAINT, "=r"(value) : "r"(field) : "cc");
        return value;
}

static __always_inline u16 vmcs_read16(unsigned long field)
{
        vmcs_check16(field);
        if (static_branch_unlikely(&enable_evmcs))
                return evmcs_read16(field);
        return __vmcs_readl(field);
}

static __always_inline u32 vmcs_read32(unsigned long field)
{
        vmcs_check32(field);
        if (static_branch_unlikely(&enable_evmcs))
                return evmcs_read32(field);
        return __vmcs_readl(field);
}

static __always_inline u64 vmcs_read64(unsigned long field)
{
        vmcs_check64(field);
        if (static_branch_unlikely(&enable_evmcs))
                return evmcs_read64(field);
#ifdef CONFIG_X86_64
        return __vmcs_readl(field);
#else
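        /*
         * On 32-bit hosts a 64-bit field is read as two halves: 64-bit
         * field encodings are even, and the odd encoding (field + 1)
         * selects the high 32 bits of the same field.
         */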
        return __vmcs_readl(field) | ((u64)__vmcs_readl(field+1) << 32);
#endif
}

static __always_inline unsigned long vmcs_readl(unsigned long field)
{
        vmcs_checkl(field);
        if (static_branch_unlikely(&enable_evmcs))
                return evmcs_read64(field);
        return __vmcs_readl(field);
}

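/*
 * Example usage (a sketch; field names from asm/vmx.h): the accessor
 * width must match the field's encoding, e.g. GUEST_RIP is natural
 * width while VM_EXIT_REASON is 32-bit:
 *
 *      unsigned long rip = vmcs_readl(GUEST_RIP);
 *      u32 exit_reason = vmcs_read32(VM_EXIT_REASON);
 */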
#define vmx_asm1(insn, op1, error_args...)                              \
do {                                                                    \
        asm_volatile_goto("1: " __stringify(insn) " %0\n\t"             \
                          ".byte 0x2e\n\t" /* branch not taken hint */  \
                          "jna %l[error]\n\t"                           \
                          _ASM_EXTABLE(1b, %l[fault])                   \
                          : : op1 : "cc" : error, fault);               \
        return;                                                         \
error:                                                                  \
        insn##_error(error_args);                                       \
        return;                                                         \
fault:                                                                  \
        kvm_spurious_fault();                                           \
} while (0)

#define vmx_asm2(insn, op1, op2, error_args...)                         \
do {                                                                    \
        asm_volatile_goto("1: "  __stringify(insn) " %1, %0\n\t"        \
                          ".byte 0x2e\n\t" /* branch not taken hint */  \
                          "jna %l[error]\n\t"                           \
                          _ASM_EXTABLE(1b, %l[fault])                   \
                          : : op1, op2 : "cc" : error, fault);          \
        return;                                                         \
error:                                                                  \
        insn##_error(error_args);                                       \
        return;                                                         \
fault:                                                                  \
        kvm_spurious_fault();                                           \
} while (0)

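/*
 * The vmx_asm{1,2}() wrappers distinguish the two VMX failure modes:
 * VM-fail, where the instruction completes but sets CF or ZF ("jna"
 * branches to the error label, which reports via insn##_error()), and
 * a fault, e.g. #UD when VMX is off, which reaches the fault label via
 * the exception table and is treated as a spurious fault.
 */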
static __always_inline void __vmcs_writel(unsigned long field, unsigned long value)
{
        vmx_asm2(vmwrite, "r"(field), "rm"(value), field, value);
}

static __always_inline void vmcs_write16(unsigned long field, u16 value)
{
        vmcs_check16(field);
        if (static_branch_unlikely(&enable_evmcs))
                return evmcs_write16(field, value);

        __vmcs_writel(field, value);
}

static __always_inline void vmcs_write32(unsigned long field, u32 value)
{
        vmcs_check32(field);
        if (static_branch_unlikely(&enable_evmcs))
                return evmcs_write32(field, value);

        __vmcs_writel(field, value);
}

static __always_inline void vmcs_write64(unsigned long field, u64 value)
{
        vmcs_check64(field);
        if (static_branch_unlikely(&enable_evmcs))
                return evmcs_write64(field, value);

        __vmcs_writel(field, value);
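        /*
         * On 32-bit hosts the write above stores only the low 32 bits
         * of the value; the high half is written separately through
         * the odd "high" field encoding.
         */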
#ifndef CONFIG_X86_64
        __vmcs_writel(field+1, value >> 32);
#endif
}

static __always_inline void vmcs_writel(unsigned long field, unsigned long value)
{
        vmcs_checkl(field);
        if (static_branch_unlikely(&enable_evmcs))
                return evmcs_write64(field, value);

        __vmcs_writel(field, value);
}

static __always_inline void vmcs_clear_bits(unsigned long field, u32 mask)
{
        BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x2000,
                         "vmcs_clear_bits does not support 64-bit fields");
        if (static_branch_unlikely(&enable_evmcs))
                return evmcs_write32(field, evmcs_read32(field) & ~mask);

        __vmcs_writel(field, __vmcs_readl(field) & ~mask);
}

static __always_inline void vmcs_set_bits(unsigned long field, u32 mask)
{
        BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x2000,
                         "vmcs_set_bits does not support 64-bit fields");
        if (static_branch_unlikely(&enable_evmcs))
                return evmcs_write32(field, evmcs_read32(field) | mask);

        __vmcs_writel(field, __vmcs_readl(field) | mask);
}
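
/*
 * Example (a sketch; control names from asm/vmx.h): toggle a single
 * bit in a 32-bit execution control without open-coding a read/write
 * pair:
 *
 *      vmcs_set_bits(CPU_BASED_VM_EXEC_CONTROL, CPU_BASED_MOV_DR_EXITING);
 *      vmcs_clear_bits(CPU_BASED_VM_EXEC_CONTROL, CPU_BASED_MOV_DR_EXITING);
 */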

static inline void vmcs_clear(struct vmcs *vmcs)
{
        u64 phys_addr = __pa(vmcs);

        vmx_asm1(vmclear, "m"(phys_addr), vmcs, phys_addr);
}

static inline void vmcs_load(struct vmcs *vmcs)
{
        u64 phys_addr = __pa(vmcs);

        if (static_branch_unlikely(&enable_evmcs))
                return evmcs_load(phys_addr);

        vmx_asm1(vmptrld, "m"(phys_addr), vmcs, phys_addr);
}

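/*
 * The INVVPID descriptor is a 128-bit memory operand: bits 15:0 hold
 * the VPID, bits 63:16 are reserved (must be zero) and bits 127:64
 * hold the linear address used by the individual-address extent.
 */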
static inline void __invvpid(unsigned long ext, u16 vpid, gva_t gva)
{
        struct {
                u64 vpid : 16;
                u64 rsvd : 48;
                u64 gva;
        } operand = { vpid, 0, gva };

        vmx_asm2(invvpid, "r"(ext), "m"(operand), ext, vpid, gva);
}

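/*
 * The INVEPT descriptor is likewise 128 bits: bits 63:0 hold the EPT
 * pointer and bits 127:64 are reserved (must be zero); the "gpa" slot
 * exists to fill that second word.
 */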
static inline void __invept(unsigned long ext, u64 eptp, gpa_t gpa)
{
        struct {
                u64 eptp, gpa;
        } operand = {eptp, gpa};

        vmx_asm2(invept, "r"(ext), "m"(operand), ext, eptp, gpa);
}

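/*
 * The vpid_sync_*() helpers flush guest-tagged TLB entries at the
 * narrowest supported INVVPID scope.  VPID 0 is reserved for the host
 * and is never flushed here; vpid_sync_vcpu_addr() returns false when
 * individual-address invalidation is unsupported so that the caller
 * can fall back to a wider flush.
 */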
static inline bool vpid_sync_vcpu_addr(int vpid, gva_t addr)
{
        if (vpid == 0)
                return true;

        if (cpu_has_vmx_invvpid_individual_addr()) {
                __invvpid(VMX_VPID_EXTENT_INDIVIDUAL_ADDR, vpid, addr);
                return true;
        }

        return false;
}

static inline void vpid_sync_vcpu_single(int vpid)
{
        if (vpid == 0)
                return;

        if (cpu_has_vmx_invvpid_single())
                __invvpid(VMX_VPID_EXTENT_SINGLE_CONTEXT, vpid, 0);
}

static inline void vpid_sync_vcpu_global(void)
{
        if (cpu_has_vmx_invvpid_global())
                __invvpid(VMX_VPID_EXTENT_ALL_CONTEXT, 0, 0);
}

static inline void vpid_sync_context(int vpid)
{
        if (cpu_has_vmx_invvpid_single())
                vpid_sync_vcpu_single(vpid);
        else
                vpid_sync_vcpu_global();
}

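/*
 * EPT invalidation prefers the single-context extent, keyed on the
 * EPTP, and falls back to a global invalidation.  ept_sync_global()
 * assumes the global extent is supported; KVM's hardware setup is
 * expected to disable EPT when it is not.
 */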
static inline void ept_sync_global(void)
{
        __invept(VMX_EPT_EXTENT_GLOBAL, 0, 0);
}

static inline void ept_sync_context(u64 eptp)
{
        if (cpu_has_vmx_invept_context())
                __invept(VMX_EPT_EXTENT_CONTEXT, eptp, 0);
        else
                ept_sync_global();
}

#endif /* __KVM_X86_VMX_INSN_H */
