Lines matching refs:vmx (identifier cross-reference listing, apparently taken from KVM's arch/x86/kvm/vmx.c). Each entry gives the source line number, the matching line, and the enclosing function; "argument" marks functions that take vmx as a parameter, while "local" marks functions that declare it as a local variable.

821 static void copy_vmcs12_to_shadow(struct vcpu_vmx *vmx);
822 static void copy_shadow_to_vmcs12(struct vcpu_vmx *vmx);
1188 static int __find_msr_index(struct vcpu_vmx *vmx, u32 msr) in __find_msr_index() argument
1192 for (i = 0; i < vmx->nmsrs; ++i) in __find_msr_index()
1193 if (vmx_msr_index[vmx->guest_msrs[i].index] == msr) in __find_msr_index()
1224 static struct shared_msr_entry *find_msr_entry(struct vcpu_vmx *vmx, u32 msr) in find_msr_entry() argument
1228 i = __find_msr_index(vmx, msr); in find_msr_entry()
1230 return &vmx->guest_msrs[i]; in find_msr_entry()
1340 static inline void vpid_sync_vcpu_single(struct vcpu_vmx *vmx) in vpid_sync_vcpu_single() argument
1342 if (vmx->vpid == 0) in vpid_sync_vcpu_single()
1346 __invvpid(VMX_VPID_EXTENT_SINGLE_CONTEXT, vmx->vpid, 0); in vpid_sync_vcpu_single()
1355 static inline void vpid_sync_context(struct vcpu_vmx *vmx) in vpid_sync_context() argument
1358 vpid_sync_vcpu_single(vmx); in vpid_sync_context()
1453 static inline void vm_entry_controls_init(struct vcpu_vmx *vmx, u32 val) in vm_entry_controls_init() argument
1456 vmx->vm_entry_controls_shadow = val; in vm_entry_controls_init()
1459 static inline void vm_entry_controls_set(struct vcpu_vmx *vmx, u32 val) in vm_entry_controls_set() argument
1461 if (vmx->vm_entry_controls_shadow != val) in vm_entry_controls_set()
1462 vm_entry_controls_init(vmx, val); in vm_entry_controls_set()
1465 static inline u32 vm_entry_controls_get(struct vcpu_vmx *vmx) in vm_entry_controls_get() argument
1467 return vmx->vm_entry_controls_shadow; in vm_entry_controls_get()
1471 static inline void vm_entry_controls_setbit(struct vcpu_vmx *vmx, u32 val) in vm_entry_controls_setbit() argument
1473 vm_entry_controls_set(vmx, vm_entry_controls_get(vmx) | val); in vm_entry_controls_setbit()
1476 static inline void vm_entry_controls_clearbit(struct vcpu_vmx *vmx, u32 val) in vm_entry_controls_clearbit() argument
1478 vm_entry_controls_set(vmx, vm_entry_controls_get(vmx) & ~val); in vm_entry_controls_clearbit()
1481 static inline void vm_exit_controls_init(struct vcpu_vmx *vmx, u32 val) in vm_exit_controls_init() argument
1484 vmx->vm_exit_controls_shadow = val; in vm_exit_controls_init()
1487 static inline void vm_exit_controls_set(struct vcpu_vmx *vmx, u32 val) in vm_exit_controls_set() argument
1489 if (vmx->vm_exit_controls_shadow != val) in vm_exit_controls_set()
1490 vm_exit_controls_init(vmx, val); in vm_exit_controls_set()
1493 static inline u32 vm_exit_controls_get(struct vcpu_vmx *vmx) in vm_exit_controls_get() argument
1495 return vmx->vm_exit_controls_shadow; in vm_exit_controls_get()
1499 static inline void vm_exit_controls_setbit(struct vcpu_vmx *vmx, u32 val) in vm_exit_controls_setbit() argument
1501 vm_exit_controls_set(vmx, vm_exit_controls_get(vmx) | val); in vm_exit_controls_setbit()
1504 static inline void vm_exit_controls_clearbit(struct vcpu_vmx *vmx, u32 val) in vm_exit_controls_clearbit() argument
1506 vm_exit_controls_set(vmx, vm_exit_controls_get(vmx) & ~val); in vm_exit_controls_clearbit()
1509 static void vmx_segment_cache_clear(struct vcpu_vmx *vmx) in vmx_segment_cache_clear() argument
1511 vmx->segment_cache.bitmask = 0; in vmx_segment_cache_clear()
1514 static bool vmx_segment_cache_test_set(struct vcpu_vmx *vmx, unsigned seg, in vmx_segment_cache_test_set() argument
1520 if (!(vmx->vcpu.arch.regs_avail & (1 << VCPU_EXREG_SEGMENTS))) { in vmx_segment_cache_test_set()
1521 vmx->vcpu.arch.regs_avail |= (1 << VCPU_EXREG_SEGMENTS); in vmx_segment_cache_test_set()
1522 vmx->segment_cache.bitmask = 0; in vmx_segment_cache_test_set()
1524 ret = vmx->segment_cache.bitmask & mask; in vmx_segment_cache_test_set()
1525 vmx->segment_cache.bitmask |= mask; in vmx_segment_cache_test_set()
1529 static u16 vmx_read_guest_seg_selector(struct vcpu_vmx *vmx, unsigned seg) in vmx_read_guest_seg_selector() argument
1531 u16 *p = &vmx->segment_cache.seg[seg].selector; in vmx_read_guest_seg_selector()
1533 if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_SEL)) in vmx_read_guest_seg_selector()
1538 static ulong vmx_read_guest_seg_base(struct vcpu_vmx *vmx, unsigned seg) in vmx_read_guest_seg_base() argument
1540 ulong *p = &vmx->segment_cache.seg[seg].base; in vmx_read_guest_seg_base()
1542 if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_BASE)) in vmx_read_guest_seg_base()
1547 static u32 vmx_read_guest_seg_limit(struct vcpu_vmx *vmx, unsigned seg) in vmx_read_guest_seg_limit() argument
1549 u32 *p = &vmx->segment_cache.seg[seg].limit; in vmx_read_guest_seg_limit()
1551 if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_LIMIT)) in vmx_read_guest_seg_limit()
1556 static u32 vmx_read_guest_seg_ar(struct vcpu_vmx *vmx, unsigned seg) in vmx_read_guest_seg_ar() argument
1558 u32 *p = &vmx->segment_cache.seg[seg].ar; in vmx_read_guest_seg_ar()
1560 if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_AR)) in vmx_read_guest_seg_ar()
1593 static void clear_atomic_switch_msr_special(struct vcpu_vmx *vmx, in clear_atomic_switch_msr_special() argument
1596 vm_entry_controls_clearbit(vmx, entry); in clear_atomic_switch_msr_special()
1597 vm_exit_controls_clearbit(vmx, exit); in clear_atomic_switch_msr_special()
1600 static void clear_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr) in clear_atomic_switch_msr() argument
1603 struct msr_autoload *m = &vmx->msr_autoload; in clear_atomic_switch_msr()
1608 clear_atomic_switch_msr_special(vmx, in clear_atomic_switch_msr()
1616 clear_atomic_switch_msr_special(vmx, in clear_atomic_switch_msr()
1637 static void add_atomic_switch_msr_special(struct vcpu_vmx *vmx, in add_atomic_switch_msr_special() argument
1644 vm_entry_controls_setbit(vmx, entry); in add_atomic_switch_msr_special()
1645 vm_exit_controls_setbit(vmx, exit); in add_atomic_switch_msr_special()
1648 static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr, in add_atomic_switch_msr() argument
1652 struct msr_autoload *m = &vmx->msr_autoload; in add_atomic_switch_msr()
1657 add_atomic_switch_msr_special(vmx, in add_atomic_switch_msr()
1668 add_atomic_switch_msr_special(vmx, in add_atomic_switch_msr()
1719 static bool update_transition_efer(struct vcpu_vmx *vmx, int efer_offset) in update_transition_efer() argument
1721 u64 guest_efer = vmx->vcpu.arch.efer; in update_transition_efer()
1747 clear_atomic_switch_msr(vmx, MSR_EFER); in update_transition_efer()
1755 (enable_ept && ((vmx->vcpu.arch.efer ^ host_efer) & EFER_NX))) { in update_transition_efer()
1759 add_atomic_switch_msr(vmx, MSR_EFER, in update_transition_efer()
1766 vmx->guest_msrs[efer_offset].data = guest_efer; in update_transition_efer()
1767 vmx->guest_msrs[efer_offset].mask = ~ignore_bits; in update_transition_efer()
1811 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_save_host_state() local
1814 if (vmx->host_state.loaded) in vmx_save_host_state()
1817 vmx->host_state.loaded = 1; in vmx_save_host_state()
1822 vmx->host_state.ldt_sel = kvm_read_ldt(); in vmx_save_host_state()
1823 vmx->host_state.gs_ldt_reload_needed = vmx->host_state.ldt_sel; in vmx_save_host_state()
1824 savesegment(fs, vmx->host_state.fs_sel); in vmx_save_host_state()
1825 if (!(vmx->host_state.fs_sel & 7)) { in vmx_save_host_state()
1826 vmcs_write16(HOST_FS_SELECTOR, vmx->host_state.fs_sel); in vmx_save_host_state()
1827 vmx->host_state.fs_reload_needed = 0; in vmx_save_host_state()
1830 vmx->host_state.fs_reload_needed = 1; in vmx_save_host_state()
1832 savesegment(gs, vmx->host_state.gs_sel); in vmx_save_host_state()
1833 if (!(vmx->host_state.gs_sel & 7)) in vmx_save_host_state()
1834 vmcs_write16(HOST_GS_SELECTOR, vmx->host_state.gs_sel); in vmx_save_host_state()
1837 vmx->host_state.gs_ldt_reload_needed = 1; in vmx_save_host_state()
1841 savesegment(ds, vmx->host_state.ds_sel); in vmx_save_host_state()
1842 savesegment(es, vmx->host_state.es_sel); in vmx_save_host_state()
1849 vmcs_writel(HOST_FS_BASE, segment_base(vmx->host_state.fs_sel)); in vmx_save_host_state()
1850 vmcs_writel(HOST_GS_BASE, segment_base(vmx->host_state.gs_sel)); in vmx_save_host_state()
1854 rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_host_kernel_gs_base); in vmx_save_host_state()
1855 if (is_long_mode(&vmx->vcpu)) in vmx_save_host_state()
1856 wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base); in vmx_save_host_state()
1859 rdmsrl(MSR_IA32_BNDCFGS, vmx->host_state.msr_host_bndcfgs); in vmx_save_host_state()
1860 for (i = 0; i < vmx->save_nmsrs; ++i) in vmx_save_host_state()
1861 kvm_set_shared_msr(vmx->guest_msrs[i].index, in vmx_save_host_state()
1862 vmx->guest_msrs[i].data, in vmx_save_host_state()
1863 vmx->guest_msrs[i].mask); in vmx_save_host_state()
1866 static void __vmx_load_host_state(struct vcpu_vmx *vmx) in __vmx_load_host_state() argument
1868 if (!vmx->host_state.loaded) in __vmx_load_host_state()
1871 ++vmx->vcpu.stat.host_state_reload; in __vmx_load_host_state()
1872 vmx->host_state.loaded = 0; in __vmx_load_host_state()
1874 if (is_long_mode(&vmx->vcpu)) in __vmx_load_host_state()
1875 rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base); in __vmx_load_host_state()
1877 if (vmx->host_state.gs_ldt_reload_needed) { in __vmx_load_host_state()
1878 kvm_load_ldt(vmx->host_state.ldt_sel); in __vmx_load_host_state()
1880 load_gs_index(vmx->host_state.gs_sel); in __vmx_load_host_state()
1882 loadsegment(gs, vmx->host_state.gs_sel); in __vmx_load_host_state()
1885 if (vmx->host_state.fs_reload_needed) in __vmx_load_host_state()
1886 loadsegment(fs, vmx->host_state.fs_sel); in __vmx_load_host_state()
1888 if (unlikely(vmx->host_state.ds_sel | vmx->host_state.es_sel)) { in __vmx_load_host_state()
1889 loadsegment(ds, vmx->host_state.ds_sel); in __vmx_load_host_state()
1890 loadsegment(es, vmx->host_state.es_sel); in __vmx_load_host_state()
1895 wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_host_kernel_gs_base); in __vmx_load_host_state()
1897 if (vmx->host_state.msr_host_bndcfgs) in __vmx_load_host_state()
1898 wrmsrl(MSR_IA32_BNDCFGS, vmx->host_state.msr_host_bndcfgs); in __vmx_load_host_state()
1903 if (!user_has_fpu() && !vmx->vcpu.guest_fpu_loaded) in __vmx_load_host_state()
1908 static void vmx_load_host_state(struct vcpu_vmx *vmx) in vmx_load_host_state() argument
1911 __vmx_load_host_state(vmx); in vmx_load_host_state()
1921 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_vcpu_load() local
1926 else if (vmx->loaded_vmcs->cpu != cpu) in vmx_vcpu_load()
1927 loaded_vmcs_clear(vmx->loaded_vmcs); in vmx_vcpu_load()
1929 if (per_cpu(current_vmcs, cpu) != vmx->loaded_vmcs->vmcs) { in vmx_vcpu_load()
1930 per_cpu(current_vmcs, cpu) = vmx->loaded_vmcs->vmcs; in vmx_vcpu_load()
1931 vmcs_load(vmx->loaded_vmcs->vmcs); in vmx_vcpu_load()
1934 if (vmx->loaded_vmcs->cpu != cpu) { in vmx_vcpu_load()
1949 list_add(&vmx->loaded_vmcs->loaded_vmcss_on_cpu_link, in vmx_vcpu_load()
1963 vmx->loaded_vmcs->cpu = cpu; in vmx_vcpu_load()
2131 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_queue_exception() local
2143 if (vmx->rmode.vm86_active) { in vmx_queue_exception()
2154 vmx->vcpu.arch.event_exit_inst_len); in vmx_queue_exception()
2175 static void move_msr_up(struct vcpu_vmx *vmx, int from, int to) in move_msr_up() argument
2179 tmp = vmx->guest_msrs[to]; in move_msr_up()
2180 vmx->guest_msrs[to] = vmx->guest_msrs[from]; in move_msr_up()
2181 vmx->guest_msrs[from] = tmp; in move_msr_up()
2211 static void setup_msrs(struct vcpu_vmx *vmx) in setup_msrs() argument
2217 if (is_long_mode(&vmx->vcpu)) { in setup_msrs()
2218 index = __find_msr_index(vmx, MSR_SYSCALL_MASK); in setup_msrs()
2220 move_msr_up(vmx, index, save_nmsrs++); in setup_msrs()
2221 index = __find_msr_index(vmx, MSR_LSTAR); in setup_msrs()
2223 move_msr_up(vmx, index, save_nmsrs++); in setup_msrs()
2224 index = __find_msr_index(vmx, MSR_CSTAR); in setup_msrs()
2226 move_msr_up(vmx, index, save_nmsrs++); in setup_msrs()
2227 index = __find_msr_index(vmx, MSR_TSC_AUX); in setup_msrs()
2228 if (index >= 0 && vmx->rdtscp_enabled) in setup_msrs()
2229 move_msr_up(vmx, index, save_nmsrs++); in setup_msrs()
2234 index = __find_msr_index(vmx, MSR_STAR); in setup_msrs()
2235 if ((index >= 0) && (vmx->vcpu.arch.efer & EFER_SCE)) in setup_msrs()
2236 move_msr_up(vmx, index, save_nmsrs++); in setup_msrs()
2239 index = __find_msr_index(vmx, MSR_EFER); in setup_msrs()
2240 if (index >= 0 && update_transition_efer(vmx, index)) in setup_msrs()
2241 move_msr_up(vmx, index, save_nmsrs++); in setup_msrs()
2243 vmx->save_nmsrs = save_nmsrs; in setup_msrs()
2246 vmx_set_msr_bitmap(&vmx->vcpu); in setup_msrs()
2368 static void nested_vmx_setup_ctls_msrs(struct vcpu_vmx *vmx) in nested_vmx_setup_ctls_msrs() argument
2387 vmx->nested.nested_vmx_pinbased_ctls_low, in nested_vmx_setup_ctls_msrs()
2388 vmx->nested.nested_vmx_pinbased_ctls_high); in nested_vmx_setup_ctls_msrs()
2389 vmx->nested.nested_vmx_pinbased_ctls_low |= in nested_vmx_setup_ctls_msrs()
2391 vmx->nested.nested_vmx_pinbased_ctls_high &= in nested_vmx_setup_ctls_msrs()
2395 vmx->nested.nested_vmx_pinbased_ctls_high |= in nested_vmx_setup_ctls_msrs()
2398 if (vmx_vm_has_apicv(vmx->vcpu.kvm)) in nested_vmx_setup_ctls_msrs()
2399 vmx->nested.nested_vmx_pinbased_ctls_high |= in nested_vmx_setup_ctls_msrs()
2404 vmx->nested.nested_vmx_exit_ctls_low, in nested_vmx_setup_ctls_msrs()
2405 vmx->nested.nested_vmx_exit_ctls_high); in nested_vmx_setup_ctls_msrs()
2406 vmx->nested.nested_vmx_exit_ctls_low = in nested_vmx_setup_ctls_msrs()
2409 vmx->nested.nested_vmx_exit_ctls_high &= in nested_vmx_setup_ctls_msrs()
2414 vmx->nested.nested_vmx_exit_ctls_high |= in nested_vmx_setup_ctls_msrs()
2420 vmx->nested.nested_vmx_exit_ctls_high |= VM_EXIT_CLEAR_BNDCFGS; in nested_vmx_setup_ctls_msrs()
2423 vmx->nested.nested_vmx_true_exit_ctls_low = in nested_vmx_setup_ctls_msrs()
2424 vmx->nested.nested_vmx_exit_ctls_low & in nested_vmx_setup_ctls_msrs()
2429 vmx->nested.nested_vmx_entry_ctls_low, in nested_vmx_setup_ctls_msrs()
2430 vmx->nested.nested_vmx_entry_ctls_high); in nested_vmx_setup_ctls_msrs()
2431 vmx->nested.nested_vmx_entry_ctls_low = in nested_vmx_setup_ctls_msrs()
2433 vmx->nested.nested_vmx_entry_ctls_high &= in nested_vmx_setup_ctls_msrs()
2438 vmx->nested.nested_vmx_entry_ctls_high |= in nested_vmx_setup_ctls_msrs()
2441 vmx->nested.nested_vmx_entry_ctls_high |= VM_ENTRY_LOAD_BNDCFGS; in nested_vmx_setup_ctls_msrs()
2444 vmx->nested.nested_vmx_true_entry_ctls_low = in nested_vmx_setup_ctls_msrs()
2445 vmx->nested.nested_vmx_entry_ctls_low & in nested_vmx_setup_ctls_msrs()
2450 vmx->nested.nested_vmx_procbased_ctls_low, in nested_vmx_setup_ctls_msrs()
2451 vmx->nested.nested_vmx_procbased_ctls_high); in nested_vmx_setup_ctls_msrs()
2452 vmx->nested.nested_vmx_procbased_ctls_low = in nested_vmx_setup_ctls_msrs()
2454 vmx->nested.nested_vmx_procbased_ctls_high &= in nested_vmx_setup_ctls_msrs()
2474 vmx->nested.nested_vmx_procbased_ctls_high |= in nested_vmx_setup_ctls_msrs()
2479 vmx->nested.nested_vmx_true_procbased_ctls_low = in nested_vmx_setup_ctls_msrs()
2480 vmx->nested.nested_vmx_procbased_ctls_low & in nested_vmx_setup_ctls_msrs()
2485 vmx->nested.nested_vmx_secondary_ctls_low, in nested_vmx_setup_ctls_msrs()
2486 vmx->nested.nested_vmx_secondary_ctls_high); in nested_vmx_setup_ctls_msrs()
2487 vmx->nested.nested_vmx_secondary_ctls_low = 0; in nested_vmx_setup_ctls_msrs()
2488 vmx->nested.nested_vmx_secondary_ctls_high &= in nested_vmx_setup_ctls_msrs()
2499 vmx->nested.nested_vmx_secondary_ctls_high |= in nested_vmx_setup_ctls_msrs()
2501 vmx->nested.nested_vmx_ept_caps = VMX_EPT_PAGE_WALK_4_BIT | in nested_vmx_setup_ctls_msrs()
2504 vmx->nested.nested_vmx_ept_caps &= vmx_capability.ept; in nested_vmx_setup_ctls_msrs()
2510 vmx->nested.nested_vmx_ept_caps |= VMX_EPT_EXTENT_GLOBAL_BIT; in nested_vmx_setup_ctls_msrs()
2512 vmx->nested.nested_vmx_ept_caps = 0; in nested_vmx_setup_ctls_msrs()
2515 vmx->nested.nested_vmx_secondary_ctls_high |= in nested_vmx_setup_ctls_msrs()
2520 vmx->nested.nested_vmx_misc_low, in nested_vmx_setup_ctls_msrs()
2521 vmx->nested.nested_vmx_misc_high); in nested_vmx_setup_ctls_msrs()
2522 vmx->nested.nested_vmx_misc_low &= VMX_MISC_SAVE_EFER_LMA; in nested_vmx_setup_ctls_msrs()
2523 vmx->nested.nested_vmx_misc_low |= in nested_vmx_setup_ctls_msrs()
2526 vmx->nested.nested_vmx_misc_high = 0; in nested_vmx_setup_ctls_msrs()
2545 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_get_vmx_msr() local
2562 vmx->nested.nested_vmx_pinbased_ctls_low, in vmx_get_vmx_msr()
2563 vmx->nested.nested_vmx_pinbased_ctls_high); in vmx_get_vmx_msr()
2567 vmx->nested.nested_vmx_true_procbased_ctls_low, in vmx_get_vmx_msr()
2568 vmx->nested.nested_vmx_procbased_ctls_high); in vmx_get_vmx_msr()
2572 vmx->nested.nested_vmx_procbased_ctls_low, in vmx_get_vmx_msr()
2573 vmx->nested.nested_vmx_procbased_ctls_high); in vmx_get_vmx_msr()
2577 vmx->nested.nested_vmx_true_exit_ctls_low, in vmx_get_vmx_msr()
2578 vmx->nested.nested_vmx_exit_ctls_high); in vmx_get_vmx_msr()
2582 vmx->nested.nested_vmx_exit_ctls_low, in vmx_get_vmx_msr()
2583 vmx->nested.nested_vmx_exit_ctls_high); in vmx_get_vmx_msr()
2587 vmx->nested.nested_vmx_true_entry_ctls_low, in vmx_get_vmx_msr()
2588 vmx->nested.nested_vmx_entry_ctls_high); in vmx_get_vmx_msr()
2592 vmx->nested.nested_vmx_entry_ctls_low, in vmx_get_vmx_msr()
2593 vmx->nested.nested_vmx_entry_ctls_high); in vmx_get_vmx_msr()
2597 vmx->nested.nested_vmx_misc_low, in vmx_get_vmx_msr()
2598 vmx->nested.nested_vmx_misc_high); in vmx_get_vmx_msr()
2624 vmx->nested.nested_vmx_secondary_ctls_low, in vmx_get_vmx_msr()
2625 vmx->nested.nested_vmx_secondary_ctls_high); in vmx_get_vmx_msr()
2629 *pdata = vmx->nested.nested_vmx_ept_caps; in vmx_get_vmx_msr()
2725 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_set_msr() local
2737 vmx_segment_cache_clear(vmx); in vmx_set_msr()
2741 vmx_segment_cache_clear(vmx); in vmx_set_msr()
2745 vmx_load_host_state(vmx); in vmx_set_msr()
2746 vmx->msr_guest_kernel_gs_base = data; in vmx_set_msr()
2784 vmx->nested.msr_ia32_feature_control = data; in vmx_set_msr()
2801 add_atomic_switch_msr(vmx, MSR_IA32_XSS, in vmx_set_msr()
2804 clear_atomic_switch_msr(vmx, MSR_IA32_XSS); in vmx_set_msr()
2807 if (!vmx->rdtscp_enabled) in vmx_set_msr()
2814 msr = find_msr_entry(vmx, msr_index); in vmx_set_msr()
2818 if (msr - vmx->guest_msrs < vmx->save_nmsrs) { in vmx_set_msr()
3299 struct vcpu_vmx *vmx = to_vmx(vcpu); in enter_pmode() local
3305 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_ES], VCPU_SREG_ES); in enter_pmode()
3306 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_DS], VCPU_SREG_DS); in enter_pmode()
3307 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_FS], VCPU_SREG_FS); in enter_pmode()
3308 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_GS], VCPU_SREG_GS); in enter_pmode()
3309 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_SS], VCPU_SREG_SS); in enter_pmode()
3310 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_CS], VCPU_SREG_CS); in enter_pmode()
3312 vmx->rmode.vm86_active = 0; in enter_pmode()
3314 vmx_segment_cache_clear(vmx); in enter_pmode()
3316 vmx_set_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_TR], VCPU_SREG_TR); in enter_pmode()
3320 flags |= vmx->rmode.save_rflags & ~RMODE_GUEST_OWNED_EFLAGS_BITS; in enter_pmode()
3328 fix_pmode_seg(vcpu, VCPU_SREG_CS, &vmx->rmode.segs[VCPU_SREG_CS]); in enter_pmode()
3329 fix_pmode_seg(vcpu, VCPU_SREG_SS, &vmx->rmode.segs[VCPU_SREG_SS]); in enter_pmode()
3330 fix_pmode_seg(vcpu, VCPU_SREG_ES, &vmx->rmode.segs[VCPU_SREG_ES]); in enter_pmode()
3331 fix_pmode_seg(vcpu, VCPU_SREG_DS, &vmx->rmode.segs[VCPU_SREG_DS]); in enter_pmode()
3332 fix_pmode_seg(vcpu, VCPU_SREG_FS, &vmx->rmode.segs[VCPU_SREG_FS]); in enter_pmode()
3333 fix_pmode_seg(vcpu, VCPU_SREG_GS, &vmx->rmode.segs[VCPU_SREG_GS]); in enter_pmode()
3372 struct vcpu_vmx *vmx = to_vmx(vcpu); in enter_rmode() local
3374 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_TR], VCPU_SREG_TR); in enter_rmode()
3375 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_ES], VCPU_SREG_ES); in enter_rmode()
3376 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_DS], VCPU_SREG_DS); in enter_rmode()
3377 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_FS], VCPU_SREG_FS); in enter_rmode()
3378 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_GS], VCPU_SREG_GS); in enter_rmode()
3379 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_SS], VCPU_SREG_SS); in enter_rmode()
3380 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_CS], VCPU_SREG_CS); in enter_rmode()
3382 vmx->rmode.vm86_active = 1; in enter_rmode()
3392 vmx_segment_cache_clear(vmx); in enter_rmode()
3399 vmx->rmode.save_rflags = flags; in enter_rmode()
3407 fix_rmode_seg(VCPU_SREG_SS, &vmx->rmode.segs[VCPU_SREG_SS]); in enter_rmode()
3408 fix_rmode_seg(VCPU_SREG_CS, &vmx->rmode.segs[VCPU_SREG_CS]); in enter_rmode()
3409 fix_rmode_seg(VCPU_SREG_ES, &vmx->rmode.segs[VCPU_SREG_ES]); in enter_rmode()
3410 fix_rmode_seg(VCPU_SREG_DS, &vmx->rmode.segs[VCPU_SREG_DS]); in enter_rmode()
3411 fix_rmode_seg(VCPU_SREG_GS, &vmx->rmode.segs[VCPU_SREG_GS]); in enter_rmode()
3412 fix_rmode_seg(VCPU_SREG_FS, &vmx->rmode.segs[VCPU_SREG_FS]); in enter_rmode()
3419 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_set_efer() local
3420 struct shared_msr_entry *msr = find_msr_entry(vmx, MSR_EFER); in vmx_set_efer()
3439 setup_msrs(vmx); in vmx_set_efer()
3567 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_set_cr0() local
3576 if (vmx->rmode.vm86_active && (cr0 & X86_CR0_PE)) in vmx_set_cr0()
3579 if (!vmx->rmode.vm86_active && !(cr0 & X86_CR0_PE)) in vmx_set_cr0()
3603 vmx->emulation_required = emulation_required(vcpu); in vmx_set_cr0()
3695 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_get_segment() local
3698 if (vmx->rmode.vm86_active && seg != VCPU_SREG_LDTR) { in vmx_get_segment()
3699 *var = vmx->rmode.segs[seg]; in vmx_get_segment()
3701 || var->selector == vmx_read_guest_seg_selector(vmx, seg)) in vmx_get_segment()
3703 var->base = vmx_read_guest_seg_base(vmx, seg); in vmx_get_segment()
3704 var->selector = vmx_read_guest_seg_selector(vmx, seg); in vmx_get_segment()
3707 var->base = vmx_read_guest_seg_base(vmx, seg); in vmx_get_segment()
3708 var->limit = vmx_read_guest_seg_limit(vmx, seg); in vmx_get_segment()
3709 var->selector = vmx_read_guest_seg_selector(vmx, seg); in vmx_get_segment()
3710 ar = vmx_read_guest_seg_ar(vmx, seg); in vmx_get_segment()
3742 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_get_cpl() local
3744 if (unlikely(vmx->rmode.vm86_active)) in vmx_get_cpl()
3747 int ar = vmx_read_guest_seg_ar(vmx, VCPU_SREG_SS); in vmx_get_cpl()
3775 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_set_segment() local
3778 vmx_segment_cache_clear(vmx); in vmx_set_segment()
3780 if (vmx->rmode.vm86_active && seg != VCPU_SREG_LDTR) { in vmx_set_segment()
3781 vmx->rmode.segs[seg] = *var; in vmx_set_segment()
3785 fix_rmode_seg(seg, &vmx->rmode.segs[seg]); in vmx_set_segment()
3810 vmx->emulation_required = emulation_required(vcpu); in vmx_set_segment()
4184 static void allocate_vpid(struct vcpu_vmx *vmx) in allocate_vpid() argument
4188 vmx->vpid = 0; in allocate_vpid()
4194 vmx->vpid = vpid; in allocate_vpid()
4200 static void free_vpid(struct vcpu_vmx *vmx) in free_vpid() argument
4205 if (vmx->vpid != 0) in free_vpid()
4206 __clear_bit(vmx->vpid, vmx_vpid_bitmap); in free_vpid()
4368 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_complete_nested_posted_interrupt() local
4373 if (vmx->nested.pi_desc && in vmx_complete_nested_posted_interrupt()
4374 vmx->nested.pi_pending) { in vmx_complete_nested_posted_interrupt()
4375 vmx->nested.pi_pending = false; in vmx_complete_nested_posted_interrupt()
4376 if (!pi_test_and_clear_on(vmx->nested.pi_desc)) in vmx_complete_nested_posted_interrupt()
4380 (unsigned long *)vmx->nested.pi_desc->pir, 256); in vmx_complete_nested_posted_interrupt()
4385 vapic_page = kmap(vmx->nested.virtual_apic_page); in vmx_complete_nested_posted_interrupt()
4390 __kvm_apic_update_irr(vmx->nested.pi_desc->pir, vapic_page); in vmx_complete_nested_posted_interrupt()
4391 kunmap(vmx->nested.virtual_apic_page); in vmx_complete_nested_posted_interrupt()
4418 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_deliver_nested_posted_interrupt() local
4421 vector == vmx->nested.posted_intr_nv) { in vmx_deliver_nested_posted_interrupt()
4428 vmx->nested.pi_pending = true; in vmx_deliver_nested_posted_interrupt()
4443 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_deliver_posted_interrupt() local
4450 if (pi_test_and_set_pir(vector, &vmx->pi_desc)) in vmx_deliver_posted_interrupt()
4453 r = pi_test_and_set_on(&vmx->pi_desc); in vmx_deliver_posted_interrupt()
4461 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_sync_pir_to_irr() local
4463 if (!pi_test_and_clear_on(&vmx->pi_desc)) in vmx_sync_pir_to_irr()
4466 kvm_apic_update_irr(vcpu, vmx->pi_desc.pir); in vmx_sync_pir_to_irr()
4480 static void vmx_set_constant_host_state(struct vcpu_vmx *vmx) in vmx_set_constant_host_state() argument
4493 vmx->host_state.vmcs_host_cr4 = cr4; in vmx_set_constant_host_state()
4513 vmx->host_idt_base = dt.address; in vmx_set_constant_host_state()
4528 static void set_cr4_guest_host_mask(struct vcpu_vmx *vmx) in set_cr4_guest_host_mask() argument
4530 vmx->vcpu.arch.cr4_guest_owned_bits = KVM_CR4_GUEST_OWNED_BITS; in set_cr4_guest_host_mask()
4532 vmx->vcpu.arch.cr4_guest_owned_bits |= X86_CR4_PGE; in set_cr4_guest_host_mask()
4533 if (is_guest_mode(&vmx->vcpu)) in set_cr4_guest_host_mask()
4534 vmx->vcpu.arch.cr4_guest_owned_bits &= in set_cr4_guest_host_mask()
4535 ~get_vmcs12(&vmx->vcpu)->cr4_guest_host_mask; in set_cr4_guest_host_mask()
4536 vmcs_writel(CR4_GUEST_HOST_MASK, ~vmx->vcpu.arch.cr4_guest_owned_bits); in set_cr4_guest_host_mask()
4539 static u32 vmx_pin_based_exec_ctrl(struct vcpu_vmx *vmx) in vmx_pin_based_exec_ctrl() argument
4543 if (!vmx_vm_has_apicv(vmx->vcpu.kvm)) in vmx_pin_based_exec_ctrl()
4548 static u32 vmx_exec_control(struct vcpu_vmx *vmx) in vmx_exec_control() argument
4552 if (vmx->vcpu.arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT) in vmx_exec_control()
4555 if (!vm_need_tpr_shadow(vmx->vcpu.kvm)) { in vmx_exec_control()
4569 static u32 vmx_secondary_exec_control(struct vcpu_vmx *vmx) in vmx_secondary_exec_control() argument
4572 if (!vm_need_virtualize_apic_accesses(vmx->vcpu.kvm)) in vmx_secondary_exec_control()
4574 if (vmx->vpid == 0) in vmx_secondary_exec_control()
4586 if (!vmx_vm_has_apicv(vmx->vcpu.kvm)) in vmx_secondary_exec_control()
4617 static int vmx_vcpu_setup(struct vcpu_vmx *vmx) in vmx_vcpu_setup() argument
4638 vmcs_write32(PIN_BASED_VM_EXEC_CONTROL, vmx_pin_based_exec_ctrl(vmx)); in vmx_vcpu_setup()
4640 vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, vmx_exec_control(vmx)); in vmx_vcpu_setup()
4644 vmx_secondary_exec_control(vmx)); in vmx_vcpu_setup()
4647 if (vmx_vm_has_apicv(vmx->vcpu.kvm)) { in vmx_vcpu_setup()
4656 vmcs_write64(POSTED_INTR_DESC_ADDR, __pa((&vmx->pi_desc))); in vmx_vcpu_setup()
4661 vmx->ple_window = ple_window; in vmx_vcpu_setup()
4662 vmx->ple_window_dirty = true; in vmx_vcpu_setup()
4671 vmx_set_constant_host_state(vmx); in vmx_vcpu_setup()
4684 vmcs_write64(VM_EXIT_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.host)); in vmx_vcpu_setup()
4686 vmcs_write64(VM_ENTRY_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.guest)); in vmx_vcpu_setup()
4696 vmx->vcpu.arch.pat = host_pat; in vmx_vcpu_setup()
4702 int j = vmx->nmsrs; in vmx_vcpu_setup()
4708 vmx->guest_msrs[j].index = i; in vmx_vcpu_setup()
4709 vmx->guest_msrs[j].data = 0; in vmx_vcpu_setup()
4710 vmx->guest_msrs[j].mask = -1ull; in vmx_vcpu_setup()
4711 ++vmx->nmsrs; in vmx_vcpu_setup()
4715 vm_exit_controls_init(vmx, vmcs_config.vmexit_ctrl); in vmx_vcpu_setup()
4718 vm_entry_controls_init(vmx, vmcs_config.vmentry_ctrl); in vmx_vcpu_setup()
4721 set_cr4_guest_host_mask(vmx); in vmx_vcpu_setup()
4731 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_vcpu_reset() local
4734 vmx->rmode.vm86_active = 0; in vmx_vcpu_reset()
4736 vmx->soft_vnmi_blocked = 0; in vmx_vcpu_reset()
4738 vmx->vcpu.arch.regs[VCPU_REGS_RDX] = get_rdx_init_val(); in vmx_vcpu_reset()
4739 kvm_set_cr8(&vmx->vcpu, 0); in vmx_vcpu_reset()
4741 if (kvm_vcpu_is_reset_bsp(&vmx->vcpu)) in vmx_vcpu_reset()
4744 kvm_set_apic_base(&vmx->vcpu, &apic_base_msr); in vmx_vcpu_reset()
4746 vmx_segment_cache_clear(vmx); in vmx_vcpu_reset()
4788 setup_msrs(vmx); in vmx_vcpu_reset()
4794 if (vm_need_tpr_shadow(vmx->vcpu.kvm)) in vmx_vcpu_reset()
4796 __pa(vmx->vcpu.arch.apic->regs)); in vmx_vcpu_reset()
4803 memset(&vmx->pi_desc, 0, sizeof(struct pi_desc)); in vmx_vcpu_reset()
4805 if (vmx->vpid != 0) in vmx_vcpu_reset()
4806 vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->vpid); in vmx_vcpu_reset()
4808 vmx->vcpu.arch.cr0 = X86_CR0_NW | X86_CR0_CD | X86_CR0_ET; in vmx_vcpu_reset()
4809 vmx_set_cr0(&vmx->vcpu, kvm_read_cr0(vcpu)); /* enter rmode */ in vmx_vcpu_reset()
4810 vmx_set_cr4(&vmx->vcpu, 0); in vmx_vcpu_reset()
4811 vmx_set_efer(&vmx->vcpu, 0); in vmx_vcpu_reset()
4812 vmx_fpu_activate(&vmx->vcpu); in vmx_vcpu_reset()
4813 update_exception_bitmap(&vmx->vcpu); in vmx_vcpu_reset()
4815 vpid_sync_context(vmx); in vmx_vcpu_reset()
4870 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_inject_irq() local
4877 if (vmx->rmode.vm86_active) { in vmx_inject_irq()
4889 vmx->vcpu.arch.event_exit_inst_len); in vmx_inject_irq()
4897 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_inject_nmi() local
4911 vmx->soft_vnmi_blocked = 1; in vmx_inject_nmi()
4912 vmx->vnmi_blocked_time = 0; in vmx_inject_nmi()
4916 vmx->nmi_known_unmasked = false; in vmx_inject_nmi()
4917 if (vmx->rmode.vm86_active) { in vmx_inject_nmi()
4937 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_set_nmi_mask() local
4940 if (vmx->soft_vnmi_blocked != masked) { in vmx_set_nmi_mask()
4941 vmx->soft_vnmi_blocked = masked; in vmx_set_nmi_mask()
4942 vmx->vnmi_blocked_time = 0; in vmx_set_nmi_mask()
4945 vmx->nmi_known_unmasked = !masked; in vmx_set_nmi_mask()
5079 struct vcpu_vmx *vmx = to_vmx(vcpu); in handle_exception() local
5086 vect_info = vmx->idt_vectoring_info; in handle_exception()
5087 intr_info = vmx->exit_intr_info; in handle_exception()
5144 if (vmx->rmode.vm86_active && rmode_exception(vcpu, ex_no)) in handle_exception()
5172 vmx->vcpu.arch.event_exit_inst_len = in handle_exception()
5679 struct vcpu_vmx *vmx = to_vmx(vcpu); in handle_task_switch() local
5686 idt_v = (vmx->idt_vectoring_info & VECTORING_INFO_VALID_MASK); in handle_task_switch()
5687 idt_index = (vmx->idt_vectoring_info & VECTORING_INFO_VECTOR_MASK); in handle_task_switch()
5688 type = (vmx->idt_vectoring_info & VECTORING_INFO_TYPE_MASK); in handle_task_switch()
5704 if (vmx->idt_vectoring_info & in handle_task_switch()
5910 struct vcpu_vmx *vmx = to_vmx(vcpu); in handle_invalid_guest_state() local
5920 while (vmx->emulation_required && count-- != 0) { in handle_invalid_guest_state()
5922 return handle_interrupt_window(&vmx->vcpu); in handle_invalid_guest_state()
5988 struct vcpu_vmx *vmx = to_vmx(vcpu); in grow_ple_window() local
5989 int old = vmx->ple_window; in grow_ple_window()
5991 vmx->ple_window = __grow_ple_window(old); in grow_ple_window()
5993 if (vmx->ple_window != old) in grow_ple_window()
5994 vmx->ple_window_dirty = true; in grow_ple_window()
5996 trace_kvm_ple_window_grow(vcpu->vcpu_id, vmx->ple_window, old); in grow_ple_window()
6001 struct vcpu_vmx *vmx = to_vmx(vcpu); in shrink_ple_window() local
6002 int old = vmx->ple_window; in shrink_ple_window()
6004 vmx->ple_window = __shrink_ple_window(old, in shrink_ple_window()
6007 if (vmx->ple_window != old) in shrink_ple_window()
6008 vmx->ple_window_dirty = true; in shrink_ple_window()
6010 trace_kvm_ple_window_shrink(vcpu->vcpu_id, vmx->ple_window, old); in shrink_ple_window()
6302 static struct loaded_vmcs *nested_get_current_vmcs02(struct vcpu_vmx *vmx) in nested_get_current_vmcs02() argument
6305 list_for_each_entry(item, &vmx->nested.vmcs02_pool, list) in nested_get_current_vmcs02()
6306 if (item->vmptr == vmx->nested.current_vmptr) { in nested_get_current_vmcs02()
6307 list_move(&item->list, &vmx->nested.vmcs02_pool); in nested_get_current_vmcs02()
6311 if (vmx->nested.vmcs02_num >= max(VMCS02_POOL_SIZE, 1)) { in nested_get_current_vmcs02()
6313 item = list_entry(vmx->nested.vmcs02_pool.prev, in nested_get_current_vmcs02()
6315 item->vmptr = vmx->nested.current_vmptr; in nested_get_current_vmcs02()
6316 list_move(&item->list, &vmx->nested.vmcs02_pool); in nested_get_current_vmcs02()
6330 item->vmptr = vmx->nested.current_vmptr; in nested_get_current_vmcs02()
6331 list_add(&(item->list), &(vmx->nested.vmcs02_pool)); in nested_get_current_vmcs02()
6332 vmx->nested.vmcs02_num++; in nested_get_current_vmcs02()
6337 static void nested_free_vmcs02(struct vcpu_vmx *vmx, gpa_t vmptr) in nested_free_vmcs02() argument
6340 list_for_each_entry(item, &vmx->nested.vmcs02_pool, list) in nested_free_vmcs02()
6345 vmx->nested.vmcs02_num--; in nested_free_vmcs02()
6355 static void nested_free_all_saved_vmcss(struct vcpu_vmx *vmx) in nested_free_all_saved_vmcss() argument
6359 WARN_ON(vmx->loaded_vmcs != &vmx->vmcs01); in nested_free_all_saved_vmcss()
6360 list_for_each_entry_safe(item, n, &vmx->nested.vmcs02_pool, list) { in nested_free_all_saved_vmcss()
6365 if (vmx->loaded_vmcs == &item->vmcs02) in nested_free_all_saved_vmcss()
6371 vmx->nested.vmcs02_num--; in nested_free_all_saved_vmcss()
6426 struct vcpu_vmx *vmx = in vmx_preemption_timer_fn() local
6429 vmx->nested.preemption_timer_expired = true; in vmx_preemption_timer_fn()
6430 kvm_make_request(KVM_REQ_EVENT, &vmx->vcpu); in vmx_preemption_timer_fn()
6431 kvm_vcpu_kick(&vmx->vcpu); in vmx_preemption_timer_fn()
6503 struct vcpu_vmx *vmx = to_vmx(vcpu); in nested_vmx_check_vmptr() local
6543 vmx->nested.vmxon_ptr = vmptr; in nested_vmx_check_vmptr()
6553 if (vmptr == vmx->nested.vmxon_ptr) { in nested_vmx_check_vmptr()
6568 if (vmptr == vmx->nested.vmxon_ptr) { in nested_vmx_check_vmptr()
6595 struct vcpu_vmx *vmx = to_vmx(vcpu); in handle_vmon() local
6626 if (vmx->nested.vmxon) { in handle_vmon()
6632 if ((vmx->nested.msr_ia32_feature_control & VMXON_NEEDED_FEATURES) in handle_vmon()
6646 vmx->nested.current_shadow_vmcs = shadow_vmcs; in handle_vmon()
6649 INIT_LIST_HEAD(&(vmx->nested.vmcs02_pool)); in handle_vmon()
6650 vmx->nested.vmcs02_num = 0; in handle_vmon()
6652 hrtimer_init(&vmx->nested.preemption_timer, CLOCK_MONOTONIC, in handle_vmon()
6654 vmx->nested.preemption_timer.function = vmx_preemption_timer_fn; in handle_vmon()
6656 vmx->nested.vmxon = true; in handle_vmon()
6671 struct vcpu_vmx *vmx = to_vmx(vcpu); in nested_vmx_check_permission() local
6673 if (!vmx->nested.vmxon) { in nested_vmx_check_permission()
6693 static inline void nested_release_vmcs12(struct vcpu_vmx *vmx) in nested_release_vmcs12() argument
6696 if (vmx->nested.current_vmptr == -1ull) in nested_release_vmcs12()
6700 if (WARN_ON(vmx->nested.current_vmcs12 == NULL)) in nested_release_vmcs12()
6706 copy_shadow_to_vmcs12(vmx); in nested_release_vmcs12()
6707 vmx->nested.sync_shadow_vmcs = false; in nested_release_vmcs12()
6713 vmx->nested.posted_intr_nv = -1; in nested_release_vmcs12()
6714 kunmap(vmx->nested.current_vmcs12_page); in nested_release_vmcs12()
6715 nested_release_page(vmx->nested.current_vmcs12_page); in nested_release_vmcs12()
6716 vmx->nested.current_vmptr = -1ull; in nested_release_vmcs12()
6717 vmx->nested.current_vmcs12 = NULL; in nested_release_vmcs12()
6724 static void free_nested(struct vcpu_vmx *vmx) in free_nested() argument
6726 if (!vmx->nested.vmxon) in free_nested()
6729 vmx->nested.vmxon = false; in free_nested()
6730 nested_release_vmcs12(vmx); in free_nested()
6732 free_vmcs(vmx->nested.current_shadow_vmcs); in free_nested()
6734 if (vmx->nested.apic_access_page) { in free_nested()
6735 nested_release_page(vmx->nested.apic_access_page); in free_nested()
6736 vmx->nested.apic_access_page = NULL; in free_nested()
6738 if (vmx->nested.virtual_apic_page) { in free_nested()
6739 nested_release_page(vmx->nested.virtual_apic_page); in free_nested()
6740 vmx->nested.virtual_apic_page = NULL; in free_nested()
6742 if (vmx->nested.pi_desc_page) { in free_nested()
6743 kunmap(vmx->nested.pi_desc_page); in free_nested()
6744 nested_release_page(vmx->nested.pi_desc_page); in free_nested()
6745 vmx->nested.pi_desc_page = NULL; in free_nested()
6746 vmx->nested.pi_desc = NULL; in free_nested()
6749 nested_free_all_saved_vmcss(vmx); in free_nested()
6766 struct vcpu_vmx *vmx = to_vmx(vcpu); in handle_vmclear() local
6777 if (vmptr == vmx->nested.current_vmptr) in handle_vmclear()
6778 nested_release_vmcs12(vmx); in handle_vmclear()
6797 nested_free_vmcs02(vmx, vmptr); in handle_vmclear()
6903 static void copy_shadow_to_vmcs12(struct vcpu_vmx *vmx) in copy_shadow_to_vmcs12() argument
6908 struct vmcs *shadow_vmcs = vmx->nested.current_shadow_vmcs; in copy_shadow_to_vmcs12()
6935 vmcs12_write_any(&vmx->vcpu, field, field_value); in copy_shadow_to_vmcs12()
6939 vmcs_load(vmx->loaded_vmcs->vmcs); in copy_shadow_to_vmcs12()
6944 static void copy_vmcs12_to_shadow(struct vcpu_vmx *vmx) in copy_vmcs12_to_shadow() argument
6957 struct vmcs *shadow_vmcs = vmx->nested.current_shadow_vmcs; in copy_vmcs12_to_shadow()
6964 vmcs12_read_any(&vmx->vcpu, field, &field_value); in copy_vmcs12_to_shadow()
6987 vmcs_load(vmx->loaded_vmcs->vmcs); in copy_vmcs12_to_shadow()
6996 struct vcpu_vmx *vmx = to_vmx(vcpu); in nested_vmx_check_vmcs12() local
6997 if (vmx->nested.current_vmptr == -1ull) { in nested_vmx_check_vmcs12()
7104 struct vcpu_vmx *vmx = to_vmx(vcpu); in handle_vmptrld() local
7114 if (vmx->nested.current_vmptr != vmptr) { in handle_vmptrld()
7133 nested_release_vmcs12(vmx); in handle_vmptrld()
7134 vmx->nested.current_vmptr = vmptr; in handle_vmptrld()
7135 vmx->nested.current_vmcs12 = new_vmcs12; in handle_vmptrld()
7136 vmx->nested.current_vmcs12_page = page; in handle_vmptrld()
7142 __pa(vmx->nested.current_shadow_vmcs)); in handle_vmptrld()
7143 vmx->nested.sync_shadow_vmcs = true; in handle_vmptrld()
7181 struct vcpu_vmx *vmx = to_vmx(vcpu); in handle_invept() local
7190 if (!(vmx->nested.nested_vmx_secondary_ctls_high & in handle_invept()
7192 !(vmx->nested.nested_vmx_ept_caps & VMX_EPT_INVEPT_BIT)) { in handle_invept()
7208 types = (vmx->nested.nested_vmx_ept_caps >> VMX_EPT_EXTENT_SHIFT) & 6; in handle_invept()
7500 struct vcpu_vmx *vmx = to_vmx(vcpu); in nested_vmx_exit_handled() local
7502 u32 exit_reason = vmx->exit_reason; in nested_vmx_exit_handled()
7506 vmx->idt_vectoring_info, in nested_vmx_exit_handled()
7511 if (vmx->nested.nested_run_pending) in nested_vmx_exit_handled()
7514 if (unlikely(vmx->fail)) { in nested_vmx_exit_handled()
7635 static int vmx_enable_pml(struct vcpu_vmx *vmx) in vmx_enable_pml() argument
7644 vmx->pml_pg = pml_pg; in vmx_enable_pml()
7646 vmcs_write64(PML_ADDRESS, page_to_phys(vmx->pml_pg)); in vmx_enable_pml()
7656 static void vmx_disable_pml(struct vcpu_vmx *vmx) in vmx_disable_pml() argument
7660 ASSERT(vmx->pml_pg); in vmx_disable_pml()
7661 __free_page(vmx->pml_pg); in vmx_disable_pml()
7662 vmx->pml_pg = NULL; in vmx_disable_pml()
7669 static void vmx_flush_pml_buffer(struct vcpu_vmx *vmx) in vmx_flush_pml_buffer() argument
7671 struct kvm *kvm = vmx->vcpu.kvm; in vmx_flush_pml_buffer()
7687 pml_buf = page_address(vmx->pml_pg); in vmx_flush_pml_buffer()
7724 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_handle_exit() local
7725 u32 exit_reason = vmx->exit_reason; in vmx_handle_exit()
7726 u32 vectoring_info = vmx->idt_vectoring_info; in vmx_handle_exit()
7736 vmx_flush_pml_buffer(vmx); in vmx_handle_exit()
7739 if (vmx->emulation_required) in vmx_handle_exit()
7756 if (unlikely(vmx->fail)) { in vmx_handle_exit()
7782 if (unlikely(!cpu_has_virtual_nmis() && vmx->soft_vnmi_blocked && in vmx_handle_exit()
7786 vmx->soft_vnmi_blocked = 0; in vmx_handle_exit()
7787 } else if (vmx->vnmi_blocked_time > 1000000000LL && in vmx_handle_exit()
7798 vmx->soft_vnmi_blocked = 0; in vmx_handle_exit()
7859 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_set_apic_access_page_addr() local
7875 !nested_cpu_has2(vmx->nested.current_vmcs12, in vmx_set_apic_access_page_addr()
7953 static void vmx_complete_atomic_exit(struct vcpu_vmx *vmx) in vmx_complete_atomic_exit() argument
7957 if (!(vmx->exit_reason == EXIT_REASON_MCE_DURING_VMENTRY in vmx_complete_atomic_exit()
7958 || vmx->exit_reason == EXIT_REASON_EXCEPTION_NMI)) in vmx_complete_atomic_exit()
7961 vmx->exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO); in vmx_complete_atomic_exit()
7962 exit_intr_info = vmx->exit_intr_info; in vmx_complete_atomic_exit()
7971 kvm_before_handle_nmi(&vmx->vcpu); in vmx_complete_atomic_exit()
7973 kvm_after_handle_nmi(&vmx->vcpu); in vmx_complete_atomic_exit()
7991 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_handle_external_intr() local
7997 desc = (gate_desc *)vmx->host_idt_base + vector; in vmx_handle_external_intr()
8035 static void vmx_recover_nmi_blocking(struct vcpu_vmx *vmx) in vmx_recover_nmi_blocking() argument
8042 idtv_info_valid = vmx->idt_vectoring_info & VECTORING_INFO_VALID_MASK; in vmx_recover_nmi_blocking()
8045 if (vmx->nmi_known_unmasked) in vmx_recover_nmi_blocking()
8069 vmx->nmi_known_unmasked = in vmx_recover_nmi_blocking()
8072 } else if (unlikely(vmx->soft_vnmi_blocked)) in vmx_recover_nmi_blocking()
8073 vmx->vnmi_blocked_time += in vmx_recover_nmi_blocking()
8074 ktime_to_ns(ktime_sub(ktime_get(), vmx->entry_time)); in vmx_recover_nmi_blocking()
8131 static void vmx_complete_interrupts(struct vcpu_vmx *vmx) in vmx_complete_interrupts() argument
8133 __vmx_complete_interrupts(&vmx->vcpu, vmx->idt_vectoring_info, in vmx_complete_interrupts()
8148 static void atomic_switch_perf_msrs(struct vcpu_vmx *vmx) in atomic_switch_perf_msrs() argument
8160 clear_atomic_switch_msr(vmx, msrs[i].msr); in atomic_switch_perf_msrs()
8162 add_atomic_switch_msr(vmx, msrs[i].msr, msrs[i].guest, in atomic_switch_perf_msrs()
8168 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_vcpu_run() local
8172 if (unlikely(!cpu_has_virtual_nmis() && vmx->soft_vnmi_blocked)) in vmx_vcpu_run()
8173 vmx->entry_time = ktime_get(); in vmx_vcpu_run()
8177 if (vmx->emulation_required) in vmx_vcpu_run()
8180 if (vmx->ple_window_dirty) { in vmx_vcpu_run()
8181 vmx->ple_window_dirty = false; in vmx_vcpu_run()
8182 vmcs_write32(PLE_WINDOW, vmx->ple_window); in vmx_vcpu_run()
8185 if (vmx->nested.sync_shadow_vmcs) { in vmx_vcpu_run()
8186 copy_vmcs12_to_shadow(vmx); in vmx_vcpu_run()
8187 vmx->nested.sync_shadow_vmcs = false; in vmx_vcpu_run()
8196 if (unlikely(cr4 != vmx->host_state.vmcs_host_cr4)) { in vmx_vcpu_run()
8198 vmx->host_state.vmcs_host_cr4 = cr4; in vmx_vcpu_run()
8209 atomic_switch_perf_msrs(vmx); in vmx_vcpu_run()
8212 vmx->__launched = vmx->loaded_vmcs->launched; in vmx_vcpu_run()
8286 : : "c"(vmx), "d"((unsigned long)HOST_RSP), in vmx_vcpu_run()
8342 vmx->idt_vectoring_info = vmcs_read32(IDT_VECTORING_INFO_FIELD); in vmx_vcpu_run()
8344 vmx->loaded_vmcs->launched = 1; in vmx_vcpu_run()
8346 vmx->exit_reason = vmcs_read32(VM_EXIT_REASON); in vmx_vcpu_run()
8347 trace_kvm_exit(vmx->exit_reason, vcpu, KVM_ISA_VMX); in vmx_vcpu_run()
8354 if (vmx->nested.nested_run_pending) in vmx_vcpu_run()
8357 vmx->nested.nested_run_pending = 0; in vmx_vcpu_run()
8359 vmx_complete_atomic_exit(vmx); in vmx_vcpu_run()
8360 vmx_recover_nmi_blocking(vmx); in vmx_vcpu_run()
8361 vmx_complete_interrupts(vmx); in vmx_vcpu_run()
8366 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_load_vmcs01() local
8369 if (vmx->loaded_vmcs == &vmx->vmcs01) in vmx_load_vmcs01()
8373 vmx->loaded_vmcs = &vmx->vmcs01; in vmx_load_vmcs01()
8382 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_free_vcpu() local
8385 vmx_disable_pml(vmx); in vmx_free_vcpu()
8386 free_vpid(vmx); in vmx_free_vcpu()
8389 free_nested(vmx); in vmx_free_vcpu()
8390 free_loaded_vmcs(vmx->loaded_vmcs); in vmx_free_vcpu()
8391 kfree(vmx->guest_msrs); in vmx_free_vcpu()
8393 kmem_cache_free(kvm_vcpu_cache, vmx); in vmx_free_vcpu()
8399 struct vcpu_vmx *vmx = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL); in vmx_create_vcpu() local
8402 if (!vmx) in vmx_create_vcpu()
8405 allocate_vpid(vmx); in vmx_create_vcpu()
8407 err = kvm_vcpu_init(&vmx->vcpu, kvm, id); in vmx_create_vcpu()
8411 vmx->guest_msrs = kmalloc(PAGE_SIZE, GFP_KERNEL); in vmx_create_vcpu()
8412 BUILD_BUG_ON(ARRAY_SIZE(vmx_msr_index) * sizeof(vmx->guest_msrs[0]) in vmx_create_vcpu()
8416 if (!vmx->guest_msrs) { in vmx_create_vcpu()
8420 vmx->loaded_vmcs = &vmx->vmcs01; in vmx_create_vcpu()
8421 vmx->loaded_vmcs->vmcs = alloc_vmcs(); in vmx_create_vcpu()
8422 if (!vmx->loaded_vmcs->vmcs) in vmx_create_vcpu()
8426 loaded_vmcs_init(vmx->loaded_vmcs); in vmx_create_vcpu()
8431 vmx_vcpu_load(&vmx->vcpu, cpu); in vmx_create_vcpu()
8432 vmx->vcpu.cpu = cpu; in vmx_create_vcpu()
8433 err = vmx_vcpu_setup(vmx); in vmx_create_vcpu()
8434 vmx_vcpu_put(&vmx->vcpu); in vmx_create_vcpu()
8454 nested_vmx_setup_ctls_msrs(vmx); in vmx_create_vcpu()
8456 vmx->nested.posted_intr_nv = -1; in vmx_create_vcpu()
8457 vmx->nested.current_vmptr = -1ull; in vmx_create_vcpu()
8458 vmx->nested.current_vmcs12 = NULL; in vmx_create_vcpu()
8467 err = vmx_enable_pml(vmx); in vmx_create_vcpu()
8472 return &vmx->vcpu; in vmx_create_vcpu()
8475 free_loaded_vmcs(vmx->loaded_vmcs); in vmx_create_vcpu()
8477 kfree(vmx->guest_msrs); in vmx_create_vcpu()
8479 kvm_vcpu_uninit(&vmx->vcpu); in vmx_create_vcpu()
8481 free_vpid(vmx); in vmx_create_vcpu()
8482 kmem_cache_free(kvm_vcpu_cache, vmx); in vmx_create_vcpu()
8544 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_cpuid_update() local
8547 vmx->rdtscp_enabled = false; in vmx_cpuid_update()
8553 vmx->rdtscp_enabled = true; in vmx_cpuid_update()
8560 if (nested && !vmx->rdtscp_enabled) in vmx_cpuid_update()
8561 vmx->nested.nested_vmx_secondary_ctls_high &= in vmx_cpuid_update()
8662 struct vcpu_vmx *vmx = to_vmx(vcpu); in nested_get_vmcs12_pages() local
8676 if (vmx->nested.apic_access_page) /* shouldn't happen */ in nested_get_vmcs12_pages()
8677 nested_release_page(vmx->nested.apic_access_page); in nested_get_vmcs12_pages()
8678 vmx->nested.apic_access_page = in nested_get_vmcs12_pages()
8687 if (vmx->nested.virtual_apic_page) /* shouldn't happen */ in nested_get_vmcs12_pages()
8688 nested_release_page(vmx->nested.virtual_apic_page); in nested_get_vmcs12_pages()
8689 vmx->nested.virtual_apic_page = in nested_get_vmcs12_pages()
8702 if (!vmx->nested.virtual_apic_page) in nested_get_vmcs12_pages()
8711 if (vmx->nested.pi_desc_page) { /* shouldn't happen */ in nested_get_vmcs12_pages()
8712 kunmap(vmx->nested.pi_desc_page); in nested_get_vmcs12_pages()
8713 nested_release_page(vmx->nested.pi_desc_page); in nested_get_vmcs12_pages()
8715 vmx->nested.pi_desc_page = in nested_get_vmcs12_pages()
8717 if (!vmx->nested.pi_desc_page) in nested_get_vmcs12_pages()
8720 vmx->nested.pi_desc = in nested_get_vmcs12_pages()
8721 (struct pi_desc *)kmap(vmx->nested.pi_desc_page); in nested_get_vmcs12_pages()
8722 if (!vmx->nested.pi_desc) { in nested_get_vmcs12_pages()
8723 nested_release_page_clean(vmx->nested.pi_desc_page); in nested_get_vmcs12_pages()
8726 vmx->nested.pi_desc = in nested_get_vmcs12_pages()
8727 (struct pi_desc *)((void *)vmx->nested.pi_desc + in nested_get_vmcs12_pages()
8738 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_start_preemption_timer() local
8746 vmx_preemption_timer_fn(&vmx->nested.preemption_timer); in vmx_start_preemption_timer()
8753 hrtimer_start(&vmx->nested.preemption_timer, in vmx_start_preemption_timer()
9070 struct vcpu_vmx *vmx = to_vmx(vcpu); in prepare_vmcs02() local
9115 vmcs_write64(GUEST_IA32_DEBUGCTL, vmx->nested.vmcs01_debugctl); in prepare_vmcs02()
9145 vmx->nested.posted_intr_nv = vmcs12->posted_intr_nv; in prepare_vmcs02()
9146 vmx->nested.pi_pending = false; in prepare_vmcs02()
9149 page_to_phys(vmx->nested.pi_desc_page) + in prepare_vmcs02()
9157 vmx->nested.preemption_timer_expired = false; in prepare_vmcs02()
9187 exec_control = vmx_secondary_exec_control(vmx); in prepare_vmcs02()
9188 if (!vmx->rdtscp_enabled) in prepare_vmcs02()
9206 if (!vmx->nested.apic_access_page) in prepare_vmcs02()
9211 page_to_phys(vmx->nested.apic_access_page)); in prepare_vmcs02()
9213 (vm_need_virtualize_apic_accesses(vmx->vcpu.kvm))) { in prepare_vmcs02()
9242 vmx_set_constant_host_state(vmx); in prepare_vmcs02()
9251 vmx->host_rsp = 0; in prepare_vmcs02()
9253 exec_control = vmx_exec_control(vmx); /* L0's desires */ in prepare_vmcs02()
9261 page_to_phys(vmx->nested.virtual_apic_page)); in prepare_vmcs02()
9298 vm_entry_controls_init(vmx, in prepare_vmcs02()
9307 vmcs_write64(GUEST_IA32_PAT, vmx->vcpu.arch.pat); in prepare_vmcs02()
9310 set_cr4_guest_host_mask(vmx); in prepare_vmcs02()
9317 vmx->nested.vmcs01_tsc_offset + vmcs12->tsc_offset); in prepare_vmcs02()
9319 vmcs_write64(TSC_OFFSET, vmx->nested.vmcs01_tsc_offset); in prepare_vmcs02()
9327 vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->vpid); in prepare_vmcs02()
9387 struct vcpu_vmx *vmx = to_vmx(vcpu); in nested_vmx_run() local
9401 copy_shadow_to_vmcs12(vmx); in nested_vmx_run()
9447 vmx->nested.nested_vmx_true_procbased_ctls_low, in nested_vmx_run()
9448 vmx->nested.nested_vmx_procbased_ctls_high) || in nested_vmx_run()
9450 vmx->nested.nested_vmx_secondary_ctls_low, in nested_vmx_run()
9451 vmx->nested.nested_vmx_secondary_ctls_high) || in nested_vmx_run()
9453 vmx->nested.nested_vmx_pinbased_ctls_low, in nested_vmx_run()
9454 vmx->nested.nested_vmx_pinbased_ctls_high) || in nested_vmx_run()
9456 vmx->nested.nested_vmx_true_exit_ctls_low, in nested_vmx_run()
9457 vmx->nested.nested_vmx_exit_ctls_high) || in nested_vmx_run()
9459 vmx->nested.nested_vmx_true_entry_ctls_low, in nested_vmx_run()
9460 vmx->nested.nested_vmx_entry_ctls_high)) in nested_vmx_run()
9529 vmcs02 = nested_get_current_vmcs02(vmx); in nested_vmx_run()
9535 vmx->nested.vmcs01_tsc_offset = vmcs_read64(TSC_OFFSET); in nested_vmx_run()
9538 vmx->nested.vmcs01_debugctl = vmcs_read64(GUEST_IA32_DEBUGCTL); in nested_vmx_run()
9541 vmx->loaded_vmcs = vmcs02; in nested_vmx_run()
9547 vmx_segment_cache_clear(vmx); in nested_vmx_run()
9567 vmx->nested.nested_run_pending = 1; in nested_vmx_run()
9659 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_check_nested_events() local
9662 vmx->nested.preemption_timer_expired) { in vmx_check_nested_events()
9663 if (vmx->nested.nested_run_pending) in vmx_check_nested_events()
9670 if (vmx->nested.nested_run_pending || in vmx_check_nested_events()
9687 if (vmx->nested.nested_run_pending) in vmx_check_nested_events()
10015 struct vcpu_vmx *vmx = to_vmx(vcpu); in nested_vmx_vmexit() local
10019 WARN_ON_ONCE(vmx->nested.nested_run_pending); in nested_vmx_vmexit()
10046 vm_entry_controls_init(vmx, vmcs_read32(VM_ENTRY_CONTROLS)); in nested_vmx_vmexit()
10047 vm_exit_controls_init(vmx, vmcs_read32(VM_EXIT_CONTROLS)); in nested_vmx_vmexit()
10048 vmx_segment_cache_clear(vmx); in nested_vmx_vmexit()
10052 nested_free_vmcs02(vmx, vmx->nested.current_vmptr); in nested_vmx_vmexit()
10057 vmcs_write64(TSC_OFFSET, vmx->nested.vmcs01_tsc_offset); in nested_vmx_vmexit()
10060 vmx->host_rsp = 0; in nested_vmx_vmexit()
10063 if (vmx->nested.apic_access_page) { in nested_vmx_vmexit()
10064 nested_release_page(vmx->nested.apic_access_page); in nested_vmx_vmexit()
10065 vmx->nested.apic_access_page = NULL; in nested_vmx_vmexit()
10067 if (vmx->nested.virtual_apic_page) { in nested_vmx_vmexit()
10068 nested_release_page(vmx->nested.virtual_apic_page); in nested_vmx_vmexit()
10069 vmx->nested.virtual_apic_page = NULL; in nested_vmx_vmexit()
10071 if (vmx->nested.pi_desc_page) { in nested_vmx_vmexit()
10072 kunmap(vmx->nested.pi_desc_page); in nested_vmx_vmexit()
10073 nested_release_page(vmx->nested.pi_desc_page); in nested_vmx_vmexit()
10074 vmx->nested.pi_desc_page = NULL; in nested_vmx_vmexit()
10075 vmx->nested.pi_desc = NULL; in nested_vmx_vmexit()
10089 if (unlikely(vmx->fail)) { in nested_vmx_vmexit()
10090 vmx->fail = 0; in nested_vmx_vmexit()
10095 vmx->nested.sync_shadow_vmcs = true; in nested_vmx_vmexit()
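
A pattern that recurs throughout the listing is the shadow-caching of VMCS control words (see the vm_entry_controls_* and vm_exit_controls_* helpers around source lines 1453-1506): the current control value is kept in a shadow field of struct vcpu_vmx, and the VMCS field is rewritten only when the value actually changes, so repeated set/clear-bit calls do not issue redundant VMWRITEs. Below is a minimal, self-contained sketch of that idiom. It is not the kernel's code: vcpu_ctl_shadow is a stand-in for struct vcpu_vmx, and the vmcs_write32() stub here takes a string name and merely logs the write, unlike the real helper, which takes a VMCS field encoding.

#include <stdint.h>
#include <stdio.h>

/* Stand-in for the real VMCS write; here it only logs the access. */
static void vmcs_write32(const char *field, uint32_t val)
{
	printf("VMWRITE %s = %#x\n", field, (unsigned int)val);
}

/* Simplified stand-in for the shadow field kept in struct vcpu_vmx. */
struct vcpu_ctl_shadow {
	uint32_t vm_entry_controls_shadow;
};

static void vm_entry_controls_init(struct vcpu_ctl_shadow *v, uint32_t val)
{
	vmcs_write32("VM_ENTRY_CONTROLS", val);
	v->vm_entry_controls_shadow = val;
}

static void vm_entry_controls_set(struct vcpu_ctl_shadow *v, uint32_t val)
{
	/* Only touch the VMCS when the cached value is stale. */
	if (v->vm_entry_controls_shadow != val)
		vm_entry_controls_init(v, val);
}

static uint32_t vm_entry_controls_get(struct vcpu_ctl_shadow *v)
{
	return v->vm_entry_controls_shadow;
}

static void vm_entry_controls_setbit(struct vcpu_ctl_shadow *v, uint32_t bit)
{
	vm_entry_controls_set(v, vm_entry_controls_get(v) | bit);
}

static void vm_entry_controls_clearbit(struct vcpu_ctl_shadow *v, uint32_t bit)
{
	vm_entry_controls_set(v, vm_entry_controls_get(v) & ~bit);
}

int main(void)
{
	struct vcpu_ctl_shadow v;

	vm_entry_controls_init(&v, 0x11ff);    /* first VMWRITE */
	vm_entry_controls_setbit(&v, 0x200);   /* second VMWRITE */
	vm_entry_controls_setbit(&v, 0x200);   /* no VMWRITE: value unchanged */
	vm_entry_controls_clearbit(&v, 0x200); /* third VMWRITE */
	return 0;
}

The constants in main() are arbitrary illustration values, not real VM-entry control bits; the point is only that the second setbit call is absorbed by the shadow compare.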