Lines matching refs:to_vmx — every reference to the to_vmx() helper in arch/x86/kvm/vmx.c (source line number, matched fragment, containing function):
561 static inline struct vcpu_vmx *to_vmx(struct kvm_vcpu *vcpu) in to_vmx() function
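
The definition at 561 is the standard container_of() downcast: every struct vcpu_vmx embeds the generic struct kvm_vcpu, and to_vmx() recovers the containing VMX structure from a pointer to that member. A minimal, self-contained userspace replica of the idiom (the struct fields below are illustrative stand-ins, not the real kernel layout):

#include <stddef.h>
#include <stdio.h>

/* Userspace stand-in for the kernel's container_of() macro. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct kvm_vcpu { int vcpu_id; };            /* illustrative stub */

struct vcpu_vmx {
	struct kvm_vcpu vcpu;                /* embedded generic vcpu */
	unsigned long rflags;                /* illustrative VMX state */
};

static inline struct vcpu_vmx *to_vmx(struct kvm_vcpu *vcpu)
{
	return container_of(vcpu, struct vcpu_vmx, vcpu);
}

int main(void)
{
	struct vcpu_vmx vmx = { .vcpu = { .vcpu_id = 0 }, .rflags = 0x2 };
	struct kvm_vcpu *vcpu = &vmx.vcpu;

	printf("rflags = %#lx\n", to_vmx(vcpu)->rflags);
	return 0;
}

container_of() works regardless of where the member sits in the struct; every other hit in this listing is a call site of this one helper.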
784 return to_vmx(vcpu)->nested.current_vmcs12; in get_vmcs12()
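
The hit at 784 is the whole body of get_vmcs12(): a one-line accessor that resolves the VMCS12 currently mapped for a nested (L2) guest via to_vmx(). A sketch, with the field names taken from the fragment and the surrounding struct shapes assumed:

/* Sketch: struct vmcs12 and the nested state layout are assumed. */
static inline struct vmcs12 *get_vmcs12(struct kvm_vcpu *vcpu)
{
	return to_vmx(vcpu)->nested.current_vmcs12;
}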
1575 if (to_vmx(vcpu)->rmode.vm86_active) in update_exception_bitmap()
1811 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_save_host_state()
1921 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_vcpu_load()
1969 __vmx_load_host_state(to_vmx(vcpu)); in vmx_vcpu_put()
1971 __loaded_vmcs_clear(to_vmx(vcpu)->loaded_vmcs); in vmx_vcpu_put()
2048 if (to_vmx(vcpu)->rmode.vm86_active) { in vmx_get_rflags()
2050 save_rflags = to_vmx(vcpu)->rmode.save_rflags; in vmx_get_rflags()
2053 to_vmx(vcpu)->rflags = rflags; in vmx_get_rflags()
2055 return to_vmx(vcpu)->rflags; in vmx_get_rflags()
2061 to_vmx(vcpu)->rflags = rflags; in vmx_set_rflags()
2062 if (to_vmx(vcpu)->rmode.vm86_active) { in vmx_set_rflags()
2063 to_vmx(vcpu)->rmode.save_rflags = rflags; in vmx_set_rflags()
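
The cluster at 2048–2063 is the real-mode (vm86) RFLAGS shadowing pattern: vmx_set_rflags() stashes the guest's intended value in rmode.save_rflags before forcing the VM and IOPL bits that vm86 emulation needs, and vmx_get_rflags() splices the saved bits back in so the guest never observes the forced values. A runnable userspace model of that round trip (a plain variable stands in for the VMCS field and the vmcs_readl()/vmcs_writel() accessors):

#include <assert.h>
#include <stdio.h>

#define X86_EFLAGS_IOPL (3UL << 12)   /* I/O privilege level field */
#define X86_EFLAGS_VM   (1UL << 17)   /* virtual-8086 mode flag */

static unsigned long guest_rflags_vmcs; /* stand-in for the VMCS field */
static unsigned long save_rflags;       /* models vmx->rmode.save_rflags */
static int vm86_active = 1;             /* models vmx->rmode.vm86_active */

/* Models vmx_set_rflags(): remember what the guest asked for, then force
 * VM=1 and IOPL=3 so real-mode code can run under vm86 emulation. */
static void set_rflags(unsigned long rflags)
{
	if (vm86_active) {
		save_rflags = rflags;
		rflags |= X86_EFLAGS_IOPL | X86_EFLAGS_VM;
	}
	guest_rflags_vmcs = rflags;
}

/* Models vmx_get_rflags(): splice the saved VM/IOPL bits back in. */
static unsigned long get_rflags(void)
{
	unsigned long rflags = guest_rflags_vmcs;

	if (vm86_active) {
		rflags &= ~(X86_EFLAGS_IOPL | X86_EFLAGS_VM);
		rflags |= save_rflags & (X86_EFLAGS_IOPL | X86_EFLAGS_VM);
	}
	return rflags;
}

int main(void)
{
	set_rflags(0x2);                           /* guest writes VM=0 */
	assert(guest_rflags_vmcs & X86_EFLAGS_VM); /* hardware sees VM=1 */
	assert(get_rflags() == 0x2);               /* guest reads its value */
	printf("round trip ok\n");
	return 0;
}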
2121 nested_vmx_vmexit(vcpu, to_vmx(vcpu)->exit_reason, in nested_vmx_check_exception()
2131 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_queue_exception()
2271 to_vmx(vcpu)->nested.vmcs01_tsc_offset : in vmx_read_l1_tsc()
2310 to_vmx(vcpu)->nested.vmcs01_tsc_offset = offset; in vmx_write_tsc_offset()
2330 to_vmx(vcpu)->nested.vmcs01_tsc_offset += adjustment; in vmx_adjust_tsc_offset()
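
The hits at 2271–2330 show the nested TSC-offset bookkeeping: while L2 runs, the hardware TSC_OFFSET belongs to vmcs02, so L1's offset is kept on the side in nested.vmcs01_tsc_offset and any adjustment must reach both. A hedged reconstruction of vmx_adjust_tsc_offset() along those lines (vmcs_read64()/vmcs_write64() and is_guest_mode() as in the kernel; tracing and other details omitted):

static void vmx_adjust_tsc_offset(struct kvm_vcpu *vcpu, s64 adjustment, bool host)
{
	u64 offset = vmcs_read64(TSC_OFFSET);

	/* Apply the adjustment to whichever VMCS is currently loaded. */
	vmcs_write64(TSC_OFFSET, offset + adjustment);
	if (is_guest_mode(vcpu))
		/* Running L2: the adjustment must also reach L1's saved offset. */
		to_vmx(vcpu)->nested.vmcs01_tsc_offset += adjustment;
}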
2545 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_get_vmx_msr()
2662 vmx_load_host_state(to_vmx(vcpu)); in vmx_get_msr()
2663 data = to_vmx(vcpu)->msr_guest_kernel_gs_base; in vmx_get_msr()
2688 data = to_vmx(vcpu)->nested.msr_ia32_feature_control; in vmx_get_msr()
2700 if (!to_vmx(vcpu)->rdtscp_enabled) in vmx_get_msr()
2704 msr = find_msr_entry(to_vmx(vcpu), msr_index); in vmx_get_msr()
2725 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_set_msr()
2781 (to_vmx(vcpu)->nested.msr_ia32_feature_control & in vmx_set_msr()
3299 struct vcpu_vmx *vmx = to_vmx(vcpu); in enter_pmode()
3372 struct vcpu_vmx *vmx = to_vmx(vcpu); in enter_rmode()
3419 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_set_efer()
3429 vmx_load_host_state(to_vmx(vcpu)); in vmx_set_efer()
3432 vm_entry_controls_setbit(to_vmx(vcpu), VM_ENTRY_IA32E_MODE); in vmx_set_efer()
3435 vm_entry_controls_clearbit(to_vmx(vcpu), VM_ENTRY_IA32E_MODE); in vmx_set_efer()
3448 vmx_segment_cache_clear(to_vmx(vcpu)); in enter_lmode()
3463 vm_entry_controls_clearbit(to_vmx(vcpu), VM_ENTRY_IA32E_MODE); in exit_lmode()
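
The vm_entry_controls_setbit()/clearbit() calls at 3432–3463 toggle VM_ENTRY_IA32E_MODE as the guest enters or leaves long mode, and they go through a software shadow of the VM_ENTRY_CONTROLS field so the comparatively expensive VMREAD can be skipped on reads. A sketch of the shadow-caching helpers this implies, assuming a vm_entry_controls_shadow field in struct vcpu_vmx:

static inline void vm_entry_controls_set(struct vcpu_vmx *vmx, u32 val)
{
	vmcs_write32(VM_ENTRY_CONTROLS, val);
	vmx->vm_entry_controls_shadow = val;   /* keep the cached copy in sync */
}

static inline u32 vm_entry_controls_get(struct vcpu_vmx *vmx)
{
	return vmx->vm_entry_controls_shadow;  /* no VMREAD needed */
}

static inline void vm_entry_controls_setbit(struct vcpu_vmx *vmx, u32 val)
{
	vm_entry_controls_set(vmx, vm_entry_controls_get(vmx) | val);
}

static inline void vm_entry_controls_clearbit(struct vcpu_vmx *vmx, u32 val)
{
	vm_entry_controls_set(vmx, vm_entry_controls_get(vmx) & ~val);
}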
3471 vpid_sync_context(to_vmx(vcpu)); in vmx_flush_tlb()
3567 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_set_cr0()
3650 (to_vmx(vcpu)->rmode.vm86_active ? in vmx_set_cr4()
3663 if (to_vmx(vcpu)->nested.vmxon && in vmx_set_cr4()
3695 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_get_segment()
3733 if (to_vmx(vcpu)->rmode.vm86_active) { in vmx_get_segment_base()
3737 return vmx_read_guest_seg_base(to_vmx(vcpu), seg); in vmx_get_segment_base()
3742 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_get_cpl()
3775 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_set_segment()
3815 u32 ar = vmx_read_guest_seg_ar(to_vmx(vcpu), VCPU_SREG_CS); in vmx_get_cs_db_l_bits()
4368 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_complete_nested_posted_interrupt()
4418 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_deliver_nested_posted_interrupt()
4443 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_deliver_posted_interrupt()
4461 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_sync_pir_to_irr()
4731 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_vcpu_reset()
4870 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_inject_irq()
4897 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_inject_nmi()
4929 return to_vmx(vcpu)->soft_vnmi_blocked; in vmx_get_nmi_mask()
4930 if (to_vmx(vcpu)->nmi_known_unmasked) in vmx_get_nmi_mask()
4937 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_set_nmi_mask()
4957 if (to_vmx(vcpu)->nested.nested_run_pending) in vmx_nmi_allowed()
4960 if (!cpu_has_virtual_nmis() && to_vmx(vcpu)->soft_vnmi_blocked) in vmx_nmi_allowed()
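
The NMI hits at 4929–4960 reflect two generations of hardware: with virtual NMIs the blocking state lives in the VMCS interruptibility field (with nmi_known_unmasked as a cached fast path), while without them KVM tracks it in software as soft_vnmi_blocked. A hedged reconstruction of vmx_get_nmi_mask() from those fragments:

static bool vmx_get_nmi_mask(struct kvm_vcpu *vcpu)
{
	if (!cpu_has_virtual_nmis())
		/* Old hardware: NMI blocking is emulated in software. */
		return to_vmx(vcpu)->soft_vnmi_blocked;
	if (to_vmx(vcpu)->nmi_known_unmasked)
		/* Cached fast path: skip the VMREAD entirely. */
		return false;
	return vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & GUEST_INTR_STATE_NMI;
}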
4970 return (!to_vmx(vcpu)->nested.nested_run_pending && in vmx_interrupt_allowed()
5001 to_vmx(vcpu)->vcpu.arch.event_exit_inst_len = in rmode_exception()
5079 struct vcpu_vmx *vmx = to_vmx(vcpu); in handle_exception()
5238 if (to_vmx(vcpu)->nested.nested_vmx_secondary_ctls_high & in nested_cr0_valid()
5271 if (to_vmx(vcpu)->nested.vmxon && in handle_set_cr0()
5679 struct vcpu_vmx *vmx = to_vmx(vcpu); in handle_task_switch()
5773 if (!(to_vmx(vcpu)->idt_vectoring_info & VECTORING_INFO_VALID_MASK) && in handle_ept_violation()
5910 struct vcpu_vmx *vmx = to_vmx(vcpu); in handle_invalid_guest_state()
5988 struct vcpu_vmx *vmx = to_vmx(vcpu); in grow_ple_window()
6001 struct vcpu_vmx *vmx = to_vmx(vcpu); in shrink_ple_window()
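
grow_ple_window()/shrink_ple_window() at 5988–6001 adapt the pause-loop-exiting window per vCPU: it is widened when PAUSE exits suggest the vCPU is spinning and narrowed again otherwise. A hypothetical illustration of such a clamped grow/shrink policy (the constants and the exact arithmetic here are invented for the example, not taken from the kernel):

#include <stdio.h>

/* Hypothetical tuning knobs, loosely modeled on the module parameters. */
enum { PLE_WINDOW_MIN = 4096, PLE_WINDOW_MAX = 1 << 20 };
static const int grow_factor = 2, shrink_divisor = 2;

static int grow_ple_window(int window)
{
	int w = window * grow_factor;
	return w > PLE_WINDOW_MAX ? PLE_WINDOW_MAX : w;
}

static int shrink_ple_window(int window)
{
	int w = window / shrink_divisor;
	return w < PLE_WINDOW_MIN ? PLE_WINDOW_MIN : w;
}

int main(void)
{
	int window = PLE_WINDOW_MIN;

	window = grow_ple_window(window);    /* PAUSE exit: back off */
	window = shrink_ple_window(window);  /* other exit: tighten again */
	printf("window = %d\n", window);
	return 0;
}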
6398 if (to_vmx(vcpu)->nested.current_vmptr == -1ull) { in nested_vmx_failValid()
6503 struct vcpu_vmx *vmx = to_vmx(vcpu); in nested_vmx_check_vmptr()
6595 struct vcpu_vmx *vmx = to_vmx(vcpu); in handle_vmon()
6671 struct vcpu_vmx *vmx = to_vmx(vcpu); in nested_vmx_check_permission()
6757 free_nested(to_vmx(vcpu)); in handle_vmoff()
6766 struct vcpu_vmx *vmx = to_vmx(vcpu); in handle_vmclear()
6996 struct vcpu_vmx *vmx = to_vmx(vcpu); in nested_vmx_check_vmcs12()
7104 struct vcpu_vmx *vmx = to_vmx(vcpu); in handle_vmptrld()
7168 (void *)&to_vmx(vcpu)->nested.current_vmptr, in handle_vmptrst()
7181 struct vcpu_vmx *vmx = to_vmx(vcpu); in handle_invept()
7263 if (!(to_vmx(vcpu)->idt_vectoring_info & VECTORING_INFO_VALID_MASK) && in handle_pml_full()
7500 struct vcpu_vmx *vmx = to_vmx(vcpu); in nested_vmx_exit_handled()
7724 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_handle_exit()
7859 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_set_apic_access_page_addr()
7991 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_handle_external_intr()
8168 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_vcpu_run()
8366 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_load_vmcs01()
8382 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_free_vcpu()
8544 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_cpuid_update()
8618 to_vmx(vcpu)->nested.nested_vmx_ept_caps & in nested_ept_init_mmu_context()
8652 nested_vmx_vmexit(vcpu, to_vmx(vcpu)->exit_reason, in vmx_inject_page_fault_nested()
8662 struct vcpu_vmx *vmx = to_vmx(vcpu); in nested_get_vmcs12_pages()
8738 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_start_preemption_timer()
9070 struct vcpu_vmx *vmx = to_vmx(vcpu); in prepare_vmcs02()
9387 struct vcpu_vmx *vmx = to_vmx(vcpu); in nested_vmx_run()
9659 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_check_nested_events()
9699 hrtimer_get_remaining(&to_vmx(vcpu)->nested.preemption_timer); in vmx_get_preemption_timer_value()
9784 hrtimer_cancel(&to_vmx(vcpu)->nested.preemption_timer); in prepare_vmcs12()
9808 (vm_entry_controls_get(to_vmx(vcpu)) & VM_ENTRY_IA32E_MODE); in prepare_vmcs12()
10015 struct vcpu_vmx *vmx = to_vmx(vcpu); in nested_vmx_vmexit()
10108 free_nested(to_vmx(vcpu)); in vmx_leave_nested()
10127 to_vmx(vcpu)->nested.sync_shadow_vmcs = true; in nested_vmx_entry_failure()