Lines Matching refs:svm
205 static void svm_complete_interrupts(struct vcpu_svm *svm);
207 static int nested_svm_exit_handled(struct vcpu_svm *svm);
208 static int nested_svm_intercept(struct vcpu_svm *svm);
209 static int nested_svm_vmexit(struct vcpu_svm *svm);
210 static int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr,
254 static void recalc_intercepts(struct vcpu_svm *svm) in recalc_intercepts() argument
259 mark_dirty(svm->vmcb, VMCB_INTERCEPTS); in recalc_intercepts()
261 if (!is_guest_mode(&svm->vcpu)) in recalc_intercepts()
264 c = &svm->vmcb->control; in recalc_intercepts()
265 h = &svm->nested.hsave->control; in recalc_intercepts()
266 g = &svm->nested; in recalc_intercepts()
274 static inline struct vmcb *get_host_vmcb(struct vcpu_svm *svm) in get_host_vmcb() argument
276 if (is_guest_mode(&svm->vcpu)) in get_host_vmcb()
277 return svm->nested.hsave; in get_host_vmcb()
279 return svm->vmcb; in get_host_vmcb()
282 static inline void set_cr_intercept(struct vcpu_svm *svm, int bit) in set_cr_intercept() argument
284 struct vmcb *vmcb = get_host_vmcb(svm); in set_cr_intercept()
288 recalc_intercepts(svm); in set_cr_intercept()
291 static inline void clr_cr_intercept(struct vcpu_svm *svm, int bit) in clr_cr_intercept() argument
293 struct vmcb *vmcb = get_host_vmcb(svm); in clr_cr_intercept()
297 recalc_intercepts(svm); in clr_cr_intercept()
300 static inline bool is_cr_intercept(struct vcpu_svm *svm, int bit) in is_cr_intercept() argument
302 struct vmcb *vmcb = get_host_vmcb(svm); in is_cr_intercept()
307 static inline void set_dr_intercepts(struct vcpu_svm *svm) in set_dr_intercepts() argument
309 struct vmcb *vmcb = get_host_vmcb(svm); in set_dr_intercepts()
328 recalc_intercepts(svm); in set_dr_intercepts()
331 static inline void clr_dr_intercepts(struct vcpu_svm *svm) in clr_dr_intercepts() argument
333 struct vmcb *vmcb = get_host_vmcb(svm); in clr_dr_intercepts()
337 recalc_intercepts(svm); in clr_dr_intercepts()
340 static inline void set_exception_intercept(struct vcpu_svm *svm, int bit) in set_exception_intercept() argument
342 struct vmcb *vmcb = get_host_vmcb(svm); in set_exception_intercept()
346 recalc_intercepts(svm); in set_exception_intercept()
349 static inline void clr_exception_intercept(struct vcpu_svm *svm, int bit) in clr_exception_intercept() argument
351 struct vmcb *vmcb = get_host_vmcb(svm); in clr_exception_intercept()
355 recalc_intercepts(svm); in clr_exception_intercept()
358 static inline void set_intercept(struct vcpu_svm *svm, int bit) in set_intercept() argument
360 struct vmcb *vmcb = get_host_vmcb(svm); in set_intercept()
364 recalc_intercepts(svm); in set_intercept()
367 static inline void clr_intercept(struct vcpu_svm *svm, int bit) in clr_intercept() argument
369 struct vmcb *vmcb = get_host_vmcb(svm); in clr_intercept()
373 recalc_intercepts(svm); in clr_intercept()
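All of the set_/clr_ helpers above share one shape: fetch the VMCB that holds the host's view via get_host_vmcb() (the hsave area while the vCPU is in guest mode, the live VMCB otherwise), flip the requested bit there, and call recalc_intercepts() so the active VMCB ends up with the combined host and nested-guest intercepts. A minimal sketch of that shape; the bit operation in the middle is reconstructed from context, not quoted from this listing:

	/* Sketch of the shared helper pattern used by set_intercept() and
	 * friends; the exact bit twiddling is an assumption. */
	static inline void set_intercept(struct vcpu_svm *svm, int bit)
	{
		struct vmcb *vmcb = get_host_vmcb(svm);	/* hsave while nested */

		vmcb->control.intercept |= (1ULL << bit);

		recalc_intercepts(svm);			/* re-merge host and L1 bits */
	}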
376 static inline void enable_gif(struct vcpu_svm *svm) in enable_gif() argument
378 svm->vcpu.arch.hflags |= HF_GIF_MASK; in enable_gif()
381 static inline void disable_gif(struct vcpu_svm *svm) in disable_gif() argument
383 svm->vcpu.arch.hflags &= ~HF_GIF_MASK; in disable_gif()
386 static inline bool gif_set(struct vcpu_svm *svm) in gif_set() argument
388 return !!(svm->vcpu.arch.hflags & HF_GIF_MASK); in gif_set()
491 struct vcpu_svm *svm = to_svm(vcpu); in svm_get_interrupt_shadow() local
494 if (svm->vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK) in svm_get_interrupt_shadow()
501 struct vcpu_svm *svm = to_svm(vcpu); in svm_set_interrupt_shadow() local
504 svm->vmcb->control.int_state &= ~SVM_INTERRUPT_SHADOW_MASK; in svm_set_interrupt_shadow()
506 svm->vmcb->control.int_state |= SVM_INTERRUPT_SHADOW_MASK; in svm_set_interrupt_shadow()
512 struct vcpu_svm *svm = to_svm(vcpu); in skip_emulated_instruction() local
514 if (svm->vmcb->control.next_rip != 0) { in skip_emulated_instruction()
516 svm->next_rip = svm->vmcb->control.next_rip; in skip_emulated_instruction()
519 if (!svm->next_rip) { in skip_emulated_instruction()
525 if (svm->next_rip - kvm_rip_read(vcpu) > MAX_INST_SIZE) in skip_emulated_instruction()
527 __func__, kvm_rip_read(vcpu), svm->next_rip); in skip_emulated_instruction()
529 kvm_rip_write(vcpu, svm->next_rip); in skip_emulated_instruction()
537 struct vcpu_svm *svm = to_svm(vcpu); in svm_queue_exception() local
544 nested_svm_check_exception(svm, nr, has_error_code, error_code)) in svm_queue_exception()
548 unsigned long rip, old_rip = kvm_rip_read(&svm->vcpu); in svm_queue_exception()
557 skip_emulated_instruction(&svm->vcpu); in svm_queue_exception()
558 rip = kvm_rip_read(&svm->vcpu); in svm_queue_exception()
559 svm->int3_rip = rip + svm->vmcb->save.cs.base; in svm_queue_exception()
560 svm->int3_injected = rip - old_rip; in svm_queue_exception()
563 svm->vmcb->control.event_inj = nr in svm_queue_exception()
567 svm->vmcb->control.event_inj_err = error_code; in svm_queue_exception()
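svm_queue_exception() records int3_rip and int3_injected because SVM event injection does not advance RIP for software exceptions: the code skips past the INT3/INTO first and remembers how far RIP moved. If the injection never actually lands, svm_complete_interrupts() (see lines 3837-3839 further down) rewinds by that amount. A hedged sketch of the rollback, assuming int3_injected was already read back from the saved state:

	/* Rewind the manual RIP advance if the emulated INT3 was not delivered;
	 * names follow the listing, the surrounding switch is omitted. */
	if (int3_injected &&
	    kvm_is_linear_rip(&svm->vcpu, svm->int3_rip))
		kvm_rip_write(&svm->vcpu,
			      kvm_rip_read(&svm->vcpu) - int3_injected);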
845 static void svm_enable_lbrv(struct vcpu_svm *svm) in svm_enable_lbrv() argument
847 u32 *msrpm = svm->msrpm; in svm_enable_lbrv()
849 svm->vmcb->control.lbr_ctl = 1; in svm_enable_lbrv()
856 static void svm_disable_lbrv(struct vcpu_svm *svm) in svm_disable_lbrv() argument
858 u32 *msrpm = svm->msrpm; in svm_disable_lbrv()
860 svm->vmcb->control.lbr_ctl = 0; in svm_disable_lbrv()
986 struct vcpu_svm *svm = to_svm(vcpu); in svm_scale_tsc() local
989 if (svm->tsc_ratio != TSC_RATIO_DEFAULT) in svm_scale_tsc()
990 _tsc = __scale_tsc(svm->tsc_ratio, tsc); in svm_scale_tsc()
997 struct vcpu_svm *svm = to_svm(vcpu); in svm_set_tsc_khz() local
1003 svm->tsc_ratio = TSC_RATIO_DEFAULT; in svm_set_tsc_khz()
1028 svm->tsc_ratio = ratio; in svm_set_tsc_khz()
1033 struct vcpu_svm *svm = to_svm(vcpu); in svm_read_tsc_offset() local
1035 return svm->vmcb->control.tsc_offset; in svm_read_tsc_offset()
1040 struct vcpu_svm *svm = to_svm(vcpu); in svm_write_tsc_offset() local
1044 g_tsc_offset = svm->vmcb->control.tsc_offset - in svm_write_tsc_offset()
1045 svm->nested.hsave->control.tsc_offset; in svm_write_tsc_offset()
1046 svm->nested.hsave->control.tsc_offset = offset; in svm_write_tsc_offset()
1049 svm->vmcb->control.tsc_offset, in svm_write_tsc_offset()
1052 svm->vmcb->control.tsc_offset = offset + g_tsc_offset; in svm_write_tsc_offset()
1054 mark_dirty(svm->vmcb, VMCB_INTERCEPTS); in svm_write_tsc_offset()
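The arithmetic in svm_write_tsc_offset() keeps two offsets separate while nested: hsave carries L1's own offset, and the live VMCB carries L1's offset plus the delta L1 programmed for its L2 guest. Condensed from the lines above (the trace call at line 1049 is left out):

	u64 g_tsc_offset = 0;

	if (is_guest_mode(vcpu)) {
		/* Delta L1 gave its nested guest on top of L1's own offset. */
		g_tsc_offset = svm->vmcb->control.tsc_offset -
			       svm->nested.hsave->control.tsc_offset;
		svm->nested.hsave->control.tsc_offset = offset;	/* new L1 offset */
	}

	svm->vmcb->control.tsc_offset = offset + g_tsc_offset;
	mark_dirty(svm->vmcb, VMCB_INTERCEPTS);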
1059 struct vcpu_svm *svm = to_svm(vcpu); in svm_adjust_tsc_offset() local
1062 if (svm->tsc_ratio != TSC_RATIO_DEFAULT) in svm_adjust_tsc_offset()
1067 svm->vmcb->control.tsc_offset += adjustment; in svm_adjust_tsc_offset()
1069 svm->nested.hsave->control.tsc_offset += adjustment; in svm_adjust_tsc_offset()
1072 svm->vmcb->control.tsc_offset - adjustment, in svm_adjust_tsc_offset()
1073 svm->vmcb->control.tsc_offset); in svm_adjust_tsc_offset()
1075 mark_dirty(svm->vmcb, VMCB_INTERCEPTS); in svm_adjust_tsc_offset()
1087 static void init_vmcb(struct vcpu_svm *svm) in init_vmcb() argument
1089 struct vmcb_control_area *control = &svm->vmcb->control; in init_vmcb()
1090 struct vmcb_save_area *save = &svm->vmcb->save; in init_vmcb()
1092 svm->vcpu.fpu_active = 1; in init_vmcb()
1093 svm->vcpu.arch.hflags = 0; in init_vmcb()
1095 set_cr_intercept(svm, INTERCEPT_CR0_READ); in init_vmcb()
1096 set_cr_intercept(svm, INTERCEPT_CR3_READ); in init_vmcb()
1097 set_cr_intercept(svm, INTERCEPT_CR4_READ); in init_vmcb()
1098 set_cr_intercept(svm, INTERCEPT_CR0_WRITE); in init_vmcb()
1099 set_cr_intercept(svm, INTERCEPT_CR3_WRITE); in init_vmcb()
1100 set_cr_intercept(svm, INTERCEPT_CR4_WRITE); in init_vmcb()
1101 set_cr_intercept(svm, INTERCEPT_CR8_WRITE); in init_vmcb()
1103 set_dr_intercepts(svm); in init_vmcb()
1105 set_exception_intercept(svm, PF_VECTOR); in init_vmcb()
1106 set_exception_intercept(svm, UD_VECTOR); in init_vmcb()
1107 set_exception_intercept(svm, MC_VECTOR); in init_vmcb()
1108 set_exception_intercept(svm, AC_VECTOR); in init_vmcb()
1109 set_exception_intercept(svm, DB_VECTOR); in init_vmcb()
1111 set_intercept(svm, INTERCEPT_INTR); in init_vmcb()
1112 set_intercept(svm, INTERCEPT_NMI); in init_vmcb()
1113 set_intercept(svm, INTERCEPT_SMI); in init_vmcb()
1114 set_intercept(svm, INTERCEPT_SELECTIVE_CR0); in init_vmcb()
1115 set_intercept(svm, INTERCEPT_RDPMC); in init_vmcb()
1116 set_intercept(svm, INTERCEPT_CPUID); in init_vmcb()
1117 set_intercept(svm, INTERCEPT_INVD); in init_vmcb()
1118 set_intercept(svm, INTERCEPT_HLT); in init_vmcb()
1119 set_intercept(svm, INTERCEPT_INVLPG); in init_vmcb()
1120 set_intercept(svm, INTERCEPT_INVLPGA); in init_vmcb()
1121 set_intercept(svm, INTERCEPT_IOIO_PROT); in init_vmcb()
1122 set_intercept(svm, INTERCEPT_MSR_PROT); in init_vmcb()
1123 set_intercept(svm, INTERCEPT_TASK_SWITCH); in init_vmcb()
1124 set_intercept(svm, INTERCEPT_SHUTDOWN); in init_vmcb()
1125 set_intercept(svm, INTERCEPT_VMRUN); in init_vmcb()
1126 set_intercept(svm, INTERCEPT_VMMCALL); in init_vmcb()
1127 set_intercept(svm, INTERCEPT_VMLOAD); in init_vmcb()
1128 set_intercept(svm, INTERCEPT_VMSAVE); in init_vmcb()
1129 set_intercept(svm, INTERCEPT_STGI); in init_vmcb()
1130 set_intercept(svm, INTERCEPT_CLGI); in init_vmcb()
1131 set_intercept(svm, INTERCEPT_SKINIT); in init_vmcb()
1132 set_intercept(svm, INTERCEPT_WBINVD); in init_vmcb()
1133 set_intercept(svm, INTERCEPT_MONITOR); in init_vmcb()
1134 set_intercept(svm, INTERCEPT_MWAIT); in init_vmcb()
1135 set_intercept(svm, INTERCEPT_XSETBV); in init_vmcb()
1138 control->msrpm_base_pa = __pa(svm->msrpm); in init_vmcb()
1160 svm_set_efer(&svm->vcpu, 0); in init_vmcb()
1162 kvm_set_rflags(&svm->vcpu, 2); in init_vmcb()
1164 svm->vcpu.arch.regs[VCPU_REGS_RIP] = save->rip; in init_vmcb()
1170 svm->vcpu.arch.cr0 = 0; in init_vmcb()
1171 (void)kvm_set_cr0(&svm->vcpu, X86_CR0_NW | X86_CR0_CD | X86_CR0_ET); in init_vmcb()
1179 clr_intercept(svm, INTERCEPT_INVLPG); in init_vmcb()
1180 clr_exception_intercept(svm, PF_VECTOR); in init_vmcb()
1181 clr_cr_intercept(svm, INTERCEPT_CR3_READ); in init_vmcb()
1182 clr_cr_intercept(svm, INTERCEPT_CR3_WRITE); in init_vmcb()
1187 svm->asid_generation = 0; in init_vmcb()
1189 svm->nested.vmcb = 0; in init_vmcb()
1190 svm->vcpu.arch.hflags = 0; in init_vmcb()
1194 set_intercept(svm, INTERCEPT_PAUSE); in init_vmcb()
1197 mark_all_dirty(svm->vmcb); in init_vmcb()
1199 enable_gif(svm); in init_vmcb()
1204 struct vcpu_svm *svm = to_svm(vcpu); in svm_vcpu_reset() local
1208 init_vmcb(svm); in svm_vcpu_reset()
1216 struct vcpu_svm *svm; in svm_create_vcpu() local
1223 svm = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL); in svm_create_vcpu()
1224 if (!svm) { in svm_create_vcpu()
1229 svm->tsc_ratio = TSC_RATIO_DEFAULT; in svm_create_vcpu()
1231 err = kvm_vcpu_init(&svm->vcpu, kvm, id); in svm_create_vcpu()
1252 svm->nested.hsave = page_address(hsave_page); in svm_create_vcpu()
1254 svm->msrpm = page_address(msrpm_pages); in svm_create_vcpu()
1255 svm_vcpu_init_msrpm(svm->msrpm); in svm_create_vcpu()
1257 svm->nested.msrpm = page_address(nested_msrpm_pages); in svm_create_vcpu()
1258 svm_vcpu_init_msrpm(svm->nested.msrpm); in svm_create_vcpu()
1260 svm->vmcb = page_address(page); in svm_create_vcpu()
1261 clear_page(svm->vmcb); in svm_create_vcpu()
1262 svm->vmcb_pa = page_to_pfn(page) << PAGE_SHIFT; in svm_create_vcpu()
1263 svm->asid_generation = 0; in svm_create_vcpu()
1264 init_vmcb(svm); in svm_create_vcpu()
1266 svm->vcpu.arch.apic_base = APIC_DEFAULT_PHYS_BASE | in svm_create_vcpu()
1268 if (kvm_vcpu_is_reset_bsp(&svm->vcpu)) in svm_create_vcpu()
1269 svm->vcpu.arch.apic_base |= MSR_IA32_APICBASE_BSP; in svm_create_vcpu()
1271 svm_init_osvw(&svm->vcpu); in svm_create_vcpu()
1273 return &svm->vcpu; in svm_create_vcpu()
1282 kvm_vcpu_uninit(&svm->vcpu); in svm_create_vcpu()
1284 kmem_cache_free(kvm_vcpu_cache, svm); in svm_create_vcpu()
1291 struct vcpu_svm *svm = to_svm(vcpu); in svm_free_vcpu() local
1293 __free_page(pfn_to_page(svm->vmcb_pa >> PAGE_SHIFT)); in svm_free_vcpu()
1294 __free_pages(virt_to_page(svm->msrpm), MSRPM_ALLOC_ORDER); in svm_free_vcpu()
1295 __free_page(virt_to_page(svm->nested.hsave)); in svm_free_vcpu()
1296 __free_pages(virt_to_page(svm->nested.msrpm), MSRPM_ALLOC_ORDER); in svm_free_vcpu()
1298 kmem_cache_free(kvm_vcpu_cache, svm); in svm_free_vcpu()
1303 struct vcpu_svm *svm = to_svm(vcpu); in svm_vcpu_load() local
1307 svm->asid_generation = 0; in svm_vcpu_load()
1308 mark_all_dirty(svm->vmcb); in svm_vcpu_load()
1314 savesegment(fs, svm->host.fs); in svm_vcpu_load()
1315 savesegment(gs, svm->host.gs); in svm_vcpu_load()
1316 svm->host.ldt = kvm_read_ldt(); in svm_vcpu_load()
1319 rdmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]); in svm_vcpu_load()
1322 svm->tsc_ratio != __this_cpu_read(current_tsc_ratio)) { in svm_vcpu_load()
1323 __this_cpu_write(current_tsc_ratio, svm->tsc_ratio); in svm_vcpu_load()
1324 wrmsrl(MSR_AMD64_TSC_RATIO, svm->tsc_ratio); in svm_vcpu_load()
1330 struct vcpu_svm *svm = to_svm(vcpu); in svm_vcpu_put() local
1334 kvm_load_ldt(svm->host.ldt); in svm_vcpu_put()
1336 loadsegment(fs, svm->host.fs); in svm_vcpu_put()
1338 load_gs_index(svm->host.gs); in svm_vcpu_put()
1341 loadsegment(gs, svm->host.gs); in svm_vcpu_put()
1345 wrmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]); in svm_vcpu_put()
1375 static void svm_set_vintr(struct vcpu_svm *svm) in svm_set_vintr() argument
1377 set_intercept(svm, INTERCEPT_VINTR); in svm_set_vintr()
1380 static void svm_clear_vintr(struct vcpu_svm *svm) in svm_clear_vintr() argument
1382 clr_intercept(svm, INTERCEPT_VINTR); in svm_clear_vintr()
1487 struct vcpu_svm *svm = to_svm(vcpu); in svm_get_idt() local
1489 dt->size = svm->vmcb->save.idtr.limit; in svm_get_idt()
1490 dt->address = svm->vmcb->save.idtr.base; in svm_get_idt()
1495 struct vcpu_svm *svm = to_svm(vcpu); in svm_set_idt() local
1497 svm->vmcb->save.idtr.limit = dt->size; in svm_set_idt()
1498  svm->vmcb->save.idtr.base = dt->address; in svm_set_idt()
1499 mark_dirty(svm->vmcb, VMCB_DT); in svm_set_idt()
1504 struct vcpu_svm *svm = to_svm(vcpu); in svm_get_gdt() local
1506 dt->size = svm->vmcb->save.gdtr.limit; in svm_get_gdt()
1507 dt->address = svm->vmcb->save.gdtr.base; in svm_get_gdt()
1512 struct vcpu_svm *svm = to_svm(vcpu); in svm_set_gdt() local
1514 svm->vmcb->save.gdtr.limit = dt->size; in svm_set_gdt()
1515  svm->vmcb->save.gdtr.base = dt->address; in svm_set_gdt()
1516 mark_dirty(svm->vmcb, VMCB_DT); in svm_set_gdt()
1531 static void update_cr0_intercept(struct vcpu_svm *svm) in update_cr0_intercept() argument
1533 ulong gcr0 = svm->vcpu.arch.cr0; in update_cr0_intercept()
1534 u64 *hcr0 = &svm->vmcb->save.cr0; in update_cr0_intercept()
1536 if (!svm->vcpu.fpu_active) in update_cr0_intercept()
1542 mark_dirty(svm->vmcb, VMCB_CR); in update_cr0_intercept()
1544 if (gcr0 == *hcr0 && svm->vcpu.fpu_active) { in update_cr0_intercept()
1545 clr_cr_intercept(svm, INTERCEPT_CR0_READ); in update_cr0_intercept()
1546 clr_cr_intercept(svm, INTERCEPT_CR0_WRITE); in update_cr0_intercept()
1548 set_cr_intercept(svm, INTERCEPT_CR0_READ); in update_cr0_intercept()
1549 set_cr_intercept(svm, INTERCEPT_CR0_WRITE); in update_cr0_intercept()
1555 struct vcpu_svm *svm = to_svm(vcpu); in svm_set_cr0() local
1561 svm->vmcb->save.efer |= EFER_LMA | EFER_LME; in svm_set_cr0()
1566 svm->vmcb->save.efer &= ~(EFER_LMA | EFER_LME); in svm_set_cr0()
1583 svm->vmcb->save.cr0 = cr0; in svm_set_cr0()
1584 mark_dirty(svm->vmcb, VMCB_CR); in svm_set_cr0()
1585 update_cr0_intercept(svm); in svm_set_cr0()
1611 struct vcpu_svm *svm = to_svm(vcpu); in svm_set_segment() local
1637 svm->vmcb->save.cpl = (s->attrib >> SVM_SELECTOR_DPL_SHIFT) & 3; in svm_set_segment()
1639 mark_dirty(svm->vmcb, VMCB_SEG); in svm_set_segment()
1644 struct vcpu_svm *svm = to_svm(vcpu); in update_bp_intercept() local
1646 clr_exception_intercept(svm, BP_VECTOR); in update_bp_intercept()
1650 set_exception_intercept(svm, BP_VECTOR); in update_bp_intercept()
1655 static void new_asid(struct vcpu_svm *svm, struct svm_cpu_data *sd) in new_asid() argument
1660 svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ALL_ASID; in new_asid()
1663 svm->asid_generation = sd->asid_generation; in new_asid()
1664 svm->vmcb->control.asid = sd->next_asid++; in new_asid()
1666 mark_dirty(svm->vmcb, VMCB_ASID); in new_asid()
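new_asid() together with pre_svm_run() (lines 3585-3593 further down) implements lazy ASID assignment: a vCPU only pulls a fresh ASID when its cached generation no longer matches the per-CPU svm_cpu_data generation. A sketch of the whole helper; sd->max_asid and the generation bump in the middle are assumed from the surrounding file rather than quoted here:

	static void new_asid(struct vcpu_svm *svm, struct svm_cpu_data *sd)
	{
		if (sd->next_asid > sd->max_asid) {
			/* ASID space exhausted on this CPU: new generation, flush. */
			++sd->asid_generation;
			sd->next_asid = 1;
			svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ALL_ASID;
		}

		svm->asid_generation = sd->asid_generation;
		svm->vmcb->control.asid = sd->next_asid++;

		mark_dirty(svm->vmcb, VMCB_ASID);
	}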
1676 struct vcpu_svm *svm = to_svm(vcpu); in svm_set_dr6() local
1678 svm->vmcb->save.dr6 = value; in svm_set_dr6()
1679 mark_dirty(svm->vmcb, VMCB_DR); in svm_set_dr6()
1684 struct vcpu_svm *svm = to_svm(vcpu); in svm_sync_dirty_debug_regs() local
1691 vcpu->arch.dr7 = svm->vmcb->save.dr7; in svm_sync_dirty_debug_regs()
1694 set_dr_intercepts(svm); in svm_sync_dirty_debug_regs()
1699 struct vcpu_svm *svm = to_svm(vcpu); in svm_set_dr7() local
1701 svm->vmcb->save.dr7 = value; in svm_set_dr7()
1702 mark_dirty(svm->vmcb, VMCB_DR); in svm_set_dr7()
1705 static int pf_interception(struct vcpu_svm *svm) in pf_interception() argument
1707 u64 fault_address = svm->vmcb->control.exit_info_2; in pf_interception()
1711 switch (svm->apf_reason) { in pf_interception()
1713 error_code = svm->vmcb->control.exit_info_1; in pf_interception()
1716 if (!npt_enabled && kvm_event_needs_reinjection(&svm->vcpu)) in pf_interception()
1717 kvm_mmu_unprotect_page_virt(&svm->vcpu, fault_address); in pf_interception()
1718 r = kvm_mmu_page_fault(&svm->vcpu, fault_address, error_code, in pf_interception()
1719 svm->vmcb->control.insn_bytes, in pf_interception()
1720 svm->vmcb->control.insn_len); in pf_interception()
1723 svm->apf_reason = 0; in pf_interception()
1729 svm->apf_reason = 0; in pf_interception()
1738 static int db_interception(struct vcpu_svm *svm) in db_interception() argument
1740 struct kvm_run *kvm_run = svm->vcpu.run; in db_interception()
1742 if (!(svm->vcpu.guest_debug & in db_interception()
1744 !svm->nmi_singlestep) { in db_interception()
1745 kvm_queue_exception(&svm->vcpu, DB_VECTOR); in db_interception()
1749 if (svm->nmi_singlestep) { in db_interception()
1750 svm->nmi_singlestep = false; in db_interception()
1751 if (!(svm->vcpu.guest_debug & KVM_GUESTDBG_SINGLESTEP)) in db_interception()
1752 svm->vmcb->save.rflags &= in db_interception()
1756 if (svm->vcpu.guest_debug & in db_interception()
1760 svm->vmcb->save.cs.base + svm->vmcb->save.rip; in db_interception()
1768 static int bp_interception(struct vcpu_svm *svm) in bp_interception() argument
1770 struct kvm_run *kvm_run = svm->vcpu.run; in bp_interception()
1773 kvm_run->debug.arch.pc = svm->vmcb->save.cs.base + svm->vmcb->save.rip; in bp_interception()
1778 static int ud_interception(struct vcpu_svm *svm) in ud_interception() argument
1782 er = emulate_instruction(&svm->vcpu, EMULTYPE_TRAP_UD); in ud_interception()
1784 kvm_queue_exception(&svm->vcpu, UD_VECTOR); in ud_interception()
1788 static int ac_interception(struct vcpu_svm *svm) in ac_interception() argument
1790 kvm_queue_exception_e(&svm->vcpu, AC_VECTOR, 0); in ac_interception()
1796 struct vcpu_svm *svm = to_svm(vcpu); in svm_fpu_activate() local
1798 clr_exception_intercept(svm, NM_VECTOR); in svm_fpu_activate()
1800 svm->vcpu.fpu_active = 1; in svm_fpu_activate()
1801 update_cr0_intercept(svm); in svm_fpu_activate()
1804 static int nm_interception(struct vcpu_svm *svm) in nm_interception() argument
1806 svm_fpu_activate(&svm->vcpu); in nm_interception()
1849 static void svm_handle_mce(struct vcpu_svm *svm) in svm_handle_mce() argument
1858 kvm_make_request(KVM_REQ_TRIPLE_FAULT, &svm->vcpu); in svm_handle_mce()
1874 static int mc_interception(struct vcpu_svm *svm) in mc_interception() argument
1879 static int shutdown_interception(struct vcpu_svm *svm) in shutdown_interception() argument
1881 struct kvm_run *kvm_run = svm->vcpu.run; in shutdown_interception()
1887 clear_page(svm->vmcb); in shutdown_interception()
1888 init_vmcb(svm); in shutdown_interception()
1894 static int io_interception(struct vcpu_svm *svm) in io_interception() argument
1896 struct kvm_vcpu *vcpu = &svm->vcpu; in io_interception()
1897 u32 io_info = svm->vmcb->control.exit_info_1; /* address size bug? */ in io_interception()
1901 ++svm->vcpu.stat.io_exits; in io_interception()
1909 svm->next_rip = svm->vmcb->control.exit_info_2; in io_interception()
1910 skip_emulated_instruction(&svm->vcpu); in io_interception()
1915 static int nmi_interception(struct vcpu_svm *svm) in nmi_interception() argument
1920 static int intr_interception(struct vcpu_svm *svm) in intr_interception() argument
1922 ++svm->vcpu.stat.irq_exits; in intr_interception()
1926 static int nop_on_interception(struct vcpu_svm *svm) in nop_on_interception() argument
1931 static int halt_interception(struct vcpu_svm *svm) in halt_interception() argument
1933 svm->next_rip = kvm_rip_read(&svm->vcpu) + 1; in halt_interception()
1934 return kvm_emulate_halt(&svm->vcpu); in halt_interception()
1937 static int vmmcall_interception(struct vcpu_svm *svm) in vmmcall_interception() argument
1939 svm->next_rip = kvm_rip_read(&svm->vcpu) + 3; in vmmcall_interception()
1940 kvm_emulate_hypercall(&svm->vcpu); in vmmcall_interception()
1946 struct vcpu_svm *svm = to_svm(vcpu); in nested_svm_get_tdp_cr3() local
1948 return svm->nested.nested_cr3; in nested_svm_get_tdp_cr3()
1953 struct vcpu_svm *svm = to_svm(vcpu); in nested_svm_get_tdp_pdptr() local
1954 u64 cr3 = svm->nested.nested_cr3; in nested_svm_get_tdp_pdptr()
1968 struct vcpu_svm *svm = to_svm(vcpu); in nested_svm_set_tdp_cr3() local
1970 svm->vmcb->control.nested_cr3 = root; in nested_svm_set_tdp_cr3()
1971 mark_dirty(svm->vmcb, VMCB_NPT); in nested_svm_set_tdp_cr3()
1978 struct vcpu_svm *svm = to_svm(vcpu); in nested_svm_inject_npf_exit() local
1980 if (svm->vmcb->control.exit_code != SVM_EXIT_NPF) { in nested_svm_inject_npf_exit()
1985 svm->vmcb->control.exit_code = SVM_EXIT_NPF; in nested_svm_inject_npf_exit()
1986 svm->vmcb->control.exit_code_hi = 0; in nested_svm_inject_npf_exit()
1987 svm->vmcb->control.exit_info_1 = (1ULL << 32); in nested_svm_inject_npf_exit()
1988 svm->vmcb->control.exit_info_2 = fault->address; in nested_svm_inject_npf_exit()
1991 svm->vmcb->control.exit_info_1 &= ~0xffffffffULL; in nested_svm_inject_npf_exit()
1992 svm->vmcb->control.exit_info_1 |= fault->error_code; in nested_svm_inject_npf_exit()
1998 if (svm->vmcb->control.exit_info_1 & (2ULL << 32)) in nested_svm_inject_npf_exit()
1999 svm->vmcb->control.exit_info_1 &= ~1; in nested_svm_inject_npf_exit()
2001 nested_svm_vmexit(svm); in nested_svm_inject_npf_exit()
2021 static int nested_svm_check_permissions(struct vcpu_svm *svm) in nested_svm_check_permissions() argument
2023 if (!(svm->vcpu.arch.efer & EFER_SVME) in nested_svm_check_permissions()
2024 || !is_paging(&svm->vcpu)) { in nested_svm_check_permissions()
2025 kvm_queue_exception(&svm->vcpu, UD_VECTOR); in nested_svm_check_permissions()
2029 if (svm->vmcb->save.cpl) { in nested_svm_check_permissions()
2030 kvm_inject_gp(&svm->vcpu, 0); in nested_svm_check_permissions()
2037 static int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr, in nested_svm_check_exception() argument
2042 if (!is_guest_mode(&svm->vcpu)) in nested_svm_check_exception()
2045 svm->vmcb->control.exit_code = SVM_EXIT_EXCP_BASE + nr; in nested_svm_check_exception()
2046 svm->vmcb->control.exit_code_hi = 0; in nested_svm_check_exception()
2047 svm->vmcb->control.exit_info_1 = error_code; in nested_svm_check_exception()
2048 svm->vmcb->control.exit_info_2 = svm->vcpu.arch.cr2; in nested_svm_check_exception()
2050 vmexit = nested_svm_intercept(svm); in nested_svm_check_exception()
2052 svm->nested.exit_required = true; in nested_svm_check_exception()
2058 static inline bool nested_svm_intr(struct vcpu_svm *svm) in nested_svm_intr() argument
2060 if (!is_guest_mode(&svm->vcpu)) in nested_svm_intr()
2063 if (!(svm->vcpu.arch.hflags & HF_VINTR_MASK)) in nested_svm_intr()
2066 if (!(svm->vcpu.arch.hflags & HF_HIF_MASK)) in nested_svm_intr()
2074 if (svm->nested.exit_required) in nested_svm_intr()
2077 svm->vmcb->control.exit_code = SVM_EXIT_INTR; in nested_svm_intr()
2078 svm->vmcb->control.exit_info_1 = 0; in nested_svm_intr()
2079 svm->vmcb->control.exit_info_2 = 0; in nested_svm_intr()
2081 if (svm->nested.intercept & 1ULL) { in nested_svm_intr()
2088 svm->nested.exit_required = true; in nested_svm_intr()
2089 trace_kvm_nested_intr_vmexit(svm->vmcb->save.rip); in nested_svm_intr()
2097 static inline bool nested_svm_nmi(struct vcpu_svm *svm) in nested_svm_nmi() argument
2099 if (!is_guest_mode(&svm->vcpu)) in nested_svm_nmi()
2102 if (!(svm->nested.intercept & (1ULL << INTERCEPT_NMI))) in nested_svm_nmi()
2105 svm->vmcb->control.exit_code = SVM_EXIT_NMI; in nested_svm_nmi()
2106 svm->nested.exit_required = true; in nested_svm_nmi()
2111 static void *nested_svm_map(struct vcpu_svm *svm, u64 gpa, struct page **_page) in nested_svm_map() argument
2117 page = gfn_to_page(svm->vcpu.kvm, gpa >> PAGE_SHIFT); in nested_svm_map()
2126 kvm_inject_gp(&svm->vcpu, 0); in nested_svm_map()
2137 static int nested_svm_intercept_ioio(struct vcpu_svm *svm) in nested_svm_intercept_ioio() argument
2144 if (!(svm->nested.intercept & (1ULL << INTERCEPT_IOIO_PROT))) in nested_svm_intercept_ioio()
2147 port = svm->vmcb->control.exit_info_1 >> 16; in nested_svm_intercept_ioio()
2148 size = (svm->vmcb->control.exit_info_1 & SVM_IOIO_SIZE_MASK) >> in nested_svm_intercept_ioio()
2150 gpa = svm->nested.vmcb_iopm + (port / 8); in nested_svm_intercept_ioio()
2156 if (kvm_read_guest(svm->vcpu.kvm, gpa, &val, iopm_len)) in nested_svm_intercept_ioio()
2162 static int nested_svm_exit_handled_msr(struct vcpu_svm *svm) in nested_svm_exit_handled_msr() argument
2167 if (!(svm->nested.intercept & (1ULL << INTERCEPT_MSR_PROT))) in nested_svm_exit_handled_msr()
2170 msr = svm->vcpu.arch.regs[VCPU_REGS_RCX]; in nested_svm_exit_handled_msr()
2172 write = svm->vmcb->control.exit_info_1 & 1; in nested_svm_exit_handled_msr()
2181 if (kvm_read_guest(svm->vcpu.kvm, svm->nested.vmcb_msrpm + offset, &value, 4)) in nested_svm_exit_handled_msr()
2187 static int nested_svm_exit_special(struct vcpu_svm *svm) in nested_svm_exit_special() argument
2189 u32 exit_code = svm->vmcb->control.exit_code; in nested_svm_exit_special()
2203 if (!npt_enabled && svm->apf_reason == 0) in nested_svm_exit_special()
2207 nm_interception(svm); in nested_svm_exit_special()
2219 static int nested_svm_intercept(struct vcpu_svm *svm) in nested_svm_intercept() argument
2221 u32 exit_code = svm->vmcb->control.exit_code; in nested_svm_intercept()
2226 vmexit = nested_svm_exit_handled_msr(svm); in nested_svm_intercept()
2229 vmexit = nested_svm_intercept_ioio(svm); in nested_svm_intercept()
2233 if (svm->nested.intercept_cr & bit) in nested_svm_intercept()
2239 if (svm->nested.intercept_dr & bit) in nested_svm_intercept()
2245 if (svm->nested.intercept_exceptions & excp_bits) in nested_svm_intercept()
2249 svm->apf_reason != 0) in nested_svm_intercept()
2259 if (svm->nested.intercept & exit_bits) in nested_svm_intercept()
2267 static int nested_svm_exit_handled(struct vcpu_svm *svm) in nested_svm_exit_handled() argument
2271 vmexit = nested_svm_intercept(svm); in nested_svm_exit_handled()
2274 nested_svm_vmexit(svm); in nested_svm_exit_handled()
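nested_svm_exit_handled() is a thin wrapper around nested_svm_intercept(): the latter decides whether L1 asked to intercept this exit, and only a NESTED_EXIT_DONE verdict triggers the synthetic #VMEXIT into L1. A sketch assembled from the two lines above, assuming the NESTED_EXIT_* return convention used elsewhere in this file:

	static int nested_svm_exit_handled(struct vcpu_svm *svm)
	{
		int vmexit = nested_svm_intercept(svm);

		if (vmexit == NESTED_EXIT_DONE)
			nested_svm_vmexit(svm);	/* reflect the exit into L1 */

		return vmexit;
	}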
2309 static int nested_svm_vmexit(struct vcpu_svm *svm) in nested_svm_vmexit() argument
2312 struct vmcb *hsave = svm->nested.hsave; in nested_svm_vmexit()
2313 struct vmcb *vmcb = svm->vmcb; in nested_svm_vmexit()
2323 nested_vmcb = nested_svm_map(svm, svm->nested.vmcb, &page); in nested_svm_vmexit()
2328 leave_guest_mode(&svm->vcpu); in nested_svm_vmexit()
2329 svm->nested.vmcb = 0; in nested_svm_vmexit()
2332 disable_gif(svm); in nested_svm_vmexit()
2340 nested_vmcb->save.efer = svm->vcpu.arch.efer; in nested_svm_vmexit()
2341 nested_vmcb->save.cr0 = kvm_read_cr0(&svm->vcpu); in nested_svm_vmexit()
2342 nested_vmcb->save.cr3 = kvm_read_cr3(&svm->vcpu); in nested_svm_vmexit()
2344 nested_vmcb->save.cr4 = svm->vcpu.arch.cr4; in nested_svm_vmexit()
2345 nested_vmcb->save.rflags = kvm_get_rflags(&svm->vcpu); in nested_svm_vmexit()
2384 if (!(svm->vcpu.arch.hflags & HF_VINTR_MASK)) in nested_svm_vmexit()
2390 kvm_clear_exception_queue(&svm->vcpu); in nested_svm_vmexit()
2391 kvm_clear_interrupt_queue(&svm->vcpu); in nested_svm_vmexit()
2393 svm->nested.nested_cr3 = 0; in nested_svm_vmexit()
2396 svm->vmcb->save.es = hsave->save.es; in nested_svm_vmexit()
2397 svm->vmcb->save.cs = hsave->save.cs; in nested_svm_vmexit()
2398 svm->vmcb->save.ss = hsave->save.ss; in nested_svm_vmexit()
2399 svm->vmcb->save.ds = hsave->save.ds; in nested_svm_vmexit()
2400 svm->vmcb->save.gdtr = hsave->save.gdtr; in nested_svm_vmexit()
2401 svm->vmcb->save.idtr = hsave->save.idtr; in nested_svm_vmexit()
2402 kvm_set_rflags(&svm->vcpu, hsave->save.rflags); in nested_svm_vmexit()
2403 svm_set_efer(&svm->vcpu, hsave->save.efer); in nested_svm_vmexit()
2404 svm_set_cr0(&svm->vcpu, hsave->save.cr0 | X86_CR0_PE); in nested_svm_vmexit()
2405 svm_set_cr4(&svm->vcpu, hsave->save.cr4); in nested_svm_vmexit()
2407 svm->vmcb->save.cr3 = hsave->save.cr3; in nested_svm_vmexit()
2408 svm->vcpu.arch.cr3 = hsave->save.cr3; in nested_svm_vmexit()
2410 (void)kvm_set_cr3(&svm->vcpu, hsave->save.cr3); in nested_svm_vmexit()
2412 kvm_register_write(&svm->vcpu, VCPU_REGS_RAX, hsave->save.rax); in nested_svm_vmexit()
2413 kvm_register_write(&svm->vcpu, VCPU_REGS_RSP, hsave->save.rsp); in nested_svm_vmexit()
2414 kvm_register_write(&svm->vcpu, VCPU_REGS_RIP, hsave->save.rip); in nested_svm_vmexit()
2415 svm->vmcb->save.dr7 = 0; in nested_svm_vmexit()
2416 svm->vmcb->save.cpl = 0; in nested_svm_vmexit()
2417 svm->vmcb->control.exit_int_info = 0; in nested_svm_vmexit()
2419 mark_all_dirty(svm->vmcb); in nested_svm_vmexit()
2423 nested_svm_uninit_mmu_context(&svm->vcpu); in nested_svm_vmexit()
2424 kvm_mmu_reset_context(&svm->vcpu); in nested_svm_vmexit()
2425 kvm_mmu_load(&svm->vcpu); in nested_svm_vmexit()
2430 static bool nested_svm_vmrun_msrpm(struct vcpu_svm *svm) in nested_svm_vmrun_msrpm() argument
2439 if (!(svm->nested.intercept & (1ULL << INTERCEPT_MSR_PROT))) in nested_svm_vmrun_msrpm()
2450 offset = svm->nested.vmcb_msrpm + (p * 4); in nested_svm_vmrun_msrpm()
2452 if (kvm_read_guest(svm->vcpu.kvm, offset, &value, 4)) in nested_svm_vmrun_msrpm()
2455 svm->nested.msrpm[p] = svm->msrpm[p] | value; in nested_svm_vmrun_msrpm()
2458 svm->vmcb->control.msrpm_base_pa = __pa(svm->nested.msrpm); in nested_svm_vmrun_msrpm()
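nested_svm_vmrun_msrpm() builds the MSR permission map the nested guest actually runs with by OR-ing L0's bitmap with the one L1 supplied, so an MSR access traps if either level wants it intercepted. A sketch of the merge loop; msrpm_offsets[] and MSRPM_OFFSETS are assumed from the surrounding file and are not part of this listing:

	for (i = 0; i < MSRPM_OFFSETS; i++) {
		u32 value, p;
		u64 offset;

		if (msrpm_offsets[i] == 0xffffffff)	/* end of used offsets */
			break;

		p      = msrpm_offsets[i];
		offset = svm->nested.vmcb_msrpm + (p * 4);

		if (kvm_read_guest(svm->vcpu.kvm, offset, &value, 4))
			return false;

		svm->nested.msrpm[p] = svm->msrpm[p] | value;	/* L0 | L1 bits */
	}

	svm->vmcb->control.msrpm_base_pa = __pa(svm->nested.msrpm);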
2477 static bool nested_svm_vmrun(struct vcpu_svm *svm) in nested_svm_vmrun() argument
2480 struct vmcb *hsave = svm->nested.hsave; in nested_svm_vmrun()
2481 struct vmcb *vmcb = svm->vmcb; in nested_svm_vmrun()
2485 vmcb_gpa = svm->vmcb->save.rax; in nested_svm_vmrun()
2487 nested_vmcb = nested_svm_map(svm, svm->vmcb->save.rax, &page); in nested_svm_vmrun()
2502 trace_kvm_nested_vmrun(svm->vmcb->save.rip, vmcb_gpa, in nested_svm_vmrun()
2514 kvm_clear_exception_queue(&svm->vcpu); in nested_svm_vmrun()
2515 kvm_clear_interrupt_queue(&svm->vcpu); in nested_svm_vmrun()
2527 hsave->save.efer = svm->vcpu.arch.efer; in nested_svm_vmrun()
2528 hsave->save.cr0 = kvm_read_cr0(&svm->vcpu); in nested_svm_vmrun()
2529 hsave->save.cr4 = svm->vcpu.arch.cr4; in nested_svm_vmrun()
2530 hsave->save.rflags = kvm_get_rflags(&svm->vcpu); in nested_svm_vmrun()
2531 hsave->save.rip = kvm_rip_read(&svm->vcpu); in nested_svm_vmrun()
2537 hsave->save.cr3 = kvm_read_cr3(&svm->vcpu); in nested_svm_vmrun()
2541 if (kvm_get_rflags(&svm->vcpu) & X86_EFLAGS_IF) in nested_svm_vmrun()
2542 svm->vcpu.arch.hflags |= HF_HIF_MASK; in nested_svm_vmrun()
2544 svm->vcpu.arch.hflags &= ~HF_HIF_MASK; in nested_svm_vmrun()
2547 kvm_mmu_unload(&svm->vcpu); in nested_svm_vmrun()
2548 svm->nested.nested_cr3 = nested_vmcb->control.nested_cr3; in nested_svm_vmrun()
2549 nested_svm_init_mmu_context(&svm->vcpu); in nested_svm_vmrun()
2553 svm->vmcb->save.es = nested_vmcb->save.es; in nested_svm_vmrun()
2554 svm->vmcb->save.cs = nested_vmcb->save.cs; in nested_svm_vmrun()
2555 svm->vmcb->save.ss = nested_vmcb->save.ss; in nested_svm_vmrun()
2556 svm->vmcb->save.ds = nested_vmcb->save.ds; in nested_svm_vmrun()
2557 svm->vmcb->save.gdtr = nested_vmcb->save.gdtr; in nested_svm_vmrun()
2558 svm->vmcb->save.idtr = nested_vmcb->save.idtr; in nested_svm_vmrun()
2559 kvm_set_rflags(&svm->vcpu, nested_vmcb->save.rflags); in nested_svm_vmrun()
2560 svm_set_efer(&svm->vcpu, nested_vmcb->save.efer); in nested_svm_vmrun()
2561 svm_set_cr0(&svm->vcpu, nested_vmcb->save.cr0); in nested_svm_vmrun()
2562 svm_set_cr4(&svm->vcpu, nested_vmcb->save.cr4); in nested_svm_vmrun()
2564 svm->vmcb->save.cr3 = nested_vmcb->save.cr3; in nested_svm_vmrun()
2565 svm->vcpu.arch.cr3 = nested_vmcb->save.cr3; in nested_svm_vmrun()
2567 (void)kvm_set_cr3(&svm->vcpu, nested_vmcb->save.cr3); in nested_svm_vmrun()
2570 kvm_mmu_reset_context(&svm->vcpu); in nested_svm_vmrun()
2572 svm->vmcb->save.cr2 = svm->vcpu.arch.cr2 = nested_vmcb->save.cr2; in nested_svm_vmrun()
2573 kvm_register_write(&svm->vcpu, VCPU_REGS_RAX, nested_vmcb->save.rax); in nested_svm_vmrun()
2574 kvm_register_write(&svm->vcpu, VCPU_REGS_RSP, nested_vmcb->save.rsp); in nested_svm_vmrun()
2575 kvm_register_write(&svm->vcpu, VCPU_REGS_RIP, nested_vmcb->save.rip); in nested_svm_vmrun()
2578 svm->vmcb->save.rax = nested_vmcb->save.rax; in nested_svm_vmrun()
2579 svm->vmcb->save.rsp = nested_vmcb->save.rsp; in nested_svm_vmrun()
2580 svm->vmcb->save.rip = nested_vmcb->save.rip; in nested_svm_vmrun()
2581 svm->vmcb->save.dr7 = nested_vmcb->save.dr7; in nested_svm_vmrun()
2582 svm->vmcb->save.dr6 = nested_vmcb->save.dr6; in nested_svm_vmrun()
2583 svm->vmcb->save.cpl = nested_vmcb->save.cpl; in nested_svm_vmrun()
2585 svm->nested.vmcb_msrpm = nested_vmcb->control.msrpm_base_pa & ~0x0fffULL; in nested_svm_vmrun()
2586 svm->nested.vmcb_iopm = nested_vmcb->control.iopm_base_pa & ~0x0fffULL; in nested_svm_vmrun()
2589 svm->nested.intercept_cr = nested_vmcb->control.intercept_cr; in nested_svm_vmrun()
2590 svm->nested.intercept_dr = nested_vmcb->control.intercept_dr; in nested_svm_vmrun()
2591 svm->nested.intercept_exceptions = nested_vmcb->control.intercept_exceptions; in nested_svm_vmrun()
2592 svm->nested.intercept = nested_vmcb->control.intercept; in nested_svm_vmrun()
2594 svm_flush_tlb(&svm->vcpu); in nested_svm_vmrun()
2595 svm->vmcb->control.int_ctl = nested_vmcb->control.int_ctl | V_INTR_MASKING_MASK; in nested_svm_vmrun()
2597 svm->vcpu.arch.hflags |= HF_VINTR_MASK; in nested_svm_vmrun()
2599 svm->vcpu.arch.hflags &= ~HF_VINTR_MASK; in nested_svm_vmrun()
2601 if (svm->vcpu.arch.hflags & HF_VINTR_MASK) { in nested_svm_vmrun()
2603 clr_cr_intercept(svm, INTERCEPT_CR8_READ); in nested_svm_vmrun()
2604 clr_cr_intercept(svm, INTERCEPT_CR8_WRITE); in nested_svm_vmrun()
2608 clr_intercept(svm, INTERCEPT_VMMCALL); in nested_svm_vmrun()
2610 svm->vmcb->control.lbr_ctl = nested_vmcb->control.lbr_ctl; in nested_svm_vmrun()
2611 svm->vmcb->control.int_vector = nested_vmcb->control.int_vector; in nested_svm_vmrun()
2612 svm->vmcb->control.int_state = nested_vmcb->control.int_state; in nested_svm_vmrun()
2613 svm->vmcb->control.tsc_offset += nested_vmcb->control.tsc_offset; in nested_svm_vmrun()
2614 svm->vmcb->control.event_inj = nested_vmcb->control.event_inj; in nested_svm_vmrun()
2615 svm->vmcb->control.event_inj_err = nested_vmcb->control.event_inj_err; in nested_svm_vmrun()
2620 enter_guest_mode(&svm->vcpu); in nested_svm_vmrun()
2626 recalc_intercepts(svm); in nested_svm_vmrun()
2628 svm->nested.vmcb = vmcb_gpa; in nested_svm_vmrun()
2630 enable_gif(svm); in nested_svm_vmrun()
2632 mark_all_dirty(svm->vmcb); in nested_svm_vmrun()
2653 static int vmload_interception(struct vcpu_svm *svm) in vmload_interception() argument
2658 if (nested_svm_check_permissions(svm)) in vmload_interception()
2661 nested_vmcb = nested_svm_map(svm, svm->vmcb->save.rax, &page); in vmload_interception()
2665 svm->next_rip = kvm_rip_read(&svm->vcpu) + 3; in vmload_interception()
2666 skip_emulated_instruction(&svm->vcpu); in vmload_interception()
2668 nested_svm_vmloadsave(nested_vmcb, svm->vmcb); in vmload_interception()
2674 static int vmsave_interception(struct vcpu_svm *svm) in vmsave_interception() argument
2679 if (nested_svm_check_permissions(svm)) in vmsave_interception()
2682 nested_vmcb = nested_svm_map(svm, svm->vmcb->save.rax, &page); in vmsave_interception()
2686 svm->next_rip = kvm_rip_read(&svm->vcpu) + 3; in vmsave_interception()
2687 skip_emulated_instruction(&svm->vcpu); in vmsave_interception()
2689 nested_svm_vmloadsave(svm->vmcb, nested_vmcb); in vmsave_interception()
2695 static int vmrun_interception(struct vcpu_svm *svm) in vmrun_interception() argument
2697 if (nested_svm_check_permissions(svm)) in vmrun_interception()
2701 kvm_rip_write(&svm->vcpu, kvm_rip_read(&svm->vcpu) + 3); in vmrun_interception()
2703 if (!nested_svm_vmrun(svm)) in vmrun_interception()
2706 if (!nested_svm_vmrun_msrpm(svm)) in vmrun_interception()
2713 svm->vmcb->control.exit_code = SVM_EXIT_ERR; in vmrun_interception()
2714 svm->vmcb->control.exit_code_hi = 0; in vmrun_interception()
2715 svm->vmcb->control.exit_info_1 = 0; in vmrun_interception()
2716 svm->vmcb->control.exit_info_2 = 0; in vmrun_interception()
2718 nested_svm_vmexit(svm); in vmrun_interception()
2723 static int stgi_interception(struct vcpu_svm *svm) in stgi_interception() argument
2725 if (nested_svm_check_permissions(svm)) in stgi_interception()
2728 svm->next_rip = kvm_rip_read(&svm->vcpu) + 3; in stgi_interception()
2729 skip_emulated_instruction(&svm->vcpu); in stgi_interception()
2730 kvm_make_request(KVM_REQ_EVENT, &svm->vcpu); in stgi_interception()
2732 enable_gif(svm); in stgi_interception()
2737 static int clgi_interception(struct vcpu_svm *svm) in clgi_interception() argument
2739 if (nested_svm_check_permissions(svm)) in clgi_interception()
2742 svm->next_rip = kvm_rip_read(&svm->vcpu) + 3; in clgi_interception()
2743 skip_emulated_instruction(&svm->vcpu); in clgi_interception()
2745 disable_gif(svm); in clgi_interception()
2748 svm_clear_vintr(svm); in clgi_interception()
2749 svm->vmcb->control.int_ctl &= ~V_IRQ_MASK; in clgi_interception()
2751 mark_dirty(svm->vmcb, VMCB_INTR); in clgi_interception()
2756 static int invlpga_interception(struct vcpu_svm *svm) in invlpga_interception() argument
2758 struct kvm_vcpu *vcpu = &svm->vcpu; in invlpga_interception()
2760 trace_kvm_invlpga(svm->vmcb->save.rip, kvm_register_read(&svm->vcpu, VCPU_REGS_RCX), in invlpga_interception()
2761 kvm_register_read(&svm->vcpu, VCPU_REGS_RAX)); in invlpga_interception()
2764 kvm_mmu_invlpg(vcpu, kvm_register_read(&svm->vcpu, VCPU_REGS_RAX)); in invlpga_interception()
2766 svm->next_rip = kvm_rip_read(&svm->vcpu) + 3; in invlpga_interception()
2767 skip_emulated_instruction(&svm->vcpu); in invlpga_interception()
2771 static int skinit_interception(struct vcpu_svm *svm) in skinit_interception() argument
2773 trace_kvm_skinit(svm->vmcb->save.rip, kvm_register_read(&svm->vcpu, VCPU_REGS_RAX)); in skinit_interception()
2775 kvm_queue_exception(&svm->vcpu, UD_VECTOR); in skinit_interception()
2779 static int wbinvd_interception(struct vcpu_svm *svm) in wbinvd_interception() argument
2781 kvm_emulate_wbinvd(&svm->vcpu); in wbinvd_interception()
2785 static int xsetbv_interception(struct vcpu_svm *svm) in xsetbv_interception() argument
2787 u64 new_bv = kvm_read_edx_eax(&svm->vcpu); in xsetbv_interception()
2788 u32 index = kvm_register_read(&svm->vcpu, VCPU_REGS_RCX); in xsetbv_interception()
2790 if (kvm_set_xcr(&svm->vcpu, index, new_bv) == 0) { in xsetbv_interception()
2791 svm->next_rip = kvm_rip_read(&svm->vcpu) + 3; in xsetbv_interception()
2792 skip_emulated_instruction(&svm->vcpu); in xsetbv_interception()
2798 static int task_switch_interception(struct vcpu_svm *svm) in task_switch_interception() argument
2802 int int_type = svm->vmcb->control.exit_int_info & in task_switch_interception()
2804 int int_vec = svm->vmcb->control.exit_int_info & SVM_EVTINJ_VEC_MASK; in task_switch_interception()
2806 svm->vmcb->control.exit_int_info & SVM_EXITINTINFO_TYPE_MASK; in task_switch_interception()
2808 svm->vmcb->control.exit_int_info & SVM_EXITINTINFO_VALID; in task_switch_interception()
2812 tss_selector = (u16)svm->vmcb->control.exit_info_1; in task_switch_interception()
2814 if (svm->vmcb->control.exit_info_2 & in task_switch_interception()
2817 else if (svm->vmcb->control.exit_info_2 & in task_switch_interception()
2828 svm->vcpu.arch.nmi_injected = false; in task_switch_interception()
2831 if (svm->vmcb->control.exit_info_2 & in task_switch_interception()
2835 (u32)svm->vmcb->control.exit_info_2; in task_switch_interception()
2837 kvm_clear_exception_queue(&svm->vcpu); in task_switch_interception()
2840 kvm_clear_interrupt_queue(&svm->vcpu); in task_switch_interception()
2851 skip_emulated_instruction(&svm->vcpu); in task_switch_interception()
2856 if (kvm_task_switch(&svm->vcpu, tss_selector, int_vec, reason, in task_switch_interception()
2858 svm->vcpu.run->exit_reason = KVM_EXIT_INTERNAL_ERROR; in task_switch_interception()
2859 svm->vcpu.run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION; in task_switch_interception()
2860 svm->vcpu.run->internal.ndata = 0; in task_switch_interception()
2866 static int cpuid_interception(struct vcpu_svm *svm) in cpuid_interception() argument
2868 svm->next_rip = kvm_rip_read(&svm->vcpu) + 2; in cpuid_interception()
2869 kvm_emulate_cpuid(&svm->vcpu); in cpuid_interception()
2873 static int iret_interception(struct vcpu_svm *svm) in iret_interception() argument
2875 ++svm->vcpu.stat.nmi_window_exits; in iret_interception()
2876 clr_intercept(svm, INTERCEPT_IRET); in iret_interception()
2877 svm->vcpu.arch.hflags |= HF_IRET_MASK; in iret_interception()
2878 svm->nmi_iret_rip = kvm_rip_read(&svm->vcpu); in iret_interception()
2879 kvm_make_request(KVM_REQ_EVENT, &svm->vcpu); in iret_interception()
2883 static int invlpg_interception(struct vcpu_svm *svm) in invlpg_interception() argument
2886 return emulate_instruction(&svm->vcpu, 0) == EMULATE_DONE; in invlpg_interception()
2888 kvm_mmu_invlpg(&svm->vcpu, svm->vmcb->control.exit_info_1); in invlpg_interception()
2889 skip_emulated_instruction(&svm->vcpu); in invlpg_interception()
2893 static int emulate_on_interception(struct vcpu_svm *svm) in emulate_on_interception() argument
2895 return emulate_instruction(&svm->vcpu, 0) == EMULATE_DONE; in emulate_on_interception()
2898 static int rdpmc_interception(struct vcpu_svm *svm) in rdpmc_interception() argument
2903 return emulate_on_interception(svm); in rdpmc_interception()
2905 err = kvm_rdpmc(&svm->vcpu); in rdpmc_interception()
2906 kvm_complete_insn_gp(&svm->vcpu, err); in rdpmc_interception()
2911 static bool check_selective_cr0_intercepted(struct vcpu_svm *svm, in check_selective_cr0_intercepted() argument
2914 unsigned long cr0 = svm->vcpu.arch.cr0; in check_selective_cr0_intercepted()
2918 intercept = svm->nested.intercept; in check_selective_cr0_intercepted()
2920 if (!is_guest_mode(&svm->vcpu) || in check_selective_cr0_intercepted()
2928 svm->vmcb->control.exit_code = SVM_EXIT_CR0_SEL_WRITE; in check_selective_cr0_intercepted()
2929 ret = (nested_svm_exit_handled(svm) == NESTED_EXIT_DONE); in check_selective_cr0_intercepted()
2937 static int cr_interception(struct vcpu_svm *svm) in cr_interception() argument
2944 return emulate_on_interception(svm); in cr_interception()
2946 if (unlikely((svm->vmcb->control.exit_info_1 & CR_VALID) == 0)) in cr_interception()
2947 return emulate_on_interception(svm); in cr_interception()
2949 reg = svm->vmcb->control.exit_info_1 & SVM_EXITINFO_REG_MASK; in cr_interception()
2950 if (svm->vmcb->control.exit_code == SVM_EXIT_CR0_SEL_WRITE) in cr_interception()
2953 cr = svm->vmcb->control.exit_code - SVM_EXIT_READ_CR0; in cr_interception()
2958 val = kvm_register_read(&svm->vcpu, reg); in cr_interception()
2961 if (!check_selective_cr0_intercepted(svm, val)) in cr_interception()
2962 err = kvm_set_cr0(&svm->vcpu, val); in cr_interception()
2968 err = kvm_set_cr3(&svm->vcpu, val); in cr_interception()
2971 err = kvm_set_cr4(&svm->vcpu, val); in cr_interception()
2974 err = kvm_set_cr8(&svm->vcpu, val); in cr_interception()
2978 kvm_queue_exception(&svm->vcpu, UD_VECTOR); in cr_interception()
2984 val = kvm_read_cr0(&svm->vcpu); in cr_interception()
2987 val = svm->vcpu.arch.cr2; in cr_interception()
2990 val = kvm_read_cr3(&svm->vcpu); in cr_interception()
2993 val = kvm_read_cr4(&svm->vcpu); in cr_interception()
2996 val = kvm_get_cr8(&svm->vcpu); in cr_interception()
3000 kvm_queue_exception(&svm->vcpu, UD_VECTOR); in cr_interception()
3003 kvm_register_write(&svm->vcpu, reg, val); in cr_interception()
3005 kvm_complete_insn_gp(&svm->vcpu, err); in cr_interception()
3010 static int dr_interception(struct vcpu_svm *svm) in dr_interception() argument
3015 if (svm->vcpu.guest_debug == 0) { in dr_interception()
3021 clr_dr_intercepts(svm); in dr_interception()
3022 svm->vcpu.arch.switch_db_regs |= KVM_DEBUGREG_WONT_EXIT; in dr_interception()
3027 return emulate_on_interception(svm); in dr_interception()
3029 reg = svm->vmcb->control.exit_info_1 & SVM_EXITINFO_REG_MASK; in dr_interception()
3030 dr = svm->vmcb->control.exit_code - SVM_EXIT_READ_DR0; in dr_interception()
3033 if (!kvm_require_dr(&svm->vcpu, dr - 16)) in dr_interception()
3035 val = kvm_register_read(&svm->vcpu, reg); in dr_interception()
3036 kvm_set_dr(&svm->vcpu, dr - 16, val); in dr_interception()
3038 if (!kvm_require_dr(&svm->vcpu, dr)) in dr_interception()
3040 kvm_get_dr(&svm->vcpu, dr, &val); in dr_interception()
3041 kvm_register_write(&svm->vcpu, reg, val); in dr_interception()
3044 skip_emulated_instruction(&svm->vcpu); in dr_interception()
3049 static int cr8_write_interception(struct vcpu_svm *svm) in cr8_write_interception() argument
3051 struct kvm_run *kvm_run = svm->vcpu.run; in cr8_write_interception()
3054 u8 cr8_prev = kvm_get_cr8(&svm->vcpu); in cr8_write_interception()
3056 r = cr_interception(svm); in cr8_write_interception()
3057 if (irqchip_in_kernel(svm->vcpu.kvm)) in cr8_write_interception()
3059 if (cr8_prev <= kvm_get_cr8(&svm->vcpu)) in cr8_write_interception()
3074 struct vcpu_svm *svm = to_svm(vcpu); in svm_get_msr() local
3078 *data = svm->vmcb->control.tsc_offset + in svm_get_msr()
3084 *data = svm->vmcb->save.star; in svm_get_msr()
3088 *data = svm->vmcb->save.lstar; in svm_get_msr()
3091 *data = svm->vmcb->save.cstar; in svm_get_msr()
3094 *data = svm->vmcb->save.kernel_gs_base; in svm_get_msr()
3097 *data = svm->vmcb->save.sfmask; in svm_get_msr()
3101 *data = svm->vmcb->save.sysenter_cs; in svm_get_msr()
3104 *data = svm->sysenter_eip; in svm_get_msr()
3107 *data = svm->sysenter_esp; in svm_get_msr()
3115 *data = svm->vmcb->save.dbgctl; in svm_get_msr()
3118 *data = svm->vmcb->save.br_from; in svm_get_msr()
3121 *data = svm->vmcb->save.br_to; in svm_get_msr()
3124 *data = svm->vmcb->save.last_excp_from; in svm_get_msr()
3127 *data = svm->vmcb->save.last_excp_to; in svm_get_msr()
3130 *data = svm->nested.hsave_msr; in svm_get_msr()
3133 *data = svm->nested.vm_cr_msr; in svm_get_msr()
3144 static int rdmsr_interception(struct vcpu_svm *svm) in rdmsr_interception() argument
3146 u32 ecx = kvm_register_read(&svm->vcpu, VCPU_REGS_RCX); in rdmsr_interception()
3149 if (svm_get_msr(&svm->vcpu, ecx, &data)) { in rdmsr_interception()
3151 kvm_inject_gp(&svm->vcpu, 0); in rdmsr_interception()
3155 kvm_register_write(&svm->vcpu, VCPU_REGS_RAX, data & 0xffffffff); in rdmsr_interception()
3156 kvm_register_write(&svm->vcpu, VCPU_REGS_RDX, data >> 32); in rdmsr_interception()
3157 svm->next_rip = kvm_rip_read(&svm->vcpu) + 2; in rdmsr_interception()
3158 skip_emulated_instruction(&svm->vcpu); in rdmsr_interception()
3165 struct vcpu_svm *svm = to_svm(vcpu); in svm_set_vm_cr() local
3173 if (svm->nested.vm_cr_msr & SVM_VM_CR_SVM_DIS_MASK) in svm_set_vm_cr()
3176 svm->nested.vm_cr_msr &= ~chg_mask; in svm_set_vm_cr()
3177 svm->nested.vm_cr_msr |= (data & chg_mask); in svm_set_vm_cr()
3179 svm_dis = svm->nested.vm_cr_msr & SVM_VM_CR_SVM_DIS_MASK; in svm_set_vm_cr()
3190 struct vcpu_svm *svm = to_svm(vcpu); in svm_set_msr() local
3199 svm->vmcb->save.star = data; in svm_set_msr()
3203 svm->vmcb->save.lstar = data; in svm_set_msr()
3206 svm->vmcb->save.cstar = data; in svm_set_msr()
3209 svm->vmcb->save.kernel_gs_base = data; in svm_set_msr()
3212 svm->vmcb->save.sfmask = data; in svm_set_msr()
3216 svm->vmcb->save.sysenter_cs = data; in svm_set_msr()
3219 svm->sysenter_eip = data; in svm_set_msr()
3220 svm->vmcb->save.sysenter_eip = data; in svm_set_msr()
3223 svm->sysenter_esp = data; in svm_set_msr()
3224 svm->vmcb->save.sysenter_esp = data; in svm_set_msr()
3235 svm->vmcb->save.dbgctl = data; in svm_set_msr()
3236 mark_dirty(svm->vmcb, VMCB_LBR); in svm_set_msr()
3238 svm_enable_lbrv(svm); in svm_set_msr()
3240 svm_disable_lbrv(svm); in svm_set_msr()
3243 svm->nested.hsave_msr = data; in svm_set_msr()
3256 static int wrmsr_interception(struct vcpu_svm *svm) in wrmsr_interception() argument
3259 u32 ecx = kvm_register_read(&svm->vcpu, VCPU_REGS_RCX); in wrmsr_interception()
3260 u64 data = kvm_read_edx_eax(&svm->vcpu); in wrmsr_interception()
3266 svm->next_rip = kvm_rip_read(&svm->vcpu) + 2; in wrmsr_interception()
3267 if (kvm_set_msr(&svm->vcpu, &msr)) { in wrmsr_interception()
3269 kvm_inject_gp(&svm->vcpu, 0); in wrmsr_interception()
3272 skip_emulated_instruction(&svm->vcpu); in wrmsr_interception()
3277 static int msr_interception(struct vcpu_svm *svm) in msr_interception() argument
3279 if (svm->vmcb->control.exit_info_1) in msr_interception()
3280 return wrmsr_interception(svm); in msr_interception()
3282 return rdmsr_interception(svm); in msr_interception()
3285 static int interrupt_window_interception(struct vcpu_svm *svm) in interrupt_window_interception() argument
3287 struct kvm_run *kvm_run = svm->vcpu.run; in interrupt_window_interception()
3289 kvm_make_request(KVM_REQ_EVENT, &svm->vcpu); in interrupt_window_interception()
3290 svm_clear_vintr(svm); in interrupt_window_interception()
3291 svm->vmcb->control.int_ctl &= ~V_IRQ_MASK; in interrupt_window_interception()
3292 mark_dirty(svm->vmcb, VMCB_INTR); in interrupt_window_interception()
3293 ++svm->vcpu.stat.irq_window_exits; in interrupt_window_interception()
3298 if (!irqchip_in_kernel(svm->vcpu.kvm) && in interrupt_window_interception()
3300 !kvm_cpu_has_interrupt(&svm->vcpu)) { in interrupt_window_interception()
3308 static int pause_interception(struct vcpu_svm *svm) in pause_interception() argument
3310 kvm_vcpu_on_spin(&(svm->vcpu)); in pause_interception()
3314 static int nop_interception(struct vcpu_svm *svm) in nop_interception() argument
3316 skip_emulated_instruction(&(svm->vcpu)); in nop_interception()
3320 static int monitor_interception(struct vcpu_svm *svm) in monitor_interception() argument
3323 return nop_interception(svm); in monitor_interception()
3326 static int mwait_interception(struct vcpu_svm *svm) in mwait_interception() argument
3329 return nop_interception(svm); in mwait_interception()
3332 static int (*const svm_exit_handlers[])(struct vcpu_svm *svm) = {
3398 struct vcpu_svm *svm = to_svm(vcpu); in dump_vmcb() local
3399 struct vmcb_control_area *control = &svm->vmcb->control; in dump_vmcb()
3400 struct vmcb_save_area *save = &svm->vmcb->save; in dump_vmcb()
3511 struct vcpu_svm *svm = to_svm(vcpu); in handle_exit() local
3513 u32 exit_code = svm->vmcb->control.exit_code; in handle_exit()
3515 if (!is_cr_intercept(svm, INTERCEPT_CR0_WRITE)) in handle_exit()
3516 vcpu->arch.cr0 = svm->vmcb->save.cr0; in handle_exit()
3518 vcpu->arch.cr3 = svm->vmcb->save.cr3; in handle_exit()
3520 if (unlikely(svm->nested.exit_required)) { in handle_exit()
3521 nested_svm_vmexit(svm); in handle_exit()
3522 svm->nested.exit_required = false; in handle_exit()
3530 trace_kvm_nested_vmexit(svm->vmcb->save.rip, exit_code, in handle_exit()
3531 svm->vmcb->control.exit_info_1, in handle_exit()
3532 svm->vmcb->control.exit_info_2, in handle_exit()
3533 svm->vmcb->control.exit_int_info, in handle_exit()
3534 svm->vmcb->control.exit_int_info_err, in handle_exit()
3537 vmexit = nested_svm_exit_special(svm); in handle_exit()
3540 vmexit = nested_svm_exit_handled(svm); in handle_exit()
3546 svm_complete_interrupts(svm); in handle_exit()
3548 if (svm->vmcb->control.exit_code == SVM_EXIT_ERR) { in handle_exit()
3551 = svm->vmcb->control.exit_code; in handle_exit()
3557 if (is_external_interrupt(svm->vmcb->control.exit_int_info) && in handle_exit()
3563 __func__, svm->vmcb->control.exit_int_info, in handle_exit()
3573 return svm_exit_handlers[exit_code](svm); in handle_exit()
3585 static void pre_svm_run(struct vcpu_svm *svm) in pre_svm_run() argument
3592 if (svm->asid_generation != sd->asid_generation) in pre_svm_run()
3593 new_asid(svm, sd); in pre_svm_run()
3598 struct vcpu_svm *svm = to_svm(vcpu); in svm_inject_nmi() local
3600 svm->vmcb->control.event_inj = SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_NMI; in svm_inject_nmi()
3602 set_intercept(svm, INTERCEPT_IRET); in svm_inject_nmi()
3606 static inline void svm_inject_irq(struct vcpu_svm *svm, int irq) in svm_inject_irq() argument
3610 control = &svm->vmcb->control; in svm_inject_irq()
3615 mark_dirty(svm->vmcb, VMCB_INTR); in svm_inject_irq()
3620 struct vcpu_svm *svm = to_svm(vcpu); in svm_set_irq() local
3622 BUG_ON(!(gif_set(svm))); in svm_set_irq()
3627 svm->vmcb->control.event_inj = vcpu->arch.interrupt.nr | in svm_set_irq()
3633 struct vcpu_svm *svm = to_svm(vcpu); in update_cr8_intercept() local
3638 clr_cr_intercept(svm, INTERCEPT_CR8_WRITE); in update_cr8_intercept()
3644 set_cr_intercept(svm, INTERCEPT_CR8_WRITE); in update_cr8_intercept()
3669 struct vcpu_svm *svm = to_svm(vcpu); in svm_nmi_allowed() local
3670 struct vmcb *vmcb = svm->vmcb; in svm_nmi_allowed()
3673 !(svm->vcpu.arch.hflags & HF_NMI_MASK); in svm_nmi_allowed()
3674 ret = ret && gif_set(svm) && nested_svm_nmi(svm); in svm_nmi_allowed()
3681 struct vcpu_svm *svm = to_svm(vcpu); in svm_get_nmi_mask() local
3683 return !!(svm->vcpu.arch.hflags & HF_NMI_MASK); in svm_get_nmi_mask()
3688 struct vcpu_svm *svm = to_svm(vcpu); in svm_set_nmi_mask() local
3691 svm->vcpu.arch.hflags |= HF_NMI_MASK; in svm_set_nmi_mask()
3692 set_intercept(svm, INTERCEPT_IRET); in svm_set_nmi_mask()
3694 svm->vcpu.arch.hflags &= ~HF_NMI_MASK; in svm_set_nmi_mask()
3695 clr_intercept(svm, INTERCEPT_IRET); in svm_set_nmi_mask()
3701 struct vcpu_svm *svm = to_svm(vcpu); in svm_interrupt_allowed() local
3702 struct vmcb *vmcb = svm->vmcb; in svm_interrupt_allowed()
3705 if (!gif_set(svm) || in svm_interrupt_allowed()
3712 return ret && !(svm->vcpu.arch.hflags & HF_VINTR_MASK); in svm_interrupt_allowed()
3719 struct vcpu_svm *svm = to_svm(vcpu); in enable_irq_window() local
3727 if (gif_set(svm) && nested_svm_intr(svm)) { in enable_irq_window()
3728 svm_set_vintr(svm); in enable_irq_window()
3729 svm_inject_irq(svm, 0x0); in enable_irq_window()
3735 struct vcpu_svm *svm = to_svm(vcpu); in enable_nmi_window() local
3737 if ((svm->vcpu.arch.hflags & (HF_NMI_MASK | HF_IRET_MASK)) in enable_nmi_window()
3745 svm->nmi_singlestep = true; in enable_nmi_window()
3746 svm->vmcb->save.rflags |= (X86_EFLAGS_TF | X86_EFLAGS_RF); in enable_nmi_window()
3756 struct vcpu_svm *svm = to_svm(vcpu); in svm_flush_tlb() local
3759 svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ASID; in svm_flush_tlb()
3761 svm->asid_generation--; in svm_flush_tlb()
3770 struct vcpu_svm *svm = to_svm(vcpu); in sync_cr8_to_lapic() local
3775 if (!is_cr_intercept(svm, INTERCEPT_CR8_WRITE)) { in sync_cr8_to_lapic()
3776 int cr8 = svm->vmcb->control.int_ctl & V_TPR_MASK; in sync_cr8_to_lapic()
3783 struct vcpu_svm *svm = to_svm(vcpu); in sync_lapic_to_cr8() local
3790 svm->vmcb->control.int_ctl &= ~V_TPR_MASK; in sync_lapic_to_cr8()
3791 svm->vmcb->control.int_ctl |= cr8 & V_TPR_MASK; in sync_lapic_to_cr8()
3794 static void svm_complete_interrupts(struct vcpu_svm *svm) in svm_complete_interrupts() argument
3798 u32 exitintinfo = svm->vmcb->control.exit_int_info; in svm_complete_interrupts()
3799 unsigned int3_injected = svm->int3_injected; in svm_complete_interrupts()
3801 svm->int3_injected = 0; in svm_complete_interrupts()
3807 if ((svm->vcpu.arch.hflags & HF_IRET_MASK) in svm_complete_interrupts()
3808 && kvm_rip_read(&svm->vcpu) != svm->nmi_iret_rip) { in svm_complete_interrupts()
3809 svm->vcpu.arch.hflags &= ~(HF_NMI_MASK | HF_IRET_MASK); in svm_complete_interrupts()
3810 kvm_make_request(KVM_REQ_EVENT, &svm->vcpu); in svm_complete_interrupts()
3813 svm->vcpu.arch.nmi_injected = false; in svm_complete_interrupts()
3814 kvm_clear_exception_queue(&svm->vcpu); in svm_complete_interrupts()
3815 kvm_clear_interrupt_queue(&svm->vcpu); in svm_complete_interrupts()
3820 kvm_make_request(KVM_REQ_EVENT, &svm->vcpu); in svm_complete_interrupts()
3827 svm->vcpu.arch.nmi_injected = true; in svm_complete_interrupts()
3837 kvm_is_linear_rip(&svm->vcpu, svm->int3_rip)) in svm_complete_interrupts()
3838 kvm_rip_write(&svm->vcpu, in svm_complete_interrupts()
3839 kvm_rip_read(&svm->vcpu) - in svm_complete_interrupts()
3844 u32 err = svm->vmcb->control.exit_int_info_err; in svm_complete_interrupts()
3845 kvm_requeue_exception_e(&svm->vcpu, vector, err); in svm_complete_interrupts()
3848 kvm_requeue_exception(&svm->vcpu, vector); in svm_complete_interrupts()
3851 kvm_queue_interrupt(&svm->vcpu, vector, false); in svm_complete_interrupts()
3860 struct vcpu_svm *svm = to_svm(vcpu); in svm_cancel_injection() local
3861 struct vmcb_control_area *control = &svm->vmcb->control; in svm_cancel_injection()
3866 svm_complete_interrupts(svm); in svm_cancel_injection()
3871 struct vcpu_svm *svm = to_svm(vcpu); in svm_vcpu_run() local
3873 svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX]; in svm_vcpu_run()
3874 svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP]; in svm_vcpu_run()
3875 svm->vmcb->save.rip = vcpu->arch.regs[VCPU_REGS_RIP]; in svm_vcpu_run()
3881 if (unlikely(svm->nested.exit_required)) in svm_vcpu_run()
3884 pre_svm_run(svm); in svm_vcpu_run()
3888 svm->vmcb->save.cr2 = vcpu->arch.cr2; in svm_vcpu_run()
3940 : [svm]"a"(svm), in svm_vcpu_run()
3968 wrmsrl(MSR_GS_BASE, svm->host.gs_base); in svm_vcpu_run()
3970 loadsegment(fs, svm->host.fs); in svm_vcpu_run()
3972 loadsegment(gs, svm->host.gs); in svm_vcpu_run()
3980 vcpu->arch.cr2 = svm->vmcb->save.cr2; in svm_vcpu_run()
3981 vcpu->arch.regs[VCPU_REGS_RAX] = svm->vmcb->save.rax; in svm_vcpu_run()
3982 vcpu->arch.regs[VCPU_REGS_RSP] = svm->vmcb->save.rsp; in svm_vcpu_run()
3983 vcpu->arch.regs[VCPU_REGS_RIP] = svm->vmcb->save.rip; in svm_vcpu_run()
3985 trace_kvm_exit(svm->vmcb->control.exit_code, vcpu, KVM_ISA_SVM); in svm_vcpu_run()
3987 if (unlikely(svm->vmcb->control.exit_code == SVM_EXIT_NMI)) in svm_vcpu_run()
3988 kvm_before_handle_nmi(&svm->vcpu); in svm_vcpu_run()
3994 if (unlikely(svm->vmcb->control.exit_code == SVM_EXIT_NMI)) in svm_vcpu_run()
3995 kvm_after_handle_nmi(&svm->vcpu); in svm_vcpu_run()
3999 svm->next_rip = 0; in svm_vcpu_run()
4001 svm->vmcb->control.tlb_ctl = TLB_CONTROL_DO_NOTHING; in svm_vcpu_run()
4004 if (svm->vmcb->control.exit_code == SVM_EXIT_EXCP_BASE + PF_VECTOR) in svm_vcpu_run()
4005 svm->apf_reason = kvm_read_and_reset_pf_reason(); in svm_vcpu_run()
4016 if (unlikely(svm->vmcb->control.exit_code == in svm_vcpu_run()
4018 svm_handle_mce(svm); in svm_vcpu_run()
4020 mark_all_clean(svm->vmcb); in svm_vcpu_run()
4025 struct vcpu_svm *svm = to_svm(vcpu); in svm_set_cr3() local
4027 svm->vmcb->save.cr3 = root; in svm_set_cr3()
4028 mark_dirty(svm->vmcb, VMCB_CR); in svm_set_cr3()
4034 struct vcpu_svm *svm = to_svm(vcpu); in set_tdp_cr3() local
4036 svm->vmcb->control.nested_cr3 = root; in set_tdp_cr3()
4037 mark_dirty(svm->vmcb, VMCB_NPT); in set_tdp_cr3()
4040 svm->vmcb->save.cr3 = kvm_read_cr3(vcpu); in set_tdp_cr3()
4041 mark_dirty(svm->vmcb, VMCB_CR); in set_tdp_cr3()
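The two CR3 setters here make the shadow-paging/NPT split visible: without NPT, svm_set_cr3() writes the shadow root straight into save.cr3; with NPT, set_tdp_cr3() puts the root into control.nested_cr3 and leaves save.cr3 tracking the guest's own CR3. Side by side, condensed from the lines above:

	/* Shadow paging: the VMCB's CR3 is the shadow page table root. */
	svm->vmcb->save.cr3 = root;
	mark_dirty(svm->vmcb, VMCB_CR);

	/* NPT: nested_cr3 holds the host page table root, save.cr3 stays guest-owned. */
	svm->vmcb->control.nested_cr3 = root;
	mark_dirty(svm->vmcb, VMCB_NPT);
	svm->vmcb->save.cr3 = kvm_read_cr3(vcpu);
	mark_dirty(svm->vmcb, VMCB_CR);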
4146 struct vcpu_svm *svm = to_svm(vcpu); in svm_fpu_deactivate() local
4148 set_exception_intercept(svm, NM_VECTOR); in svm_fpu_deactivate()
4149 update_cr0_intercept(svm); in svm_fpu_deactivate()
4219 struct vcpu_svm *svm = to_svm(vcpu); in svm_check_intercept() local
4222 struct vmcb *vmcb = svm->vmcb; in svm_check_intercept()
4248 intercept = svm->nested.intercept; in svm_check_intercept()
4326 vmexit = nested_svm_exit_handled(svm); in svm_check_intercept()