Lines matching refs:mmu: references to the per-vCPU MMU context (vcpu->arch.mmu / struct kvm_mmu) in KVM's x86 MMU code, listed by source line number and enclosing function.

1836 if (vcpu->arch.mmu.sync_page(vcpu, sp)) { in __kvm_sync_page()
1885 (vcpu->arch.mmu.sync_page(vcpu, s))) { in kvm_sync_pages()
2023 role = vcpu->arch.mmu.base_role; in kvm_mmu_get_page()
2029 if (!vcpu->arch.mmu.direct_map in kvm_mmu_get_page()
2030 && vcpu->arch.mmu.root_level <= PT32_ROOT_LEVEL) { in kvm_mmu_get_page()
2085 iterator->shadow_addr = vcpu->arch.mmu.root_hpa; in shadow_walk_init()
2086 iterator->level = vcpu->arch.mmu.shadow_root_level; in shadow_walk_init()
2089 vcpu->arch.mmu.root_level < PT64_ROOT_LEVEL && in shadow_walk_init()
2090 !vcpu->arch.mmu.direct_map) in shadow_walk_init()
2095 = vcpu->arch.mmu.pae_root[(addr >> 30) & 3]; in shadow_walk_init()
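The shadow_walk_init() matches above initialize the shadow-walk iterator from the MMU context: start at root_hpa with shadow_root_level, step down one level when a 4-level shadow sits over a lower-level guest, and for a PAE shadow pick one of the four pae_root entries by bits 31:30 of the address. A minimal sketch reconstructed from those fragments (the conditions and masking not visible in the matches are assumptions):

	iterator->addr = addr;
	iterator->shadow_addr = vcpu->arch.mmu.root_hpa;
	iterator->level = vcpu->arch.mmu.shadow_root_level;

	/* a 4-level shadow over a 2/3-level guest skips the top level */
	if (iterator->level == PT64_ROOT_LEVEL &&
	    vcpu->arch.mmu.root_level < PT64_ROOT_LEVEL &&
	    !vcpu->arch.mmu.direct_map)
		--iterator->level;

	if (iterator->level == PT32E_ROOT_LEVEL) {
		/* PAE: bits 31:30 of the address select one of four roots */
		iterator->shadow_addr
			= vcpu->arch.mmu.pae_root[(addr >> 30) & 3];
		iterator->shadow_addr &= PT64_BASE_ADDR_MASK;	/* assumption */
		--iterator->level;
	}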
2744 if (!VALID_PAGE(vcpu->arch.mmu.root_hpa)) in __direct_map()
2931 if (!VALID_PAGE(vcpu->arch.mmu.root_hpa)) in fast_page_fault()
3067 if (!VALID_PAGE(vcpu->arch.mmu.root_hpa)) in mmu_free_roots()
3070 if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL && in mmu_free_roots()
3071 (vcpu->arch.mmu.root_level == PT64_ROOT_LEVEL || in mmu_free_roots()
3072 vcpu->arch.mmu.direct_map)) { in mmu_free_roots()
3073 hpa_t root = vcpu->arch.mmu.root_hpa; in mmu_free_roots()
3083 vcpu->arch.mmu.root_hpa = INVALID_PAGE; in mmu_free_roots()
3089 hpa_t root = vcpu->arch.mmu.pae_root[i]; in mmu_free_roots()
3099 vcpu->arch.mmu.pae_root[i] = INVALID_PAGE; in mmu_free_roots()
3103 vcpu->arch.mmu.root_hpa = INVALID_PAGE; in mmu_free_roots()
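The mmu_free_roots() matches show the teardown pattern shared by both root layouts: when a 64-bit shadow matches a 64-bit guest (or the map is direct) there is a single root in root_hpa, otherwise the four PAE roots are walked; every freed slot is reset to INVALID_PAGE. A simplified sketch of that control flow, with locking and the actual shadow-page release elided:

	if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
		return;

	if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL &&
	    (vcpu->arch.mmu.root_level == PT64_ROOT_LEVEL ||
	     vcpu->arch.mmu.direct_map)) {
		hpa_t root = vcpu->arch.mmu.root_hpa;

		/* ... drop the reference on the shadow page at 'root' ... */
		vcpu->arch.mmu.root_hpa = INVALID_PAGE;
		return;
	}

	for (i = 0; i < 4; ++i) {
		hpa_t root = vcpu->arch.mmu.pae_root[i];

		/* ... if 'root' is present, drop its shadow-page reference ... */
		vcpu->arch.mmu.pae_root[i] = INVALID_PAGE;
	}
	vcpu->arch.mmu.root_hpa = INVALID_PAGE;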
3123 if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL) { in mmu_alloc_direct_roots()
3130 vcpu->arch.mmu.root_hpa = __pa(sp->spt); in mmu_alloc_direct_roots()
3131 } else if (vcpu->arch.mmu.shadow_root_level == PT32E_ROOT_LEVEL) { in mmu_alloc_direct_roots()
3133 hpa_t root = vcpu->arch.mmu.pae_root[i]; in mmu_alloc_direct_roots()
3145 vcpu->arch.mmu.pae_root[i] = root | PT_PRESENT_MASK; in mmu_alloc_direct_roots()
3147 vcpu->arch.mmu.root_hpa = __pa(vcpu->arch.mmu.pae_root); in mmu_alloc_direct_roots()
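mmu_alloc_direct_roots() is the mirror image: for a 4-level shadow one top-level shadow page is installed and its physical address recorded in root_hpa; for a PT32E shadow each of the four pae_root slots gets a present entry, and root_hpa points at the pae_root table itself. Sketched from the matched lines, with shadow-page allocation elided:

	if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL) {
		/* ... allocate the top-level shadow page 'sp' ... */
		vcpu->arch.mmu.root_hpa = __pa(sp->spt);
	} else if (vcpu->arch.mmu.shadow_root_level == PT32E_ROOT_LEVEL) {
		for (i = 0; i < 4; ++i) {
			hpa_t root = vcpu->arch.mmu.pae_root[i];

			/* ... the slot must be empty here; a new shadow page
			 * is allocated and 'root' becomes its __pa() ... */
			vcpu->arch.mmu.pae_root[i] = root | PT_PRESENT_MASK;
		}
		vcpu->arch.mmu.root_hpa = __pa(vcpu->arch.mmu.pae_root);
	}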
3161 root_gfn = vcpu->arch.mmu.get_cr3(vcpu) >> PAGE_SHIFT; in mmu_alloc_shadow_roots()
3170 if (vcpu->arch.mmu.root_level == PT64_ROOT_LEVEL) { in mmu_alloc_shadow_roots()
3171 hpa_t root = vcpu->arch.mmu.root_hpa; in mmu_alloc_shadow_roots()
3182 vcpu->arch.mmu.root_hpa = root; in mmu_alloc_shadow_roots()
3192 if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL) in mmu_alloc_shadow_roots()
3196 hpa_t root = vcpu->arch.mmu.pae_root[i]; in mmu_alloc_shadow_roots()
3199 if (vcpu->arch.mmu.root_level == PT32E_ROOT_LEVEL) { in mmu_alloc_shadow_roots()
3200 pdptr = vcpu->arch.mmu.get_pdptr(vcpu, i); in mmu_alloc_shadow_roots()
3202 vcpu->arch.mmu.pae_root[i] = 0; in mmu_alloc_shadow_roots()
3218 vcpu->arch.mmu.pae_root[i] = root | pm_mask; in mmu_alloc_shadow_roots()
3220 vcpu->arch.mmu.root_hpa = __pa(vcpu->arch.mmu.pae_root); in mmu_alloc_shadow_roots()
3226 if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL) { in mmu_alloc_shadow_roots()
3227 if (vcpu->arch.mmu.lm_root == NULL) { in mmu_alloc_shadow_roots()
3239 lm_root[0] = __pa(vcpu->arch.mmu.pae_root) | pm_mask; in mmu_alloc_shadow_roots()
3241 vcpu->arch.mmu.lm_root = lm_root; in mmu_alloc_shadow_roots()
3244 vcpu->arch.mmu.root_hpa = __pa(vcpu->arch.mmu.lm_root); in mmu_alloc_shadow_roots()
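mmu_alloc_shadow_roots() adds two wrinkles visible in the matches: for a PT32E guest each pae_root slot is seeded from the guest PDPTE read via mmu.get_pdptr(), and a not-present PDPTE simply clears the slot; and when a 64-bit shadow sits above the pae_root table, a one-entry lm_root level is allocated once, pointed at pae_root, and root_hpa then refers to lm_root. A condensed sketch of the PDPTE path (validation, shadow-page lookup and the pm_mask computation elided; the present-bit test is an assumption):

	for (i = 0; i < 4; ++i) {
		hpa_t root = vcpu->arch.mmu.pae_root[i];

		if (vcpu->arch.mmu.root_level == PT32E_ROOT_LEVEL) {
			pdptr = vcpu->arch.mmu.get_pdptr(vcpu, i);
			if (!(pdptr & PT_PRESENT_MASK)) {	/* assumption */
				vcpu->arch.mmu.pae_root[i] = 0;
				continue;
			}
		}
		/* ... shadow the referenced guest table, 'root' = its __pa() ... */
		vcpu->arch.mmu.pae_root[i] = root | pm_mask;
	}
	vcpu->arch.mmu.root_hpa = __pa(vcpu->arch.mmu.pae_root);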
3252 if (vcpu->arch.mmu.direct_map) in mmu_alloc_roots()
3263 if (vcpu->arch.mmu.direct_map) in mmu_sync_roots()
3266 if (!VALID_PAGE(vcpu->arch.mmu.root_hpa)) in mmu_sync_roots()
3271 if (vcpu->arch.mmu.root_level == PT64_ROOT_LEVEL) { in mmu_sync_roots()
3272 hpa_t root = vcpu->arch.mmu.root_hpa; in mmu_sync_roots()
3279 hpa_t root = vcpu->arch.mmu.pae_root[i]; in mmu_sync_roots()
3328 if (!VALID_PAGE(vcpu->arch.mmu.root_hpa)) in walk_shadow_page_get_mmio_spte()
3401 MMU_WARN_ON(!VALID_PAGE(vcpu->arch.mmu.root_hpa)); in nonpaging_page_fault()
3415 arch.direct_map = vcpu->arch.mmu.direct_map; in kvm_arch_setup_async_pf()
3416 arch.cr3 = vcpu->arch.mmu.get_cr3(vcpu); in kvm_arch_setup_async_pf()
3467 MMU_WARN_ON(!VALID_PAGE(vcpu->arch.mmu.root_hpa)); in tdp_page_fault()
3545 vcpu->arch.mmu.inject_page_fault(vcpu, fault); in inject_page_fault()
3565 static inline bool is_last_gpte(struct kvm_mmu *mmu, unsigned level, unsigned gpte) in is_last_gpte() argument
3571 return mmu->last_pte_bitmap & (1 << index); in is_last_gpte()
3695 struct kvm_mmu *mmu, bool ept) in update_permission_bitmask() argument
3703 for (byte = 0; byte < ARRAY_SIZE(mmu->permissions); ++byte) { in update_permission_bitmask()
3722 x |= !mmu->nx; in update_permission_bitmask()
3755 mmu->permissions[byte] = map; in update_permission_bitmask()
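update_permission_bitmask() precomputes, for every combination of page-fault error-code bits (the byte index) and pte access bits, whether the access would fault; the matched "x |= !mmu->nx" line folds the NX setting into the execute-allowed bit. Conceptually the table is later consumed by a lookup of roughly this shape (simplified; recent kernels also fold CPL/SMAP state into the index first):

	/* would this pte_access / error-code combination fault? (simplified) */
	fault = (mmu->permissions[pfec >> 1] >> pte_access) & 1;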
3759 static void update_last_pte_bitmap(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu) in update_last_pte_bitmap() argument
3762 unsigned level, root_level = mmu->root_level; in update_last_pte_bitmap()
3771 && (mmu->root_level >= PT32E_ROOT_LEVEL || is_pse(vcpu))) in update_last_pte_bitmap()
3774 mmu->last_pte_bitmap = map; in update_last_pte_bitmap()
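is_last_gpte() and update_last_pte_bitmap() work as a pair: the update side precomputes an 8-bit map of which (level, page-size bit) combinations terminate a guest walk, and the lookup side tests a single bit, as the matched return statement shows. A hedged sketch of the lookup, assuming the index is built from the level and the gpte's PS bit (the exact index layout is an assumption):

	static inline bool is_last_gpte(struct kvm_mmu *mmu,
					unsigned level, unsigned gpte)
	{
		unsigned index;

		index = level - 1;			/* walk level in the low bits */
		index |= (gpte & PT_PAGE_SIZE_MASK) >>
			 (PT_PAGE_SIZE_SHIFT - 2);	/* PS bit -> bit 2 */
		return mmu->last_pte_bitmap & (1 << index);
	}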
3833 struct kvm_mmu *context = &vcpu->arch.mmu; in init_kvm_tdp_mmu()
3877 struct kvm_mmu *context = &vcpu->arch.mmu; in kvm_init_shadow_mmu()
3902 struct kvm_mmu *context = &vcpu->arch.mmu; in kvm_init_shadow_ept_mmu()
3925 struct kvm_mmu *context = &vcpu->arch.mmu; in init_kvm_softmmu()
4002 vcpu->arch.mmu.set_cr3(vcpu, vcpu->arch.mmu.root_hpa); in kvm_mmu_load()
4011 WARN_ON(VALID_PAGE(vcpu->arch.mmu.root_hpa)); in kvm_mmu_unload()
4025 vcpu->arch.mmu.update_pte(vcpu, sp, spte, new); in mmu_pte_write_new_pte()
4223 !((sp->role.word ^ vcpu->arch.mmu.base_role.word) in kvm_mmu_pte_write()
4242 if (vcpu->arch.mmu.direct_map) in kvm_mmu_unprotect_page_virt()
4271 if (vcpu->arch.mmu.direct_map || mmu_is_nested(vcpu)) in is_mmio_page_fault()
4283 r = vcpu->arch.mmu.page_fault(vcpu, cr2, error_code, false); in kvm_mmu_page_fault()
4315 vcpu->arch.mmu.invlpg(vcpu, gva); in kvm_mmu_invlpg()
4335 free_page((unsigned long)vcpu->arch.mmu.pae_root); in free_mmu_pages()
4336 if (vcpu->arch.mmu.lm_root != NULL) in free_mmu_pages()
4337 free_page((unsigned long)vcpu->arch.mmu.lm_root); in free_mmu_pages()
4354 vcpu->arch.mmu.pae_root = page_address(page); in alloc_mmu_pages()
4356 vcpu->arch.mmu.pae_root[i] = INVALID_PAGE; in alloc_mmu_pages()
4363 vcpu->arch.walk_mmu = &vcpu->arch.mmu; in kvm_mmu_create()
4364 vcpu->arch.mmu.root_hpa = INVALID_PAGE; in kvm_mmu_create()
4365 vcpu->arch.mmu.translate_gpa = translate_gpa; in kvm_mmu_create()
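The allocation and creation matches show the lifetime of the per-vCPU MMU state: alloc_mmu_pages() grabs one page for the four PAE root entries and marks each slot INVALID_PAGE, kvm_mmu_create() points walk_mmu at the embedded MMU context, invalidates root_hpa and installs translate_gpa, and free_mmu_pages() releases pae_root and, when it was allocated, lm_root. A compressed sketch of the allocation side (the GFP flags and error handling are assumptions; the low-memory constraint reflects the usual requirement that a 32-bit CR3 value can reach pae_root):

	struct page *page;
	int i;

	/* one page below 4GB holds the four PAE root entries */
	page = alloc_page(GFP_KERNEL | __GFP_DMA32);
	if (!page)
		return -ENOMEM;

	vcpu->arch.mmu.pae_root = page_address(page);
	for (i = 0; i < 4; ++i)
		vcpu->arch.mmu.pae_root[i] = INVALID_PAGE;

	/* at vcpu creation: */
	vcpu->arch.walk_mmu = &vcpu->arch.mmu;
	vcpu->arch.mmu.root_hpa = INVALID_PAGE;
	vcpu->arch.mmu.translate_gpa = translate_gpa;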
4373 MMU_WARN_ON(VALID_PAGE(vcpu->arch.mmu.root_hpa)); in kvm_mmu_setup()
4846 if (!VALID_PAGE(vcpu->arch.mmu.root_hpa)) in kvm_mmu_get_spte_hierarchy()