Lines Matching refs:pfn

262 pfn_t pfn, unsigned access) in set_mmio_spte() argument
264 if (unlikely(is_noslot_pfn(pfn))) { in set_mmio_spte()
590 pfn_t pfn; in mmu_spte_clear_track_bits() local
601 pfn = spte_to_pfn(old_spte); in mmu_spte_clear_track_bits()
608 WARN_ON(!kvm_is_reserved_pfn(pfn) && !page_count(pfn_to_page(pfn))); in mmu_spte_clear_track_bits()
611 kvm_set_pfn_accessed(pfn); in mmu_spte_clear_track_bits()
613 kvm_set_pfn_dirty(pfn); in mmu_spte_clear_track_bits()
2515 gfn_t gfn, pfn_t pfn, bool speculative, in set_spte() argument
2521 if (set_mmio_spte(vcpu->kvm, sptep, gfn, pfn, pte_access)) in set_spte()
2540 kvm_is_reserved_pfn(pfn)); in set_spte()
2547 spte |= (u64)pfn << PAGE_SHIFT; in set_spte()
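
The set_spte() hits above are where the host page frame number is folded into the
shadow/EPT entry. A minimal sketch of that encoding and its inverse, assuming
x86-64 with 4 KiB pages; PAGE_SHIFT and PT64_BASE_ADDR_MASK below are stand-ins
for the real mmu.c definitions, not copies of them.

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT              12
#define PT64_BASE_ADDR_MASK     0x000ffffffffff000ULL   /* bits 51:12, assumed */

typedef uint64_t pfn_t;

static uint64_t encode_pfn(uint64_t spte, pfn_t pfn)
{
        /* mirrors "spte |= (u64)pfn << PAGE_SHIFT" from the set_spte() hit */
        return spte | ((uint64_t)pfn << PAGE_SHIFT);
}

static pfn_t spte_to_pfn_sketch(uint64_t spte)
{
        /* the inverse used by mmu_spte_clear_track_bits() and mmu_set_spte() */
        return (spte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT;
}

int main(void)
{
        pfn_t pfn = 0xabc34;

        printf("round trip ok: %d\n", spte_to_pfn_sketch(encode_pfn(0, pfn)) == pfn);
        return 0;
}

The mmu_set_spte() hits that follow compare pfn against spte_to_pfn(*sptep); that
check relies on exactly this round trip being lossless for any valid host frame.
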
2595 int level, gfn_t gfn, pfn_t pfn, bool speculative, in mmu_set_spte() argument
2617 } else if (pfn != spte_to_pfn(*sptep)) { in mmu_set_spte()
2619 spte_to_pfn(*sptep), pfn); in mmu_set_spte()
2626 if (set_spte(vcpu, sptep, pte_access, level, gfn, pfn, speculative, in mmu_set_spte()
2652 kvm_release_pfn_clean(pfn); in mmu_set_spte()
2736 int map_writable, int level, gfn_t gfn, pfn_t pfn, in __direct_map() argument
2750 write, &emulate, level, gfn, pfn, in __direct_map()
2786 static int kvm_handle_bad_page(struct kvm_vcpu *vcpu, gfn_t gfn, pfn_t pfn) in kvm_handle_bad_page() argument
2794 if (pfn == KVM_PFN_ERR_RO_FAULT) in kvm_handle_bad_page()
2797 if (pfn == KVM_PFN_ERR_HWPOISON) { in kvm_handle_bad_page()
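
kvm_handle_bad_page() above dispatches on sentinel pfn values rather than on a
separate error code. A compilable sketch of that scheme, with bit positions that
are assumptions modeled on the include/linux/kvm_host.h layout of this kernel
generation (error pfns in bits 62..52, "no memslot" in bit 63):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t pfn_t;

#define KVM_PFN_ERR_MASK        (0x7ffULL << 52)        /* assumed layout */
#define KVM_PFN_NOSLOT          (1ULL << 63)            /* assumed layout */
#define KVM_PFN_ERR_HWPOISON    (KVM_PFN_ERR_MASK + 1)
#define KVM_PFN_ERR_RO_FAULT    (KVM_PFN_ERR_MASK + 2)

static bool is_error_pfn_sketch(pfn_t pfn)  { return pfn & KVM_PFN_ERR_MASK; }
static bool is_noslot_pfn_sketch(pfn_t pfn) { return pfn & KVM_PFN_NOSLOT; }

int main(void)
{
        /* a poisoned page is an error pfn; an MMIO hole is merely "noslot" */
        printf("%d %d\n", is_error_pfn_sketch(KVM_PFN_ERR_HWPOISON),
               is_noslot_pfn_sketch(KVM_PFN_NOSLOT));
        return 0;
}

This split is what handle_abnormal_pfn() exploits further down: real errors go to
kvm_handle_bad_page(), while a noslot pfn is treated as MMIO instead (see the
set_mmio_spte() hit at the top of the listing).
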
2808 pfn_t pfn = *pfnp; in transparent_hugepage_adjust() local
2818 if (!is_error_noslot_pfn(pfn) && !kvm_is_reserved_pfn(pfn) && in transparent_hugepage_adjust()
2820 PageTransCompound(pfn_to_page(pfn)) && in transparent_hugepage_adjust()
2834 VM_BUG_ON((gfn & mask) != (pfn & mask)); in transparent_hugepage_adjust()
2835 if (pfn & mask) { in transparent_hugepage_adjust()
2838 kvm_release_pfn_clean(pfn); in transparent_hugepage_adjust()
2839 pfn &= ~mask; in transparent_hugepage_adjust()
2840 kvm_get_pfn(pfn); in transparent_hugepage_adjust()
2841 *pfnp = pfn; in transparent_hugepage_adjust()
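
The transparent_hugepage_adjust() hits show the alignment step taken when the
backing host page belongs to a transparent huge page: gfn and pfn are rounded
down to the same huge-page boundary so the fault can be mapped at the larger
level. An arithmetic sketch, assuming x86-64 with 4 KiB pages and a 2 MiB huge
page (512 small pages); the reference-count juggling of the real code is omitted.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define PAGES_PER_HPAGE 512ULL   /* stand-in for KVM_PAGES_PER_HPAGE(level) */

static void hugepage_align(uint64_t *gfn, uint64_t *pfn)
{
        uint64_t mask = PAGES_PER_HPAGE - 1;

        /* guest and host frames must agree on the offset inside the huge page */
        assert((*gfn & mask) == (*pfn & mask));

        *gfn &= ~mask;
        *pfn &= ~mask;
}

int main(void)
{
        uint64_t gfn = 0x1234, pfn = 0xabc34;   /* both end in the offset 0x34 */

        hugepage_align(&gfn, &pfn);
        printf("gfn=%#llx pfn=%#llx\n",
               (unsigned long long)gfn, (unsigned long long)pfn);
        return 0;
}
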
2847 pfn_t pfn, unsigned access, int *ret_val) in handle_abnormal_pfn() argument
2852 if (unlikely(is_error_pfn(pfn))) { in handle_abnormal_pfn()
2853 *ret_val = kvm_handle_bad_page(vcpu, gfn, pfn); in handle_abnormal_pfn()
2857 if (unlikely(is_noslot_pfn(pfn))) in handle_abnormal_pfn()
3001 gva_t gva, pfn_t *pfn, bool write, bool *writable);
3010 pfn_t pfn; in nonpaging_map() local
3035 if (try_async_pf(vcpu, prefault, gfn, v, &pfn, write, &map_writable)) in nonpaging_map()
3038 if (handle_abnormal_pfn(vcpu, v, gfn, pfn, ACC_ALL, &r)) in nonpaging_map()
3046 transparent_hugepage_adjust(vcpu, &gfn, &pfn, &level); in nonpaging_map()
3047 r = __direct_map(vcpu, v, write, map_writable, level, gfn, pfn, in nonpaging_map()
3056 kvm_release_pfn_clean(pfn); in nonpaging_map()
3431 gva_t gva, pfn_t *pfn, bool write, bool *writable) in try_async_pf() argument
3435 *pfn = gfn_to_pfn_async(vcpu->kvm, gfn, &async, write, writable); in try_async_pf()
3450 *pfn = gfn_to_pfn_prot(vcpu->kvm, gfn, write, writable); in try_async_pf()
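
try_async_pf() ties the pfn lookups above into the async page fault machinery:
a non-blocking lookup first, then, if the page is not resident and this is not a
prefault, the fault is queued so the vCPU can keep running, and only as a last
resort a blocking lookup. A control-flow sketch; every fake_ helper below is a
hypothetical stand-in, not a KVM or kernel API.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t gfn_t;
typedef uint64_t pfn_t;

static pfn_t fake_pfn_lookup_async(gfn_t gfn, bool *async)
{
        *async = true;                  /* pretend the page is swapped out */
        return 0;
}

static bool fake_setup_async_pf(gfn_t gfn)
{
        printf("async #PF queued for gfn %#llx\n", (unsigned long long)gfn);
        return true;
}

static pfn_t fake_pfn_lookup_blocking(gfn_t gfn)
{
        return gfn;                     /* identity map, demo only */
}

/* returns true when the fault will be completed asynchronously */
static bool try_async_pf_sketch(bool prefault, gfn_t gfn, pfn_t *pfn)
{
        bool async = false;

        *pfn = fake_pfn_lookup_async(gfn, &async);
        if (!async)
                return false;                   /* page already resident */

        if (!prefault && fake_setup_async_pf(gfn))
                return true;                    /* guest retries the access later */

        *pfn = fake_pfn_lookup_blocking(gfn);   /* last resort: sleep for the page */
        return false;
}

int main(void)
{
        pfn_t pfn;

        printf("handled asynchronously: %d\n",
               try_async_pf_sketch(false, 0x1234, &pfn));
        return 0;
}

Both nonpaging_map() and tdp_page_fault() in this listing bail out early when
try_async_pf() returns true, which is the "let the guest run while the host pages
the memory in" path.
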
3458 pfn_t pfn; in tdp_page_fault() local
3493 if (try_async_pf(vcpu, prefault, gfn, gpa, &pfn, write, &map_writable)) in tdp_page_fault()
3496 if (handle_abnormal_pfn(vcpu, 0, gfn, pfn, ACC_ALL, &r)) in tdp_page_fault()
3504 transparent_hugepage_adjust(vcpu, &gfn, &pfn, &level); in tdp_page_fault()
3506 level, gfn, pfn, prefault); in tdp_page_fault()
3513 kvm_release_pfn_clean(pfn); in tdp_page_fault()
4437 pfn_t pfn; in kvm_mmu_zap_collapsible_spte() local
4444 pfn = spte_to_pfn(*sptep); in kvm_mmu_zap_collapsible_spte()
4454 !kvm_is_reserved_pfn(pfn) && in kvm_mmu_zap_collapsible_spte()
4455 PageTransCompound(pfn_to_page(pfn))) { in kvm_mmu_zap_collapsible_spte()