Lines Matching refs:pfn

262 pfn_t pfn, unsigned access) in set_mmio_spte() argument
264 if (unlikely(is_noslot_pfn(pfn))) { in set_mmio_spte()
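Reassembled from the two fragments above, set_mmio_spte() plausibly reads as follows; the first signature line, the mark_mmio_spte() call and the return values are assumptions, only the argument-list tail and the noslot check appear in the listing.

static bool set_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, gfn_t gfn,
                          pfn_t pfn, unsigned access)
{
        if (unlikely(is_noslot_pfn(pfn))) {
                /* assumed: a pfn with no memslot behind it is MMIO, so an
                 * MMIO spte is installed instead of a normal mapping */
                mark_mmio_spte(vcpu, sptep, gfn, access);
                return true;
        }

        return false;
}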
590 pfn_t pfn; in mmu_spte_clear_track_bits() local
601 pfn = spte_to_pfn(old_spte); in mmu_spte_clear_track_bits()
608 WARN_ON(!kvm_is_reserved_pfn(pfn) && !page_count(pfn_to_page(pfn))); in mmu_spte_clear_track_bits()
611 kvm_set_pfn_accessed(pfn); in mmu_spte_clear_track_bits()
613 kvm_set_pfn_dirty(pfn); in mmu_spte_clear_track_bits()
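A sketch of how the mmu_spte_clear_track_bits() fragments at lines 590-613 fit together; the conditions guarding the accessed/dirty updates are assumptions, only the calls themselves appear in the listing.

        pfn = spte_to_pfn(old_spte);

        /* KVM does not pin guest memory: anything still mapped by a spte
         * must be a reserved pfn or hold a nonzero page refcount */
        WARN_ON(!kvm_is_reserved_pfn(pfn) && !page_count(pfn_to_page(pfn)));

        if (old_spte & shadow_accessed_mask)    /* assumed condition */
                kvm_set_pfn_accessed(pfn);
        if (old_spte & shadow_dirty_mask)       /* assumed condition */
                kvm_set_pfn_dirty(pfn);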
2479 static bool kvm_is_mmio_pfn(pfn_t pfn) in kvm_is_mmio_pfn() argument
2481 if (pfn_valid(pfn)) in kvm_is_mmio_pfn()
2482 return !is_zero_pfn(pfn) && PageReserved(pfn_to_page(pfn)); in kvm_is_mmio_pfn()
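Lines 2479-2482 are nearly the whole helper; filling in the braces and the fall-through case gives roughly the following (the final return for pfns without a struct page is an assumption).

static bool kvm_is_mmio_pfn(pfn_t pfn)
{
        if (pfn_valid(pfn))
                return !is_zero_pfn(pfn) && PageReserved(pfn_to_page(pfn));

        return true;    /* assumed: no struct page behind the pfn means MMIO */
}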
2489 gfn_t gfn, pfn_t pfn, bool speculative, in set_spte() argument
2495 if (set_mmio_spte(vcpu, sptep, gfn, pfn, pte_access)) in set_spte()
2514 kvm_is_mmio_pfn(pfn)); in set_spte()
2521 spte |= (u64)pfn << PAGE_SHIFT; in set_spte()
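Per the lines above, set_spte() uses the pfn three ways: to short-circuit MMIO mappings, to pick the memory type under TDP, and to form the physical-address bits of the spte. A sketch, with everything outside the listed lines marked as an assumption:

        if (set_mmio_spte(vcpu, sptep, gfn, pfn, pte_access))
                return 0;                       /* assumed return value */

        /* ... access/protection bits built here (not in the listing) ... */

        if (tdp_enabled)                        /* assumed context */
                spte |= kvm_x86_ops->get_mt_mask(vcpu, gfn,
                                                 kvm_is_mmio_pfn(pfn));

        spte |= (u64)pfn << PAGE_SHIFT;         /* physical address bits */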
2569 int level, gfn_t gfn, pfn_t pfn, bool speculative, in mmu_set_spte() argument
2591 } else if (pfn != spte_to_pfn(*sptep)) { in mmu_set_spte()
2593 spte_to_pfn(*sptep), pfn); in mmu_set_spte()
2600 if (set_spte(vcpu, sptep, pte_access, level, gfn, pfn, speculative, in mmu_set_spte()
2626 kvm_release_pfn_clean(pfn); in mmu_set_spte()
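The pfn flow through mmu_set_spte() (lines 2569-2626), sketched: a spte that already points at a different pfn is torn down first, set_spte() installs the new mapping, and the caller's pfn reference is released at the end. The else-if chain at line 2591 is flattened into a single condition here, and the drop_spte()/flush calls and trailing set_spte() arguments are assumptions.

        if (is_rmap_spte(*sptep) && pfn != spte_to_pfn(*sptep)) {
                pgprintk("hfn old %llx new %llx\n",
                         spte_to_pfn(*sptep), pfn);
                drop_spte(vcpu->kvm, sptep);            /* assumed */
        }

        if (set_spte(vcpu, sptep, pte_access, level, gfn, pfn, speculative,
                     true, host_writable))              /* tail args assumed */
                kvm_flush_remote_tlbs(vcpu->kvm);       /* assumed */

        /* the spte does not hold a page refcount, so the reference taken
         * by the fault path is always dropped here */
        kvm_release_pfn_clean(pfn);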
2712 int map_writable, int level, gfn_t gfn, pfn_t pfn, in __direct_map() argument
2726 write, &emulate, level, gfn, pfn, in __direct_map()
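In __direct_map() the pfn is only forwarded to mmu_set_spte() once the shadow-page-table walk reaches the target level; in this sketch the iterator, the ACC_ALL access and the remaining arguments are assumptions.

        for_each_shadow_entry(vcpu, (u64)gfn << PAGE_SHIFT, iterator) {
                if (iterator.level == level) {
                        mmu_set_spte(vcpu, iterator.sptep, ACC_ALL,
                                     write, &emulate, level, gfn, pfn,
                                     prefault, map_writable);
                        break;                          /* assumed */
                }
                /* ... allocate intermediate shadow pages (not shown) ... */
        }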
2762 static int kvm_handle_bad_page(struct kvm_vcpu *vcpu, gfn_t gfn, pfn_t pfn) in kvm_handle_bad_page() argument
2770 if (pfn == KVM_PFN_ERR_RO_FAULT) in kvm_handle_bad_page()
2773 if (pfn == KVM_PFN_ERR_HWPOISON) { in kvm_handle_bad_page()
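kvm_handle_bad_page() separates the two special error pfns compared above; in this sketch the branch bodies and all return values are assumptions (only the two comparisons come from the listing).

static int kvm_handle_bad_page(struct kvm_vcpu *vcpu, gfn_t gfn, pfn_t pfn)
{
        if (pfn == KVM_PFN_ERR_RO_FAULT)
                return RET_MMIO_PF_EMULATE;     /* assumed: emulate instead */

        if (pfn == KVM_PFN_ERR_HWPOISON) {
                /* assumed: report the poisoned page to the current task */
                kvm_send_hwpoison_signal(gfn_to_hva(vcpu->kvm, gfn), current);
                return 0;
        }

        return -EFAULT;                         /* assumed: any other error */
}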
2784 pfn_t pfn = *pfnp; in transparent_hugepage_adjust() local
2794 if (!is_error_noslot_pfn(pfn) && !kvm_is_reserved_pfn(pfn) && in transparent_hugepage_adjust()
2796 PageTransCompound(pfn_to_page(pfn)) && in transparent_hugepage_adjust()
2810 VM_BUG_ON((gfn & mask) != (pfn & mask)); in transparent_hugepage_adjust()
2811 if (pfn & mask) { in transparent_hugepage_adjust()
2814 kvm_release_pfn_clean(pfn); in transparent_hugepage_adjust()
2815 pfn &= ~mask; in transparent_hugepage_adjust()
2816 kvm_get_pfn(pfn); in transparent_hugepage_adjust()
2817 *pfnp = pfn; in transparent_hugepage_adjust()
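The transparent_hugepage_adjust() fragments combine into the following adjustment: when the faulting pfn lies inside a transparent huge page, gfn and pfn are rounded down to the huge-page boundary and the page reference is moved to the head pfn. The local declarations, the mask computation and the level handling are assumptions.

        pfn_t pfn = *pfnp;
        gfn_t gfn = *gfnp;                              /* assumed */
        int level = *levelp;                            /* assumed */
        unsigned long mask;

        if (!is_error_noslot_pfn(pfn) && !kvm_is_reserved_pfn(pfn) &&
            PageTransCompound(pfn_to_page(pfn))) {
                mask = KVM_PAGES_PER_HPAGE(level) - 1;  /* assumed */
                /* gfn and pfn must be congruent modulo the huge-page size,
                 * or a large mapping could not cover both */
                VM_BUG_ON((gfn & mask) != (pfn & mask));
                if (pfn & mask) {
                        kvm_release_pfn_clean(pfn);     /* drop tail-page ref */
                        pfn &= ~mask;
                        kvm_get_pfn(pfn);               /* take head-pfn ref  */
                        *pfnp = pfn;
                }
        }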
2823 pfn_t pfn, unsigned access, int *ret_val) in handle_abnormal_pfn() argument
2828 if (unlikely(is_error_pfn(pfn))) { in handle_abnormal_pfn()
2829 *ret_val = kvm_handle_bad_page(vcpu, gfn, pfn); in handle_abnormal_pfn()
2833 if (unlikely(is_noslot_pfn(pfn))) in handle_abnormal_pfn()
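handle_abnormal_pfn() separates genuinely bad pfns (handed to kvm_handle_bad_page()) from mere no-slot pfns; the vcpu_cache_mmio_info() call and the boolean return values in this sketch are assumptions.

static bool handle_abnormal_pfn(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn,
                                pfn_t pfn, unsigned access, int *ret_val)
{
        if (unlikely(is_error_pfn(pfn))) {
                *ret_val = kvm_handle_bad_page(vcpu, gfn, pfn);
                return true;            /* caller returns *ret_val */
        }

        if (unlikely(is_noslot_pfn(pfn)))
                /* assumed: remember the MMIO access so later faults on the
                 * same gva can be handled without a page-table walk */
                vcpu_cache_mmio_info(vcpu, gva, gfn, access);

        return false;                   /* normal or MMIO pfn: keep going */
}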
2977 gva_t gva, pfn_t *pfn, bool write, bool *writable);
2986 pfn_t pfn; in nonpaging_map() local
3009 if (try_async_pf(vcpu, prefault, gfn, v, &pfn, write, &map_writable)) in nonpaging_map()
3012 if (handle_abnormal_pfn(vcpu, v, gfn, pfn, ACC_ALL, &r)) in nonpaging_map()
3020 transparent_hugepage_adjust(vcpu, &gfn, &pfn, &level); in nonpaging_map()
3021 r = __direct_map(vcpu, v, write, map_writable, level, gfn, pfn, in nonpaging_map()
3030 kvm_release_pfn_clean(pfn); in nonpaging_map()
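The pfn lifecycle through nonpaging_map() (lines 2986-3030), sketched: resolve the gfn to a pfn (possibly scheduling an async page fault), filter abnormal pfns, widen to a huge page where possible, map it with __direct_map(), and drop the reference on the bail-out path. The locking and the error handling are assumptions.

        if (try_async_pf(vcpu, prefault, gfn, v, &pfn, write, &map_writable))
                return 0;                       /* fault completes later */

        if (handle_abnormal_pfn(vcpu, v, gfn, pfn, ACC_ALL, &r))
                return r;

        spin_lock(&vcpu->kvm->mmu_lock);        /* assumed */
        transparent_hugepage_adjust(vcpu, &gfn, &pfn, &level);
        r = __direct_map(vcpu, v, write, map_writable, level, gfn, pfn,
                         prefault);
        spin_unlock(&vcpu->kvm->mmu_lock);      /* assumed */

        /* on the bail-out path (e.g. an mmu_notifier retry) the reference
         * taken by try_async_pf() is released instead:
         *      kvm_release_pfn_clean(pfn);
         */
        return r;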
3446 gva_t gva, pfn_t *pfn, bool write, bool *writable) in try_async_pf() argument
3453 *pfn = __gfn_to_pfn_memslot(slot, gfn, false, &async, write, writable); in try_async_pf()
3467 *pfn = __gfn_to_pfn_memslot(slot, gfn, false, NULL, write, writable); in try_async_pf()
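try_async_pf() resolves the pfn in two steps, matching the two __gfn_to_pfn_memslot() calls above: a first attempt that is allowed to defer the work (async), and a second, blocking attempt if an async page fault cannot be queued. The memslot lookup and the async-PF plumbing in between are assumptions.

static bool try_async_pf(struct kvm_vcpu *vcpu, bool prefault, gfn_t gfn,
                         gva_t gva, pfn_t *pfn, bool write, bool *writable)
{
        struct kvm_memory_slot *slot;
        bool async = false;

        slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);      /* assumed */
        *pfn = __gfn_to_pfn_memslot(slot, gfn, false, &async, write, writable);
        if (!async)
                return false;           /* *pfn already holds the answer */

        /* assumed: try to hand the fault to the async-PF machinery */
        if (!prefault && kvm_arch_setup_async_pf(vcpu, gva, gfn))
                return true;            /* guest resumes, fault finishes later */

        /* could not go async: do the blocking lookup now */
        *pfn = __gfn_to_pfn_memslot(slot, gfn, false, NULL, write, writable);
        return false;
}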
3484 pfn_t pfn; in tdp_page_fault() local
3522 if (try_async_pf(vcpu, prefault, gfn, gpa, &pfn, write, &map_writable)) in tdp_page_fault()
3525 if (handle_abnormal_pfn(vcpu, 0, gfn, pfn, ACC_ALL, &r)) in tdp_page_fault()
3533 transparent_hugepage_adjust(vcpu, &gfn, &pfn, &level); in tdp_page_fault()
3535 level, gfn, pfn, prefault); in tdp_page_fault()
3542 kvm_release_pfn_clean(pfn); in tdp_page_fault()
4637 pfn_t pfn; in kvm_mmu_zap_collapsible_spte() local
4643 pfn = spte_to_pfn(*sptep); in kvm_mmu_zap_collapsible_spte()
4653 !kvm_is_reserved_pfn(pfn) && in kvm_mmu_zap_collapsible_spte()
4654 PageTransCompound(pfn_to_page(pfn))) { in kvm_mmu_zap_collapsible_spte()
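Finally, kvm_mmu_zap_collapsible_spte() applies the same reserved-pfn / PageTransCompound() test to find small sptes that could be collapsed back into huge mappings; the rmap iteration, the level check and the drop_spte() call in this sketch are assumptions.

        for_each_rmap_spte(rmap_head, &iter, sptep) {   /* assumed */
                pfn = spte_to_pfn(*sptep);

                /* a small-page spte whose backing page is part of a
                 * transparent huge page is dropped so the next fault can
                 * rebuild it as a large mapping */
                if (sp->role.level == PT_PAGE_TABLE_LEVEL &&    /* assumed */
                    !kvm_is_reserved_pfn(pfn) &&
                    PageTransCompound(pfn_to_page(pfn)))
                        drop_spte(kvm, sptep);                  /* assumed */
        }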