Lines Matching refs:pfn
101 static bool kvm_is_device_pfn(unsigned long pfn) in kvm_is_device_pfn() argument
103 return !pfn_valid(pfn); in kvm_is_device_pfn()
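
The check at line 103 is the whole policy: a pfn with no struct page behind it (pfn_valid() fails) is treated as device/MMIO memory. A minimal userspace sketch of the same idea; fake_pfn_valid() and its RAM range are made up for illustration, only the !pfn_valid() shape comes from the listing.

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-in for pfn_valid(): pretend RAM covers pfns [0x1000, 0x2000). */
static bool fake_pfn_valid(unsigned long pfn)
{
	return pfn >= 0x1000 && pfn < 0x2000;
}

/* Same shape as line 103: no struct page behind the pfn => treat it as device/MMIO. */
static bool is_device_pfn(unsigned long pfn)
{
	return !fake_pfn_valid(pfn);
}

int main(void)
{
	printf("pfn 0x1800 device? %d\n", is_device_pfn(0x1800));	/* 0: RAM */
	printf("pfn 0x9000 device? %d\n", is_device_pfn(0x9000));	/* 1: MMIO */
	return 0;
}
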
452 unsigned long end, unsigned long pfn, in create_hyp_pte_mappings() argument
461 kvm_set_pte(pte, pfn_pte(pfn, prot)); in create_hyp_pte_mappings()
464 pfn++; in create_hyp_pte_mappings()
469 unsigned long end, unsigned long pfn, in create_hyp_pmd_mappings() argument
495 create_hyp_pte_mappings(pmd, addr, next, pfn, prot); in create_hyp_pmd_mappings()
496 pfn += (next - addr) >> PAGE_SHIFT; in create_hyp_pmd_mappings()
503 unsigned long end, unsigned long pfn, in create_hyp_pud_mappings() argument
527 ret = create_hyp_pmd_mappings(pud, addr, next, pfn, prot); in create_hyp_pud_mappings()
530 pfn += (next - addr) >> PAGE_SHIFT; in create_hyp_pud_mappings()
538 unsigned long pfn, pgprot_t prot) in __create_hyp_mappings() argument
564 err = create_hyp_pud_mappings(pgd, addr, next, pfn, prot); in __create_hyp_mappings()
567 pfn += (next - addr) >> PAGE_SHIFT; in __create_hyp_mappings()
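
Lines 452-567 are the hyp mapping walk: each level maps only the sub-range [addr, next) and then advances pfn by the number of pages just covered (lines 496, 530, 567), so the next chunk starts at the right physical page. A standalone sketch of that stepping, assuming 4 KiB pages and 2 MiB PMD sections; section_end() is only a stand-in for the kernel's pmd_addr_end().

#include <stdio.h>

#define PAGE_SHIFT	12
#define PMD_SIZE	(1UL << 21)		/* assume 2 MiB sections */
#define PMD_MASK	(~(PMD_SIZE - 1))

/* Boundary after addr within its PMD section, clamped to end (like pmd_addr_end()). */
static unsigned long section_end(unsigned long addr, unsigned long end)
{
	unsigned long next = (addr + PMD_SIZE) & PMD_MASK;
	return next < end ? next : end;
}

int main(void)
{
	unsigned long addr = 0x40100000UL, end = 0x40500000UL;
	unsigned long pfn = 0x80100UL;		/* starting pfn of the backing pages */

	while (addr < end) {
		unsigned long next = section_end(addr, end);
		printf("map VA [%#lx, %#lx) starting at pfn %#lx\n", addr, next, pfn);
		/* Same stepping as lines 496/530/567: next - addr is a whole number
		 * of pages, so this lands pfn exactly past the chunk just mapped. */
		pfn += (next - addr) >> PAGE_SHIFT;
		addr = next;
	}
	return 0;
}
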
969 unsigned long pfn; in kvm_phys_addr_ioremap() local
973 pfn = __phys_to_pfn(pa); in kvm_phys_addr_ioremap()
976 pte_t pte = pfn_pte(pfn, PAGE_S2_DEVICE); in kvm_phys_addr_ioremap()
992 pfn++; in kvm_phys_addr_ioremap()
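
kvm_phys_addr_ioremap (lines 969-992) converts the physical base to a pfn once and then installs one device PTE per page, bumping pfn as it goes. A sketch of that loop with __phys_to_pfn() written out as the shift it amounts to; install_device_pte() is a placeholder, not a KVM function.

#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)

/* Placeholder for stage-2 PTE installation; just report what would be mapped. */
static void install_device_pte(unsigned long ipa, unsigned long pfn)
{
	printf("IPA %#lx -> pfn %#lx (device)\n", ipa, pfn);
}

int main(void)
{
	unsigned long pa = 0x10001000UL;	/* physical base of the device region */
	unsigned long ipa = 0x3f000000UL;	/* guest IPA it should appear at */
	unsigned long size = 4 * PAGE_SIZE;

	unsigned long pfn = pa >> PAGE_SHIFT;	/* what __phys_to_pfn(pa) does, line 973 */
	unsigned long end = ipa + size;

	for (; ipa < end; ipa += PAGE_SIZE, pfn++)	/* pfn++ as at line 992 */
		install_device_pte(ipa, pfn);
	return 0;
}
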
1002 pfn_t pfn = *pfnp; in transparent_hugepage_adjust() local
1005 if (PageTransCompound(pfn_to_page(pfn))) { in transparent_hugepage_adjust()
1026 VM_BUG_ON((gfn & mask) != (pfn & mask)); in transparent_hugepage_adjust()
1027 if (pfn & mask) { in transparent_hugepage_adjust()
1029 kvm_release_pfn_clean(pfn); in transparent_hugepage_adjust()
1030 pfn &= ~mask; in transparent_hugepage_adjust()
1031 kvm_get_pfn(pfn); in transparent_hugepage_adjust()
1032 *pfnp = pfn; in transparent_hugepage_adjust()
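
transparent_hugepage_adjust (lines 1002-1032) only promotes the mapping when gfn and pfn share the same offset inside a PMD-sized block, then rounds the pfn down to the block's head page (moving the page reference from the tail page to the head, lines 1029-1031). A sketch of the mask arithmetic, assuming 4 KiB pages and 512 PTEs per PMD; the gfn/pfn values are invented.

#include <assert.h>
#include <stdio.h>

#define PTRS_PER_PMD	512UL	/* assume 4 KiB pages, 2 MiB PMD blocks */

int main(void)
{
	unsigned long mask = PTRS_PER_PMD - 1;		/* 0x1ff */
	unsigned long gfn = 0x12345, pfn = 0x89345;	/* same offset within the block */

	/* Same invariant as the VM_BUG_ON at line 1026. */
	assert((gfn & mask) == (pfn & mask));

	if (pfn & mask) {
		/* The kernel also drops the tail-page ref and takes one on the head. */
		pfn &= ~mask;				/* line 1030: align to block start */
	}
	printf("head pfn %#lx, fault offset %#lx pages\n", pfn, gfn & mask);
	return 0;
}
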
1208 static void coherent_cache_guest_page(struct kvm_vcpu *vcpu, pfn_t pfn, in coherent_cache_guest_page() argument
1211 __coherent_cache_guest_page(vcpu, pfn, size, uncached); in coherent_cache_guest_page()
1225 pfn_t pfn; in user_mem_abort() local
1283 pfn = gfn_to_pfn_prot(kvm, gfn, write_fault, &writable); in user_mem_abort()
1284 if (is_error_pfn(pfn)) in user_mem_abort()
1287 if (kvm_is_device_pfn(pfn)) { in user_mem_abort()
1312 hugetlb = transparent_hugepage_adjust(&pfn, &fault_ipa); in user_mem_abort()
1317 pmd_t new_pmd = pfn_pmd(pfn, mem_type); in user_mem_abort()
1321 kvm_set_pfn_dirty(pfn); in user_mem_abort()
1323 coherent_cache_guest_page(vcpu, pfn, PMD_SIZE, fault_ipa_uncached); in user_mem_abort()
1326 pte_t new_pte = pfn_pte(pfn, mem_type); in user_mem_abort()
1330 kvm_set_pfn_dirty(pfn); in user_mem_abort()
1333 coherent_cache_guest_page(vcpu, pfn, PAGE_SIZE, fault_ipa_uncached); in user_mem_abort()
1339 kvm_set_pfn_accessed(pfn); in user_mem_abort()
1340 kvm_release_pfn_clean(pfn); in user_mem_abort()
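
In user_mem_abort (lines 1225-1340) the pfn returned by gfn_to_pfn_prot carries a page reference across the whole fault path: error and device checks, an optional THP promotion, a PMD- or PTE-sized stage-2 mapping, dirty marking for writable faults, and finally the accessed/release calls at lines 1339-1340. A loose sketch of that shape with stubbed helpers; none of the stubs below are the real KVM functions and every constant is invented.

#include <stdbool.h>
#include <stdio.h>

enum mem_type { MT_NORMAL, MT_DEVICE };

/* Stubs standing in for the KVM helpers; the values are made up. */
static long lookup_pfn(unsigned long gfn, bool write, bool *writable)
{
	*writable = write;
	return 0x89000L + (long)(gfn & 0x1ff);
}
static bool pfn_is_error(long pfn)   { return pfn < 0; }
static bool pfn_is_device(long pfn)  { return pfn >= 0x100000; }
static bool thp_adjust(long *pfn)    { *pfn &= ~0x1ffL; return true; }

static void handle_fault(unsigned long gfn, bool write_fault)
{
	bool writable, hugepage = false;
	enum mem_type mt = MT_NORMAL;
	long pfn = lookup_pfn(gfn, write_fault, &writable);	/* takes a page ref */

	if (pfn_is_error(pfn))
		return;
	if (pfn_is_device(pfn))
		mt = MT_DEVICE;			/* map with device attributes */
	else
		hugepage = thp_adjust(&pfn);	/* may round pfn down to a 2 MiB block */

	printf("install %s %s map at pfn %#lx%s\n",
	       hugepage ? "PMD-sized" : "PTE-sized",
	       mt == MT_DEVICE ? "device" : "normal",
	       (unsigned long)pfn, writable ? ", mark pfn dirty" : "");
	/* The real code finishes with kvm_set_pfn_accessed() and kvm_release_pfn_clean(). */
}

int main(void)
{
	handle_fault(0x12345, true);
	return 0;
}
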
1353 pfn_t pfn; in handle_access_fault() local
1366 pfn = pmd_pfn(*pmd); in handle_access_fault()
1376 pfn = pte_pfn(*pte); in handle_access_fault()
1381 kvm_set_pfn_accessed(pfn); in handle_access_fault()
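
handle_access_fault (lines 1353-1381) walks the stage-2 tables back from the faulting address, takes the pfn from the PMD for a block mapping (line 1366) or from the PTE otherwise (line 1376), and marks that page accessed. A sketch with toy descriptors; the bit layout below is invented purely to keep the example self-contained.

#include <stdbool.h>
#include <stdio.h>

#define PAGE_SHIFT	12

/* Toy stage-2 entries: bit 0 = present, bit 1 = huge (PMD-level) mapping.
 * Real descriptors are more involved; this only mirrors the control flow. */
struct entry { unsigned long val; };

static bool present(struct entry e) { return e.val & 1; }
static bool is_huge(struct entry e) { return e.val & 2; }
static unsigned long entry_pfn(struct entry e) { return e.val >> PAGE_SHIFT; }

static void access_fault(struct entry pmd, struct entry pte)
{
	unsigned long pfn;

	if (!present(pmd))
		return;
	if (is_huge(pmd))
		pfn = entry_pfn(pmd);	/* like pmd_pfn(*pmd) at line 1366 */
	else if (present(pte))
		pfn = entry_pfn(pte);	/* like pte_pfn(*pte) at line 1376 */
	else
		return;

	printf("mark pfn %#lx accessed\n", pfn);	/* kvm_set_pfn_accessed() */
}

int main(void)
{
	struct entry pmd = { (0x89200UL << PAGE_SHIFT) | 3 };	/* present + huge */
	struct entry pte = { 0 };

	access_fault(pmd, pte);
	return 0;
}
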