Lines matching refs: pfn

166 pfn_t pfn; in kvmppc_map_magic() local
168 pfn = (pfn_t)virt_to_phys((void *)shared_page) >> PAGE_SHIFT; in kvmppc_map_magic()
169 get_page(pfn_to_page(pfn)); in kvmppc_map_magic()
177 magic.mas7_3 = ((u64)pfn << PAGE_SHIFT) | in kvmppc_map_magic()
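
The three lines above are the whole magic-page story: the shared page is kernel memory, so its pfn comes from virt_to_phys() rather than from a memslot, the backing page is pinned with get_page(), and the pfn is shifted back into the RPN field of MAS7_3. A condensed sketch, assuming the shared_page/magic variables and the MAS3 permission bits from the surrounding function:

	ulong shared_page = ((ulong)vcpu->arch.shared) & PAGE_MASK;	/* assumed context */
	pfn_t pfn;

	pfn = (pfn_t)virt_to_phys((void *)shared_page) >> PAGE_SHIFT;
	get_page(pfn_to_page(pfn));			/* pin the backing page */

	magic.mas7_3 = ((u64)pfn << PAGE_SHIFT) |	/* RPN into MAS7/MAS3 */
		       MAS3_SW | MAS3_SR | MAS3_UW | MAS3_UR;	/* assumed permission bits */
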
249 pfn_t pfn, unsigned int wimg) in kvmppc_e500_ref_setup() argument
251 ref->pfn = pfn; in kvmppc_e500_ref_setup()
258 kvm_set_pfn_accessed(pfn); in kvmppc_e500_ref_setup()
261 kvm_set_pfn_dirty(pfn); in kvmppc_e500_ref_setup()
268 trace_kvm_booke206_ref_release(ref->pfn, ref->flags); in kvmppc_e500_ref_release()
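
Lines 249-268 are the reference bookkeeping: the host pfn is cached in a tlbe_ref, the page is marked accessed (and dirty only when the guest mapping is writable), and the trace point fires when the reference is dropped. A minimal sketch, assuming the flag and helper names from the e500 MMU headers (E500_TLB_VALID, MAS2_ATTRIB_MASK, tlbe_is_writable):

	ref->pfn = pfn;
	ref->flags = E500_TLB_VALID | (gtlbe->mas2 & MAS2_ATTRIB_MASK) | wimg;

	kvm_set_pfn_accessed(pfn);
	if (tlbe_is_writable(gtlbe))
		kvm_set_pfn_dirty(pfn);

	/* On release the cached pfn is only reported, not put back here: */
	if (ref->flags & E500_TLB_VALID) {
		trace_kvm_booke206_ref_release(ref->pfn, ref->flags);
		ref->flags = 0;
	}
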
312 pfn_t pfn = ref->pfn; in kvmppc_e500_setup_stlbe() local
320 stlbe->mas7_3 = ((u64)pfn << PAGE_SHIFT) | in kvmppc_e500_setup_stlbe()
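
Lines 312-320 show where the cached pfn is consumed: it is shifted back into the RPN field of MAS7_3 when the shadow TLB entry is assembled. A sketch of that step, with the MAS1/MAS2 fields and the e500_shadow_mas3_attrib() helper assumed from the surrounding file:

	pfn_t pfn = ref->pfn;

	stlbe->mas1 = MAS1_TSIZE(tsize) | get_tlb_sts(gtlbe) | MAS1_VALID;
	stlbe->mas2 = (gvaddr & MAS2_EPN) | (ref->flags & E500_TLB_MAS2_ATTR);
	stlbe->mas7_3 = ((u64)pfn << PAGE_SHIFT) |
			e500_shadow_mas3_attrib(gtlbe->mas7_3,
						vcpu->arch.shared->msr & MSR_PR);
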
330 unsigned long pfn = 0; /* silence GCC warning */ in kvmppc_e500_shadow_map() local
381 pfn = start + ((hva - vma->vm_start) >> PAGE_SHIFT); in kvmppc_e500_shadow_map()
383 slot_start = pfn - (gfn - slot->base_gfn); in kvmppc_e500_shadow_map()
414 if (gfn_start + pfn - gfn < start) in kvmppc_e500_shadow_map()
416 if (gfn_end + pfn - gfn > end) in kvmppc_e500_shadow_map()
419 (pfn & (tsize_pages - 1))) in kvmppc_e500_shadow_map()
423 pfn &= ~(tsize_pages - 1); in kvmppc_e500_shadow_map()
451 pfn = gfn_to_pfn_memslot(slot, gfn); in kvmppc_e500_shadow_map()
452 if (is_error_noslot_pfn(pfn)) { in kvmppc_e500_shadow_map()
460 pfn &= ~(tsize_pages - 1); in kvmppc_e500_shadow_map()
490 __func__, (long)gfn, pfn); in kvmppc_e500_shadow_map()
495 kvmppc_e500_ref_setup(ref, gtlbe, pfn, wimg); in kvmppc_e500_shadow_map()
501 kvmppc_mmu_flush_icache(pfn); in kvmppc_e500_shadow_map()
507 kvm_release_pfn_clean(pfn); in kvmppc_e500_shadow_map()
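
Lines 330-507 trace pfn through kvmppc_e500_shadow_map(): a VMA walk supplies the pfn for direct/huge mappings, otherwise gfn_to_pfn_memslot() does, the pfn and guest address are aligned to the chosen mapping size, the ref and shadow entry are set up, the I-cache is flushed, and the page reference is dropped. A condensed, assumption-laden sketch of the common (memslot) path only; the VM_PFNMAP branch, locking and most error handling are omitted, and the function name and some parameters are illustrative, not the kernel's:

	static int shadow_map_pfn_sketch(struct kvm_memory_slot *slot,
					 struct tlbe_ref *ref,
					 struct kvm_book3e_206_tlb_entry *gtlbe,
					 u64 gvaddr, gfn_t gfn, int tsize,
					 unsigned int wimg)
	{
		unsigned long tsize_pages = 1UL << (tsize + 10 - PAGE_SHIFT);
		unsigned long pfn;

		/* Translate the guest frame to a host frame via the memslot. */
		pfn = gfn_to_pfn_memslot(slot, gfn);
		if (is_error_noslot_pfn(pfn))
			return -EINVAL;		/* no host page backs this gfn */

		/* Align guest and physical addresses to the mapping size. */
		pfn &= ~(tsize_pages - 1);
		gvaddr &= ~((tsize_pages << PAGE_SHIFT) - 1);

		/* Cache the pfn and keep the I-cache coherent for new pages. */
		kvmppc_e500_ref_setup(ref, gtlbe, pfn, wimg);
		kvmppc_mmu_flush_icache(pfn);

		/* Drop the reference so MMU notifiers can reclaim the page. */
		kvm_release_pfn_clean(pfn);
		return 0;
	}
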
632 hfn_t pfn; in kvmppc_load_last_inst() local
699 pfn = addr >> PAGE_SHIFT; in kvmppc_load_last_inst()
702 if (unlikely(!page_is_ram(pfn))) { in kvmppc_load_last_inst()
709 page = pfn_to_page(pfn); in kvmppc_load_last_inst()
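
Lines 632-709 are the last-instruction fetch: the real address recovered from the shadow TLB is turned into a pfn, rejected if it does not point at RAM, and only then converted to a struct page so the guest instruction can be read. A short sketch of that tail, with the kmap_atomic() read assumed from the surrounding function rather than quoted above:

	hfn_t pfn;
	struct page *page;
	void *vaddr;
	u32 instr;

	pfn = addr >> PAGE_SHIFT;
	if (unlikely(!page_is_ram(pfn)))
		return EMULATE_AGAIN;	/* refuse to emulate from device memory */

	page = pfn_to_page(pfn);
	vaddr = kmap_atomic(page);
	instr = *(u32 *)(vaddr + (addr & ~PAGE_MASK));
	kunmap_atomic(vaddr);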