Lines matching refs: gfn
231 static void mark_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, u64 gfn, in mark_mmio_spte() argument
238 mask |= shadow_mmio_mask | access | gfn << PAGE_SHIFT; in mark_mmio_spte()
240 trace_mark_mmio_spte(sptep, gfn, access, gen); in mark_mmio_spte()
261 static bool set_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, gfn_t gfn, in set_mmio_spte() argument
265 mark_mmio_spte(vcpu, sptep, gfn, access); in set_mmio_spte()
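The MMIO path above (lines 231-265) stores the gfn directly inside the SPTE: line 238 ORs the frame number, shifted by PAGE_SHIFT, together with the MMIO marker and access bits. A minimal sketch of that packing and the reverse lookup follows; the mask values are placeholders, not the kernel's real layout (which also folds in a generation number):

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT       12
#define SHADOW_MMIO_MASK (3ULL << 62)   /* assumed marker bits */
#define ACCESS_MASK      0xfffULL       /* assumed: access bits live below the gfn */

static uint64_t pack_mmio_spte(uint64_t gfn, uint64_t access)
{
        /* Marker bits + access bits + frame number, as at line 238. */
        return SHADOW_MMIO_MASK | (access & ACCESS_MASK) | (gfn << PAGE_SHIFT);
}

static uint64_t mmio_spte_gfn(uint64_t spte)
{
        /* Strip the marker and access bits, keep the frame number. */
        return (spte & ~SHADOW_MMIO_MASK & ~ACCESS_MASK) >> PAGE_SHIFT;
}

int main(void)
{
        uint64_t spte = pack_mmio_spte(0x12345, 0x7);
        printf("gfn recovered: %#llx\n",
               (unsigned long long)mmio_spte_gfn(spte));   /* prints 0x12345 */
        return 0;
}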
759 return sp->gfn + (index << ((sp->role.level - 1) * PT64_LEVEL_BITS)); in kvm_mmu_page_get_gfn()
762 static void kvm_mmu_page_set_gfn(struct kvm_mmu_page *sp, int index, gfn_t gfn) in kvm_mmu_page_set_gfn() argument
765 BUG_ON(gfn != kvm_mmu_page_get_gfn(sp, index)); in kvm_mmu_page_set_gfn()
767 sp->gfns[index] = gfn; in kvm_mmu_page_set_gfn()
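Line 759 reconstructs the gfn backing a given SPTE slot from the shadow page's base gfn plus the slot index, scaled by the page-table level. A compilable restatement of that arithmetic (PT64_LEVEL_BITS is 9 on x86-64; the surrounding struct fields are elided here):

#include <stdint.h>

typedef uint64_t gfn_t;
#define PT64_LEVEL_BITS 9   /* 512 entries per table on x86-64 */

/* For a direct shadow page: entry `index` at `level` covers
 * 512^(level - 1) 4KiB frames starting at this gfn. */
static inline gfn_t direct_sp_entry_gfn(gfn_t sp_base_gfn, int level, int index)
{
        return sp_base_gfn + ((gfn_t)index << ((level - 1) * PT64_LEVEL_BITS));
}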
774 static struct kvm_lpage_info *lpage_info_slot(gfn_t gfn, in lpage_info_slot() argument
780 idx = gfn_to_index(gfn, slot->base_gfn, level); in lpage_info_slot()
789 gfn_t gfn; in account_shadowed() local
792 gfn = sp->gfn; in account_shadowed()
794 slot = __gfn_to_memslot(slots, gfn); in account_shadowed()
796 linfo = lpage_info_slot(gfn, slot, i); in account_shadowed()
807 gfn_t gfn; in unaccount_shadowed() local
810 gfn = sp->gfn; in unaccount_shadowed()
812 slot = __gfn_to_memslot(slots, gfn); in unaccount_shadowed()
814 linfo = lpage_info_slot(gfn, slot, i); in unaccount_shadowed()
821 static int __has_wrprotected_page(gfn_t gfn, int level, in __has_wrprotected_page() argument
827 linfo = lpage_info_slot(gfn, slot, level); in __has_wrprotected_page()
834 static int has_wrprotected_page(struct kvm_vcpu *vcpu, gfn_t gfn, int level) in has_wrprotected_page() argument
838 slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn); in has_wrprotected_page()
839 return __has_wrprotected_page(gfn, level, slot); in has_wrprotected_page()
842 static int host_mapping_level(struct kvm *kvm, gfn_t gfn) in host_mapping_level() argument
847 page_size = kvm_host_page_size(kvm, gfn); in host_mapping_level()
871 gfn_to_memslot_dirty_bitmap(struct kvm_vcpu *vcpu, gfn_t gfn, in gfn_to_memslot_dirty_bitmap() argument
876 slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn); in gfn_to_memslot_dirty_bitmap()
1035 static unsigned long *__gfn_to_rmap(gfn_t gfn, int level, in __gfn_to_rmap() argument
1040 idx = gfn_to_index(gfn, slot->base_gfn, level); in __gfn_to_rmap()
1047 static unsigned long *gfn_to_rmap(struct kvm *kvm, gfn_t gfn, struct kvm_mmu_page *sp) in gfn_to_rmap() argument
1053 slot = __gfn_to_memslot(slots, gfn); in gfn_to_rmap()
1054 return __gfn_to_rmap(gfn, sp->role.level, slot); in gfn_to_rmap()
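Both lpage_info_slot() (line 780) and __gfn_to_rmap() (line 1040) index per-memslot arrays with gfn_to_index(), i.e. the gfn's distance from the slot's base_gfn in units of the page size for the given level. A sketch of that helper, assuming the usual 9-bits-per-level layout:

#include <stdint.h>

typedef uint64_t gfn_t;

/* 4KiB frames at level 1, 2MiB at level 2, 1GiB at level 3. */
#define KVM_HPAGE_GFN_SHIFT(level) (((level) - 1) * 9)

static inline unsigned long gfn_to_index(gfn_t gfn, gfn_t base_gfn, int level)
{
        return (gfn >> KVM_HPAGE_GFN_SHIFT(level)) -
               (base_gfn >> KVM_HPAGE_GFN_SHIFT(level));
}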
1065 static int rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn) in rmap_add() argument
1071 kvm_mmu_page_set_gfn(sp, spte - sp->spt, gfn); in rmap_add()
1072 rmapp = gfn_to_rmap(vcpu->kvm, gfn, sp); in rmap_add()
1079 gfn_t gfn; in rmap_remove() local
1083 gfn = kvm_mmu_page_get_gfn(sp, spte - sp->spt); in rmap_remove()
1084 rmapp = gfn_to_rmap(kvm, gfn, sp); in rmap_remove()
1342 static bool rmap_write_protect(struct kvm_vcpu *vcpu, u64 gfn) in rmap_write_protect() argument
1349 slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn); in rmap_write_protect()
1352 rmapp = __gfn_to_rmap(gfn, i, slot); in rmap_write_protect()
1377 struct kvm_memory_slot *slot, gfn_t gfn, int level, in kvm_unmap_rmapp() argument
1384 struct kvm_memory_slot *slot, gfn_t gfn, int level, in kvm_set_pte_rmapp() argument
1400 sptep, *sptep, gfn, level); in kvm_set_pte_rmapp()
1435 gfn_t gfn; member
1447 iterator->gfn = iterator->start_gfn; in rmap_walk_init_level()
1448 iterator->rmap = __gfn_to_rmap(iterator->gfn, level, iterator->slot); in rmap_walk_init_level()
1475 iterator->gfn += (1UL << KVM_HPAGE_GFN_SHIFT(iterator->level)); in slot_rmap_walk_next()
1501 gfn_t gfn, in kvm_handle_hva_range() argument
1534 iterator.gfn, iterator.level, data); in kvm_handle_hva_range()
1545 gfn_t gfn, int level, in kvm_handle_hva() argument
1567 struct kvm_memory_slot *slot, gfn_t gfn, int level, in kvm_age_rmapp() argument
1583 trace_kvm_age_page(gfn, level, slot, young); in kvm_age_rmapp()
1588 struct kvm_memory_slot *slot, gfn_t gfn, in kvm_test_age_rmapp() argument
1614 static void rmap_recycle(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn) in rmap_recycle() argument
1621 rmapp = gfn_to_rmap(vcpu->kvm, gfn, sp); in rmap_recycle()
1623 kvm_unmap_rmapp(vcpu->kvm, rmapp, NULL, gfn, sp->role.level, 0); in rmap_recycle()
1696 static unsigned kvm_page_table_hashfn(gfn_t gfn) in kvm_page_table_hashfn() argument
1698 return gfn & ((1 << KVM_MMU_HASH_SHIFT) - 1); in kvm_page_table_hashfn()
1889 if ((_sp)->gfn != (_gfn)) {} else
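The hash at line 1698 keeps only the low KVM_MMU_HASH_SHIFT bits of the gfn, so distinct gfns can land in the same mmu_page_hash bucket; that is why the iterator at line 1889 still compares sp->gfn against the requested gfn. A small illustration (the shift value is an assumption here):

#include <stdint.h>
#include <stdio.h>

typedef uint64_t gfn_t;
#define KVM_MMU_HASH_SHIFT 10   /* assumed; the real value lives in the headers */

static unsigned kvm_page_table_hashfn(gfn_t gfn)
{
        return gfn & ((1 << KVM_MMU_HASH_SHIFT) - 1);   /* bucket index */
}

int main(void)
{
        /* 0x1234 and 0x5634 share the low 10 bits -> same bucket,
         * so the per-sp gfn check is still required. */
        printf("%u %u\n", kvm_page_table_hashfn(0x1234),
                          kvm_page_table_hashfn(0x5634));
        return 0;
}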
1943 static void kvm_sync_pages(struct kvm_vcpu *vcpu, gfn_t gfn) in kvm_sync_pages() argument
1949 for_each_gfn_indirect_valid_sp(vcpu->kvm, s, gfn) { in kvm_sync_pages()
2041 protected |= rmap_write_protect(vcpu, sp->gfn); in mmu_sync_children()
2082 gfn_t gfn, in kvm_mmu_get_page() argument
2106 for_each_gfn_sp(vcpu->kvm, sp, gfn) { in kvm_mmu_get_page()
2134 sp->gfn = gfn; in kvm_mmu_get_page()
2137 &vcpu->kvm->arch.mmu_page_hash[kvm_page_table_hashfn(gfn)]); in kvm_mmu_get_page()
2139 if (rmap_write_protect(vcpu, gfn)) in kvm_mmu_get_page()
2142 kvm_sync_pages(vcpu, gfn); in kvm_mmu_get_page()
2415 int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn) in kvm_mmu_unprotect_page() argument
2421 pgprintk("%s: looking for gfn %llx\n", __func__, gfn); in kvm_mmu_unprotect_page()
2424 for_each_gfn_indirect_valid_sp(kvm, sp, gfn) { in kvm_mmu_unprotect_page()
2425 pgprintk("%s: gfn %llx role %x\n", __func__, gfn, in kvm_mmu_unprotect_page()
2446 static void kvm_unsync_pages(struct kvm_vcpu *vcpu, gfn_t gfn) in kvm_unsync_pages() argument
2450 for_each_gfn_indirect_valid_sp(vcpu->kvm, s, gfn) { in kvm_unsync_pages()
2458 static int mmu_need_write_protect(struct kvm_vcpu *vcpu, gfn_t gfn, in mmu_need_write_protect() argument
2464 for_each_gfn_indirect_valid_sp(vcpu->kvm, s, gfn) { in mmu_need_write_protect()
2475 kvm_unsync_pages(vcpu, gfn); in mmu_need_write_protect()
2489 gfn_t gfn, pfn_t pfn, bool speculative, in set_spte() argument
2495 if (set_mmio_spte(vcpu, sptep, gfn, pfn, pte_access)) in set_spte()
2513 spte |= kvm_x86_ops->get_mt_mask(vcpu, gfn, in set_spte()
2532 has_wrprotected_page(vcpu, gfn, level)) in set_spte()
2546 if (mmu_need_write_protect(vcpu, gfn, can_unsync)) { in set_spte()
2548 __func__, gfn); in set_spte()
2556 kvm_vcpu_mark_page_dirty(vcpu, gfn); in set_spte()
2569 int level, gfn_t gfn, pfn_t pfn, bool speculative, in mmu_set_spte() argument
2576 *sptep, write_fault, gfn); in mmu_set_spte()
2600 if (set_spte(vcpu, sptep, pte_access, level, gfn, pfn, speculative, in mmu_set_spte()
2613 *sptep & PT_PRESENT_MASK ?"RW":"R", gfn, in mmu_set_spte()
2620 rmap_count = rmap_add(vcpu, sptep, gfn); in mmu_set_spte()
2622 rmap_recycle(vcpu, sptep, gfn); in mmu_set_spte()
2629 static pfn_t pte_prefetch_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn, in pte_prefetch_gfn_to_pfn() argument
2634 slot = gfn_to_memslot_dirty_bitmap(vcpu, gfn, no_dirty_log); in pte_prefetch_gfn_to_pfn()
2638 return gfn_to_pfn_memslot_atomic(slot, gfn); in pte_prefetch_gfn_to_pfn()
2649 gfn_t gfn; in direct_pte_prefetch_many() local
2651 gfn = kvm_mmu_page_get_gfn(sp, start - sp->spt); in direct_pte_prefetch_many()
2652 slot = gfn_to_memslot_dirty_bitmap(vcpu, gfn, access & ACC_WRITE_MASK); in direct_pte_prefetch_many()
2656 ret = gfn_to_page_many_atomic(slot, gfn, pages, end - start); in direct_pte_prefetch_many()
2660 for (i = 0; i < ret; i++, gfn++, start++) in direct_pte_prefetch_many()
2662 sp->role.level, gfn, page_to_pfn(pages[i]), in direct_pte_prefetch_many()
2712 int map_writable, int level, gfn_t gfn, pfn_t pfn, in __direct_map() argument
2723 for_each_shadow_entry(vcpu, (u64)gfn << PAGE_SHIFT, iterator) { in __direct_map()
2726 write, &emulate, level, gfn, pfn, in __direct_map()
2762 static int kvm_handle_bad_page(struct kvm_vcpu *vcpu, gfn_t gfn, pfn_t pfn) in kvm_handle_bad_page() argument
2774 kvm_send_hwpoison_signal(kvm_vcpu_gfn_to_hva(vcpu, gfn), current); in kvm_handle_bad_page()
2785 gfn_t gfn = *gfnp; in transparent_hugepage_adjust() local
2797 !has_wrprotected_page(vcpu, gfn, PT_DIRECTORY_LEVEL)) { in transparent_hugepage_adjust()
2810 VM_BUG_ON((gfn & mask) != (pfn & mask)); in transparent_hugepage_adjust()
2812 gfn &= ~mask; in transparent_hugepage_adjust()
2813 *gfnp = gfn; in transparent_hugepage_adjust()
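Lines 2810-2813 round the gfn (and the matching pfn) down to a huge-page boundary once the fault can be mapped at a larger level; the VM_BUG_ON at line 2810 holds because guest and host frames must share the same offset within the huge page. A sketch of that adjustment, with the x86-64 value of 512 entries per level assumed:

#include <stdint.h>
#include <assert.h>

typedef uint64_t gfn_t;
typedef uint64_t pfn_t;

#define KVM_PAGES_PER_HPAGE(level) (1ULL << (((level) - 1) * 9))

static inline void hugepage_align(gfn_t *gfnp, pfn_t *pfnp, int level)
{
        gfn_t mask = KVM_PAGES_PER_HPAGE(level) - 1;

        /* Guest and host frames must sit at the same offset in the huge page. */
        assert((*gfnp & mask) == (*pfnp & mask));
        *gfnp &= ~mask;      /* round the gfn down to the huge-page base */
        *pfnp &= ~mask;      /* ... and map from the matching host base  */
}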
2822 static bool handle_abnormal_pfn(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn, in handle_abnormal_pfn() argument
2829 *ret_val = kvm_handle_bad_page(vcpu, gfn, pfn); in handle_abnormal_pfn()
2834 vcpu_cache_mmio_info(vcpu, gva, gfn, access); in handle_abnormal_pfn()
2866 gfn_t gfn; in fast_pf_fix_direct_spte() local
2874 gfn = kvm_mmu_page_get_gfn(sp, sptep - sp->spt); in fast_pf_fix_direct_spte()
2889 kvm_vcpu_mark_page_dirty(vcpu, gfn); in fast_pf_fix_direct_spte()
2976 static bool try_async_pf(struct kvm_vcpu *vcpu, bool prefault, gfn_t gfn,
2981 gfn_t gfn, bool prefault) in nonpaging_map() argument
2990 level = mapping_level(vcpu, gfn, &force_pt_level); in nonpaging_map()
3000 gfn &= ~(KVM_PAGES_PER_HPAGE(level) - 1); in nonpaging_map()
3009 if (try_async_pf(vcpu, prefault, gfn, v, &pfn, write, &map_writable)) in nonpaging_map()
3012 if (handle_abnormal_pfn(vcpu, v, gfn, pfn, ACC_ALL, &r)) in nonpaging_map()
3020 transparent_hugepage_adjust(vcpu, &gfn, &pfn, &level); in nonpaging_map()
3021 r = __direct_map(vcpu, v, write, map_writable, level, gfn, pfn, in nonpaging_map()
3375 gfn_t gfn = get_mmio_spte_gfn(spte); in handle_mmio_page_fault() local
3384 trace_handle_mmio_page_fault(addr, gfn, access); in handle_mmio_page_fault()
3385 vcpu_cache_mmio_info(vcpu, addr, gfn, access); in handle_mmio_page_fault()
3400 gfn_t gfn; in nonpaging_page_fault() local
3418 gfn = gva >> PAGE_SHIFT; in nonpaging_page_fault()
3421 error_code, gfn, prefault); in nonpaging_page_fault()
3424 static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn) in kvm_arch_setup_async_pf() argument
3429 arch.gfn = gfn; in kvm_arch_setup_async_pf()
3433 return kvm_setup_async_pf(vcpu, gva, kvm_vcpu_gfn_to_hva(vcpu, gfn), &arch); in kvm_arch_setup_async_pf()
3445 static bool try_async_pf(struct kvm_vcpu *vcpu, bool prefault, gfn_t gfn, in try_async_pf() argument
3451 slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn); in try_async_pf()
3453 *pfn = __gfn_to_pfn_memslot(slot, gfn, false, &async, write, writable); in try_async_pf()
3458 trace_kvm_try_async_get_page(gva, gfn); in try_async_pf()
3459 if (kvm_find_async_pf_gfn(vcpu, gfn)) { in try_async_pf()
3460 trace_kvm_async_pf_doublefault(gva, gfn); in try_async_pf()
3463 } else if (kvm_arch_setup_async_pf(vcpu, gva, gfn)) in try_async_pf()
3467 *pfn = __gfn_to_pfn_memslot(slot, gfn, false, NULL, write, writable); in try_async_pf()
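try_async_pf() (line 3445) turns the faulting gfn into a host pfn via its memslot, falling back to an async page fault when the page is not resident. The gfn-to-hva step it ultimately relies on is a simple linear offset within the slot; a sketch under assumed, pared-down struct fields:

#include <stdint.h>

typedef uint64_t gfn_t;
#define PAGE_SIZE 4096UL

/* Reduced memslot: only the fields the translation needs. */
struct memslot {
        gfn_t         base_gfn;        /* first guest frame covered by the slot */
        unsigned long npages;
        unsigned long userspace_addr;  /* host virtual address backing base_gfn */
};

/* gfn -> host virtual address: a linear offset inside the slot. */
static inline unsigned long gfn_to_hva_slot(const struct memslot *slot, gfn_t gfn)
{
        return slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE;
}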
3472 check_hugepage_cache_consistency(struct kvm_vcpu *vcpu, gfn_t gfn, int level) in check_hugepage_cache_consistency() argument
3476 gfn &= ~(page_num - 1); in check_hugepage_cache_consistency()
3478 return kvm_mtrr_check_gfn_range_consistency(vcpu, gfn, page_num); in check_hugepage_cache_consistency()
3488 gfn_t gfn = gpa >> PAGE_SHIFT; in tdp_page_fault() local
3506 force_pt_level = !check_hugepage_cache_consistency(vcpu, gfn, in tdp_page_fault()
3508 level = mapping_level(vcpu, gfn, &force_pt_level); in tdp_page_fault()
3511 !check_hugepage_cache_consistency(vcpu, gfn, level)) in tdp_page_fault()
3513 gfn &= ~(KVM_PAGES_PER_HPAGE(level) - 1); in tdp_page_fault()
3522 if (try_async_pf(vcpu, prefault, gfn, gpa, &pfn, write, &map_writable)) in tdp_page_fault()
3525 if (handle_abnormal_pfn(vcpu, 0, gfn, pfn, ACC_ALL, &r)) in tdp_page_fault()
3533 transparent_hugepage_adjust(vcpu, &gfn, &pfn, &level); in tdp_page_fault()
3535 level, gfn, pfn, prefault); in tdp_page_fault()
3577 static bool sync_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, gfn_t gfn, in sync_mmio_spte() argument
3581 if (gfn != get_mmio_spte_gfn(*sptep)) { in sync_mmio_spte()
3587 mark_mmio_spte(vcpu, sptep, gfn, access); in sync_mmio_spte()
4287 gfn_t gfn = gpa >> PAGE_SHIFT; in kvm_mmu_pte_write() local
4326 for_each_gfn_indirect_valid_sp(vcpu->kvm, sp, gfn) { in kvm_mmu_pte_write()