Lines Matching refs:gfn
510 static int kvm_read_nested_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn,
513 return kvm_read_guest_page_mmu(vcpu, vcpu->arch.walk_mmu, gfn,
561 gfn_t gfn;
571 gfn = (kvm_read_cr3(vcpu) & ~31u) >> PAGE_SHIFT;
573 r = kvm_read_nested_guest_page(vcpu, gfn, pdpte, offset, sizeof(pdpte),
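The three matches above appear to come from the PAE PDPTE read path (pdptrs_changed()/load_pdptrs()-style code): CR3 holds the 32-byte-aligned physical address of the four-entry PDPTE table, and lines 571/573 turn that address into a guest frame number (plus an in-page offset, computed on a line not listed here) before kvm_read_nested_guest_page() reads the entries through the nested walk MMU. A minimal userspace sketch of that split, assuming 4 KiB pages and a made-up CR3 value:

/* Minimal userspace sketch (not kernel code) of the gfn/offset split used
 * around line 571: with PAE paging, CR3 bits 31:5 give the 32-byte-aligned
 * physical address of the PDPTE table, which is decomposed into a guest
 * frame number and a byte offset within that frame. */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1ull << PAGE_SHIFT)

int main(void)
{
	uint64_t cr3    = 0x00ABCDE0;              /* hypothetical PAE CR3 */
	uint64_t table  = cr3 & ~31ull;            /* 32-byte-aligned PDPTE table */
	uint64_t gfn    = table >> PAGE_SHIFT;     /* guest frame holding the table */
	uint64_t offset = table & (PAGE_SIZE - 1); /* offset of the table in that frame */

	printf("gfn=%#llx offset=%#llx\n",
	       (unsigned long long)gfn, (unsigned long long)offset);
	return 0;
}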
7833 * If the gfn and userspace address are not aligned wrt each
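The comment fragment at line 7833 belongs to memslot setup: large-page mappings are only usable when a slot's base gfn and its host userspace address are congruent modulo the huge-page size. A hedged sketch of that idea follows; the names can_use_large_pages and HPAGE_PAGES are illustrative, not kernel identifiers, and 2 MiB x86 huge pages (512 small pages) are assumed.

/* Illustrative check, not the kernel's: a slot can be mapped with large
 * pages only if its base gfn and its userspace address agree in the low
 * bits covered by a huge page (here 2 MiB = 512 small 4 KiB pages). */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT  12
#define HPAGE_PAGES 512u

static bool can_use_large_pages(uint64_t base_gfn, uint64_t userspace_addr)
{
	uint64_t ugfn = userspace_addr >> PAGE_SHIFT;

	/* any mismatch in the low bits means a huge page would straddle
	 * different alignments on the guest and host sides */
	return ((base_gfn ^ ugfn) & (HPAGE_PAGES - 1)) == 0;
}

int main(void)
{
	printf("aligned:   %d\n", can_use_large_pages(0x200, 0x7f0000200000ull));
	printf("unaligned: %d\n", can_use_large_pages(0x201, 0x7f0000200000ull));
	return 0;
}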
8086 static inline u32 kvm_async_pf_hash_fn(gfn_t gfn)
8088 return hash_32(gfn & 0xffffffff, order_base_2(ASYNC_PF_PER_VCPU));
8096 static void kvm_add_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
8098 u32 key = kvm_async_pf_hash_fn(gfn);
8103 vcpu->arch.apf.gfns[key] = gfn;
8106 static u32 kvm_async_pf_gfn_slot(struct kvm_vcpu *vcpu, gfn_t gfn)
8109 u32 key = kvm_async_pf_hash_fn(gfn);
8112 (vcpu->arch.apf.gfns[key] != gfn &&
8119 bool kvm_find_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
8121 return vcpu->arch.apf.gfns[kvm_async_pf_gfn_slot(vcpu, gfn)] == gfn;
8124 static void kvm_del_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
8128 i = j = kvm_async_pf_gfn_slot(vcpu, gfn);
8160 kvm_add_async_pf_gfn(vcpu, work->arch.gfn);
8185 kvm_del_async_pf_gfn(vcpu, work->arch.gfn);
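Lines 8086 through 8185 cover the per-vCPU async page fault gfn table: a small, power-of-two-sized, open-addressed hash table with linear probing. The faulting gfn is added when a "page not present" async fault is injected (line 8160) and deleted again once the page is available (line 8185); kvm_find_async_pf_gfn() checks whether a given gfn is still outstanding. The userspace model below mirrors those helpers, including the hole-filling deletion that keeps probe chains intact. The table size, the EMPTY marker and the multiplicative hash are illustrative stand-ins; the kernel sizes the table by ASYNC_PF_PER_VCPU, uses ~0 as the empty value and hashes with hash_32().

/* Userspace model (not kernel code) of an open-addressed gfn table with
 * linear probing, mirroring kvm_{add,find,del}_async_pf_gfn above. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define TABLE_ORDER 6
#define TABLE_SIZE  (1u << TABLE_ORDER)   /* power of two, like ASYNC_PF_PER_VCPU */
#define EMPTY       (~0ull)               /* kernel uses ~0 for free slots */

static uint64_t gfns[TABLE_SIZE];

static uint32_t hash_fn(uint64_t gfn)
{
	/* Knuth-style multiplicative hash over the low 32 bits of the gfn,
	 * standing in for the kernel's hash_32() */
	return ((uint32_t)gfn * 2654435761u) >> (32 - TABLE_ORDER);
}

static uint32_t slot_of(uint64_t gfn)
{
	uint32_t i, key = hash_fn(gfn);

	/* linear probing: stop at the gfn itself or at the first empty slot */
	for (i = 0; i < TABLE_SIZE &&
		    gfns[key] != gfn && gfns[key] != EMPTY; i++)
		key = (key + 1) & (TABLE_SIZE - 1);
	return key;
}

static void add_gfn(uint64_t gfn)
{
	uint32_t key = hash_fn(gfn);

	while (gfns[key] != EMPTY)
		key = (key + 1) & (TABLE_SIZE - 1);
	gfns[key] = gfn;
}

static bool find_gfn(uint64_t gfn)
{
	return gfns[slot_of(gfn)] == gfn;
}

static void del_gfn(uint64_t gfn)
{
	uint32_t i, j, k;

	i = j = slot_of(gfn);
	for (;;) {
		gfns[i] = EMPTY;
		/* walk the rest of the probe chain and pull back any entry
		 * whose home slot lies cyclically outside (i, j], so lookups
		 * that probe across the new hole still find it */
		do {
			j = (j + 1) & (TABLE_SIZE - 1);
			if (gfns[j] == EMPTY)
				return;
			k = hash_fn(gfns[j]);
		} while ((i <= j) ? (i < k && k <= j) : (i < k || k <= j));
		gfns[i] = gfns[j];
		i = j;
	}
}

int main(void)
{
	for (uint32_t i = 0; i < TABLE_SIZE; i++)
		gfns[i] = EMPTY;

	add_gfn(0x1000);
	add_gfn(0x2000);
	printf("find 0x1000 -> %d, find 0x3000 -> %d\n",
	       find_gfn(0x1000), find_gfn(0x3000));
	del_gfn(0x1000);
	printf("after delete, find 0x1000 -> %d\n", find_gfn(0x1000));
	return 0;
}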