gpa               362 arch/arm/include/asm/kvm_mmu.h 				      gpa_t gpa, void *data, unsigned long len)
gpa               365 arch/arm/include/asm/kvm_mmu.h 	int ret = kvm_read_guest(kvm, gpa, data, len);
gpa               372 arch/arm/include/asm/kvm_mmu.h static inline int kvm_write_guest_lock(struct kvm *kvm, gpa_t gpa,
gpa               376 arch/arm/include/asm/kvm_mmu.h 	int ret = kvm_write_guest(kvm, gpa, data, len);
gpa               427 arch/arm64/include/asm/kvm_mmu.h 				      gpa_t gpa, void *data, unsigned long len)
gpa               430 arch/arm64/include/asm/kvm_mmu.h 	int ret = kvm_read_guest(kvm, gpa, data, len);
gpa               437 arch/arm64/include/asm/kvm_mmu.h static inline int kvm_write_guest_lock(struct kvm *kvm, gpa_t gpa,
gpa               441 arch/arm64/include/asm/kvm_mmu.h 	int ret = kvm_write_guest(kvm, gpa, data, len);
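The arm and arm64 kvm_mmu.h hits above belong to the kvm_read_guest_lock()/kvm_write_guest_lock() wrappers; the listing only shows the parameter line and the inner call. A minimal sketch of the whole wrapper, assuming (as the _lock suffix and the srcu-protected memslots suggest) that the elided lines take kvm->srcu around the access; the write side is symmetric with kvm_write_guest():

static inline int kvm_read_guest_lock(struct kvm *kvm,
				      gpa_t gpa, void *data, unsigned long len)
{
	/* Hold the memslot SRCU read lock across the guest access. */
	int srcu_idx = srcu_read_lock(&kvm->srcu);
	int ret = kvm_read_guest(kvm, gpa, data, len);

	srcu_read_unlock(&kvm->srcu, srcu_idx);

	return ret;
}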
gpa               166 arch/ia64/include/asm/uv/uv_hub.h static inline void *uv_va(unsigned long gpa)
gpa               168 arch/ia64/include/asm/uv/uv_hub.h 	return __va(gpa & uv_hub_info->gpa_mask);
gpa               890 arch/mips/include/asm/kvm_host.h 			    unsigned long *gpa);
gpa               526 arch/mips/kvm/mmu.c 	gpa_t gpa = gfn << PAGE_SHIFT;
gpa               528 arch/mips/kvm/mmu.c 	pte_t *gpa_pte = kvm_mips_pte_for_gpa(kvm, NULL, gpa);
gpa               574 arch/mips/kvm/mmu.c 	gpa_t gpa = gfn << PAGE_SHIFT;
gpa               575 arch/mips/kvm/mmu.c 	pte_t *gpa_pte = kvm_mips_pte_for_gpa(kvm, NULL, gpa);
gpa               610 arch/mips/kvm/mmu.c static int _kvm_mips_map_page_fast(struct kvm_vcpu *vcpu, unsigned long gpa,
gpa               615 arch/mips/kvm/mmu.c 	gfn_t gfn = gpa >> PAGE_SHIFT;
gpa               624 arch/mips/kvm/mmu.c 	ptep = kvm_mips_pte_for_gpa(kvm, NULL, gpa);
gpa               686 arch/mips/kvm/mmu.c static int kvm_mips_map_page(struct kvm_vcpu *vcpu, unsigned long gpa,
gpa               692 arch/mips/kvm/mmu.c 	gfn_t gfn = gpa >> PAGE_SHIFT;
gpa               702 arch/mips/kvm/mmu.c 	err = _kvm_mips_map_page_fast(vcpu, gpa, write_fault, out_entry,
gpa               753 arch/mips/kvm/mmu.c 	ptep = kvm_mips_pte_for_gpa(kvm, memcache, gpa);
gpa              1007 arch/mips/kvm/mmu.c 	unsigned long gpa;
gpa              1018 arch/mips/kvm/mmu.c 	gpa = KVM_GUEST_CPHYSADDR(badvaddr);
gpa              1020 arch/mips/kvm/mmu.c 	if (kvm_mips_map_page(vcpu, gpa, write_fault, &pte_gpa[idx],
gpa               305 arch/mips/kvm/tlb.c 			    unsigned long *gpa)
gpa               380 arch/mips/kvm/tlb.c 	*gpa = pa;
gpa                25 arch/mips/kvm/trap_emul.c 	gpa_t gpa;
gpa                30 arch/mips/kvm/trap_emul.c 		gpa = CPHYSADDR(gva);
gpa                32 arch/mips/kvm/trap_emul.c 		gpa = KVM_GUEST_CPHYSADDR(gva);
gpa                36 arch/mips/kvm/trap_emul.c 		gpa = KVM_INVALID_ADDR;
gpa                39 arch/mips/kvm/trap_emul.c 	kvm_debug("%s: gva %#lx, gpa: %#llx\n", __func__, gva, gpa);
gpa                41 arch/mips/kvm/trap_emul.c 	return gpa;
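The trap_emul.c lines above are from the trap-and-emulate guest-virtual to guest-physical callback; the listing elides the two segment checks. A sketch of how the visible fragments fit together, assuming the branches distinguish the unmapped host kernel segments (kseg0/kseg1, stripped with CPHYSADDR) from the guest's own kseg0 (stripped with KVM_GUEST_CPHYSADDR) -- both conditions below are assumptions:

static gpa_t kvm_trap_emul_gva_to_gpa_cb(gva_t gva)
{
	gpa_t gpa;

	if (KSEGX(gva) == CKSEG0 || KSEGX(gva) == CKSEG1)	/* assumed check */
		gpa = CPHYSADDR(gva);
	else if (KVM_GUEST_KSEGX(gva) == KVM_GUEST_KSEG0)	/* assumed check */
		gpa = KVM_GUEST_CPHYSADDR(gva);
	else
		gpa = KVM_INVALID_ADDR;

	kvm_debug("%s: gva %#lx, gpa: %#llx\n", __func__, gva, gpa);

	return gpa;
}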
gpa               715 arch/mips/kvm/vz.c 			     unsigned long *gpa)
gpa               767 arch/mips/kvm/vz.c 			*gpa = pa;
gpa               771 arch/mips/kvm/vz.c 			*gpa = gva32 & 0x1fffffff;
gpa               797 arch/mips/kvm/vz.c 		*gpa = gva & 0x07ffffffffffffff;
gpa               803 arch/mips/kvm/vz.c 	return kvm_vz_guest_tlb_lookup(vcpu, gva, gpa);
gpa               822 arch/mips/kvm/vz.c 				  unsigned long *gpa)
gpa               829 arch/mips/kvm/vz.c 		*gpa = badvaddr;
gpa               839 arch/mips/kvm/vz.c 	return kvm_vz_gva_to_gpa(vcpu, badvaddr, gpa);
gpa               300 arch/powerpc/include/asm/iommu.h 		unsigned long gpa);
gpa               306 arch/powerpc/include/asm/iommu.h #define iommu_tce_put_param_check(tbl, ioba, gpa)                 \
gpa               310 arch/powerpc/include/asm/iommu.h 		iommu_tce_check_gpa((tbl)->it_page_shift, (gpa)))
gpa               164 arch/powerpc/include/asm/kvm_book3s.h 			unsigned long gpa, gva_t ea, int is_store);
gpa               197 arch/powerpc/include/asm/kvm_book3s.h extern void kvmppc_unmap_pte(struct kvm *kvm, pte_t *pte, unsigned long gpa,
gpa               202 arch/powerpc/include/asm/kvm_book3s.h 				    bool writing, unsigned long gpa,
gpa               205 arch/powerpc/include/asm/kvm_book3s.h 				unsigned long gpa,
gpa               238 arch/powerpc/include/asm/kvm_book3s.h extern kvm_pfn_t kvmppc_gpa_to_pfn(struct kvm_vcpu *vcpu, gpa_t gpa,
gpa               251 arch/powerpc/include/asm/kvm_book3s.h 			unsigned long gpa, bool dirty);
gpa               624 arch/powerpc/include/asm/kvm_book3s_64.h 			     unsigned long gpa, unsigned int level,
gpa               634 arch/powerpc/include/asm/kvm_book3s_64.h 				unsigned long gpa, unsigned long hpa,
gpa               369 arch/powerpc/include/asm/kvm_host.h 	unsigned long gpa;	/* Current guest phys addr */
gpa              1005 arch/powerpc/kernel/iommu.c int iommu_tce_check_gpa(unsigned long page_shift, unsigned long gpa)
gpa              1009 arch/powerpc/kernel/iommu.c 	if (gpa & mask)
gpa               426 arch/powerpc/kvm/book3s.c kvm_pfn_t kvmppc_gpa_to_pfn(struct kvm_vcpu *vcpu, gpa_t gpa, bool writing,
gpa               430 arch/powerpc/kvm/book3s.c 	gfn_t gfn = gpa >> PAGE_SHIFT;
gpa               436 arch/powerpc/kvm/book3s.c 	gpa &= ~0xFFFULL;
gpa               437 arch/powerpc/kvm/book3s.c 	if (unlikely(mp_pa) && unlikely((gpa & KVM_PAM) == mp_pa)) {
gpa               429 arch/powerpc/kvm/book3s_64_mmu_hv.c 			   unsigned long gpa, gva_t ea, int is_store)
gpa               442 arch/powerpc/kvm/book3s_64_mmu_hv.c 		ret = kvm_io_bus_write(vcpu, KVM_FAST_MMIO_BUS, (gpa_t) gpa, 0,
gpa               486 arch/powerpc/kvm/book3s_64_mmu_hv.c 	vcpu->arch.paddr_accessed = gpa;
gpa               500 arch/powerpc/kvm/book3s_64_mmu_hv.c 	unsigned long gpa, gfn, hva, pfn;
gpa               532 arch/powerpc/kvm/book3s_64_mmu_hv.c 			gpa = gpa_base | (ea & (psize - 1));
gpa               533 arch/powerpc/kvm/book3s_64_mmu_hv.c 			return kvmppc_hv_emulate_mmio(run, vcpu, gpa, ea,
gpa               561 arch/powerpc/kvm/book3s_64_mmu_hv.c 	gpa = gpa_base | (ea & (psize - 1));
gpa               562 arch/powerpc/kvm/book3s_64_mmu_hv.c 	gfn = gpa >> PAGE_SHIFT;
gpa               569 arch/powerpc/kvm/book3s_64_mmu_hv.c 		return kvmppc_hv_emulate_mmio(run, vcpu, gpa, ea,
gpa              1137 arch/powerpc/kvm/book3s_64_mmu_hv.c 	gfn = vpa->gpa >> PAGE_SHIFT;
gpa              1170 arch/powerpc/kvm/book3s_64_mmu_hv.c void *kvmppc_pin_guest_page(struct kvm *kvm, unsigned long gpa,
gpa              1174 arch/powerpc/kvm/book3s_64_mmu_hv.c 	unsigned long gfn = gpa >> PAGE_SHIFT;
gpa              1191 arch/powerpc/kvm/book3s_64_mmu_hv.c 	offset = gpa & (PAGE_SIZE - 1);
gpa              1201 arch/powerpc/kvm/book3s_64_mmu_hv.c void kvmppc_unpin_guest_page(struct kvm *kvm, void *va, unsigned long gpa,
gpa              1215 arch/powerpc/kvm/book3s_64_mmu_hv.c 	gfn = gpa >> PAGE_SHIFT;
gpa               134 arch/powerpc/kvm/book3s_64_mmu_radix.c 	u64 pte, base, gpa;
gpa               186 arch/powerpc/kvm/book3s_64_mmu_radix.c 	gpa = pte & 0x01fffffffffff000ul;
gpa               187 arch/powerpc/kvm/book3s_64_mmu_radix.c 	if (gpa & ((1ul << offset) - 1))
gpa               189 arch/powerpc/kvm/book3s_64_mmu_radix.c 	gpa |= eaddr & ((1ul << offset) - 1);
gpa               197 arch/powerpc/kvm/book3s_64_mmu_radix.c 	gpte->raddr = gpa;
gpa               375 arch/powerpc/kvm/book3s_64_mmu_radix.c void kvmppc_unmap_pte(struct kvm *kvm, pte_t *pte, unsigned long gpa,
gpa               382 arch/powerpc/kvm/book3s_64_mmu_radix.c 	unsigned long gfn = gpa >> PAGE_SHIFT;
gpa               386 arch/powerpc/kvm/book3s_64_mmu_radix.c 	old = kvmppc_radix_update_pte(kvm, pte, ~0UL, 0, gpa, shift);
gpa               387 arch/powerpc/kvm/book3s_64_mmu_radix.c 	kvmppc_radix_tlbie_page(kvm, gpa, shift, lpid);
gpa               406 arch/powerpc/kvm/book3s_64_mmu_radix.c 	gpa &= ~(page_size - 1);
gpa               408 arch/powerpc/kvm/book3s_64_mmu_radix.c 	kvmhv_remove_nest_rmap_range(kvm, memslot, gpa, hpa, page_size);
gpa               523 arch/powerpc/kvm/book3s_64_mmu_radix.c 					unsigned long gpa, unsigned int lpid)
gpa               539 arch/powerpc/kvm/book3s_64_mmu_radix.c 					unsigned long gpa, unsigned int lpid)
gpa               564 arch/powerpc/kvm/book3s_64_mmu_radix.c 		      unsigned long gpa, unsigned int level,
gpa               575 arch/powerpc/kvm/book3s_64_mmu_radix.c 	pgd = pgtable + pgd_index(gpa);
gpa               578 arch/powerpc/kvm/book3s_64_mmu_radix.c 		pud = pud_offset(pgd, gpa);
gpa               580 arch/powerpc/kvm/book3s_64_mmu_radix.c 		new_pud = pud_alloc_one(kvm->mm, gpa);
gpa               584 arch/powerpc/kvm/book3s_64_mmu_radix.c 		pmd = pmd_offset(pud, gpa);
gpa               605 arch/powerpc/kvm/book3s_64_mmu_radix.c 	pud = pud_offset(pgd, gpa);
gpa               607 arch/powerpc/kvm/book3s_64_mmu_radix.c 		unsigned long hgpa = gpa & PUD_MASK;
gpa               642 arch/powerpc/kvm/book3s_64_mmu_radix.c 			kvmppc_unmap_free_pud_entry_table(kvm, pud, gpa, lpid);
gpa               644 arch/powerpc/kvm/book3s_64_mmu_radix.c 		kvmppc_radix_set_pte_at(kvm, gpa, (pte_t *)pud, pte);
gpa               656 arch/powerpc/kvm/book3s_64_mmu_radix.c 	pmd = pmd_offset(pud, gpa);
gpa               658 arch/powerpc/kvm/book3s_64_mmu_radix.c 		unsigned long lgpa = gpa & PMD_MASK;
gpa               694 arch/powerpc/kvm/book3s_64_mmu_radix.c 			kvmppc_unmap_free_pmd_entry_table(kvm, pmd, gpa, lpid);
gpa               696 arch/powerpc/kvm/book3s_64_mmu_radix.c 		kvmppc_radix_set_pte_at(kvm, gpa, pmdp_ptep(pmd), pte);
gpa               708 arch/powerpc/kvm/book3s_64_mmu_radix.c 	ptep = pte_offset_kernel(pmd, gpa);
gpa               718 arch/powerpc/kvm/book3s_64_mmu_radix.c 		kvmppc_radix_update_pte(kvm, ptep, 0, pte_val(pte), gpa, 0);
gpa               722 arch/powerpc/kvm/book3s_64_mmu_radix.c 	kvmppc_radix_set_pte_at(kvm, gpa, ptep, pte);
gpa               739 arch/powerpc/kvm/book3s_64_mmu_radix.c 			     unsigned long gpa, unsigned int lpid)
gpa               758 arch/powerpc/kvm/book3s_64_mmu_radix.c 	ptep = __find_linux_pte(pgtable, gpa, NULL, &shift);
gpa               760 arch/powerpc/kvm/book3s_64_mmu_radix.c 		kvmppc_radix_update_pte(kvm, ptep, 0, pgflags, gpa, shift);
gpa               767 arch/powerpc/kvm/book3s_64_mmu_radix.c 				   unsigned long gpa,
gpa               775 arch/powerpc/kvm/book3s_64_mmu_radix.c 	unsigned long hva, gfn = gpa >> PAGE_SHIFT;
gpa               836 arch/powerpc/kvm/book3s_64_mmu_radix.c 	    (gpa & (PUD_SIZE - PAGE_SIZE)) ==
gpa               840 arch/powerpc/kvm/book3s_64_mmu_radix.c 		   (gpa & (PMD_SIZE - PAGE_SIZE)) ==
gpa               865 arch/powerpc/kvm/book3s_64_mmu_radix.c 	ret = kvmppc_create_pte(kvm, kvm->arch.pgtable, pte, gpa, level,
gpa               893 arch/powerpc/kvm/book3s_64_mmu_radix.c 	unsigned long gpa, gfn;
gpa               912 arch/powerpc/kvm/book3s_64_mmu_radix.c 	gpa = vcpu->arch.fault_gpa & ~0xfffUL;
gpa               913 arch/powerpc/kvm/book3s_64_mmu_radix.c 	gpa &= ~0xF000000000000000ul;
gpa               914 arch/powerpc/kvm/book3s_64_mmu_radix.c 	gfn = gpa >> PAGE_SHIFT;
gpa               916 arch/powerpc/kvm/book3s_64_mmu_radix.c 		gpa |= ea & 0xfff;
gpa               932 arch/powerpc/kvm/book3s_64_mmu_radix.c 		return kvmppc_hv_emulate_mmio(run, vcpu, gpa, ea, writing);
gpa               949 arch/powerpc/kvm/book3s_64_mmu_radix.c 					    writing, gpa, kvm->arch.lpid))
gpa               959 arch/powerpc/kvm/book3s_64_mmu_radix.c 	ret = kvmppc_book3s_instantiate_page(vcpu, gpa, memslot, writing,
gpa               972 arch/powerpc/kvm/book3s_64_mmu_radix.c 	unsigned long gpa = gfn << PAGE_SHIFT;
gpa               975 arch/powerpc/kvm/book3s_64_mmu_radix.c 	ptep = __find_linux_pte(kvm->arch.pgtable, gpa, NULL, &shift);
gpa               977 arch/powerpc/kvm/book3s_64_mmu_radix.c 		kvmppc_unmap_pte(kvm, ptep, gpa, shift, memslot,
gpa               987 arch/powerpc/kvm/book3s_64_mmu_radix.c 	unsigned long gpa = gfn << PAGE_SHIFT;
gpa               992 arch/powerpc/kvm/book3s_64_mmu_radix.c 	ptep = __find_linux_pte(kvm->arch.pgtable, gpa, NULL, &shift);
gpa               995 arch/powerpc/kvm/book3s_64_mmu_radix.c 					      gpa, shift);
gpa              1012 arch/powerpc/kvm/book3s_64_mmu_radix.c 	unsigned long gpa = gfn << PAGE_SHIFT;
gpa              1016 arch/powerpc/kvm/book3s_64_mmu_radix.c 	ptep = __find_linux_pte(kvm->arch.pgtable, gpa, NULL, &shift);
gpa              1027 arch/powerpc/kvm/book3s_64_mmu_radix.c 	unsigned long gpa = gfn << PAGE_SHIFT;
gpa              1033 arch/powerpc/kvm/book3s_64_mmu_radix.c 	ptep = __find_linux_pte(kvm->arch.pgtable, gpa, NULL, &shift);
gpa              1040 arch/powerpc/kvm/book3s_64_mmu_radix.c 					      gpa, shift);
gpa              1041 arch/powerpc/kvm/book3s_64_mmu_radix.c 		kvmppc_radix_tlbie_page(kvm, gpa, shift, kvm->arch.lpid);
gpa              1082 arch/powerpc/kvm/book3s_64_mmu_radix.c 	unsigned long gpa;
gpa              1085 arch/powerpc/kvm/book3s_64_mmu_radix.c 	gpa = memslot->base_gfn << PAGE_SHIFT;
gpa              1088 arch/powerpc/kvm/book3s_64_mmu_radix.c 		ptep = __find_linux_pte(kvm->arch.pgtable, gpa, NULL, &shift);
gpa              1090 arch/powerpc/kvm/book3s_64_mmu_radix.c 			kvmppc_unmap_pte(kvm, ptep, gpa, shift, memslot,
gpa              1092 arch/powerpc/kvm/book3s_64_mmu_radix.c 		gpa += PAGE_SIZE;
gpa              1155 arch/powerpc/kvm/book3s_64_mmu_radix.c 	unsigned long	gpa;
gpa              1196 arch/powerpc/kvm/book3s_64_mmu_radix.c 	unsigned long gpa;
gpa              1232 arch/powerpc/kvm/book3s_64_mmu_radix.c 	gpa = p->gpa;
gpa              1236 arch/powerpc/kvm/book3s_64_mmu_radix.c 		if (gpa >= RADIX_PGTABLE_RANGE) {
gpa              1237 arch/powerpc/kvm/book3s_64_mmu_radix.c 			gpa = 0;
gpa              1254 arch/powerpc/kvm/book3s_64_mmu_radix.c 					gpa = RADIX_PGTABLE_RANGE;
gpa              1271 arch/powerpc/kvm/book3s_64_mmu_radix.c 		pgdp = pgt + pgd_index(gpa);
gpa              1274 arch/powerpc/kvm/book3s_64_mmu_radix.c 			gpa = (gpa & PGDIR_MASK) + PGDIR_SIZE;
gpa              1278 arch/powerpc/kvm/book3s_64_mmu_radix.c 		pudp = pud_offset(&pgd, gpa);
gpa              1281 arch/powerpc/kvm/book3s_64_mmu_radix.c 			gpa = (gpa & PUD_MASK) + PUD_SIZE;
gpa              1290 arch/powerpc/kvm/book3s_64_mmu_radix.c 		pmdp = pmd_offset(&pud, gpa);
gpa              1293 arch/powerpc/kvm/book3s_64_mmu_radix.c 			gpa = (gpa & PMD_MASK) + PMD_SIZE;
gpa              1302 arch/powerpc/kvm/book3s_64_mmu_radix.c 		ptep = pte_offset_kernel(&pmd, gpa);
gpa              1305 arch/powerpc/kvm/book3s_64_mmu_radix.c 			gpa += PAGE_SIZE;
gpa              1311 arch/powerpc/kvm/book3s_64_mmu_radix.c 			      " %lx: %lx %d\n", gpa, pte, shift);
gpa              1312 arch/powerpc/kvm/book3s_64_mmu_radix.c 		gpa += 1ul << shift;
gpa              1330 arch/powerpc/kvm/book3s_64_mmu_radix.c 	p->gpa = gpa;
gpa               352 arch/powerpc/kvm/book3s_64_vio.c 	unsigned long gpa = tce & ~(TCE_PCI_READ | TCE_PCI_WRITE);
gpa               361 arch/powerpc/kvm/book3s_64_vio.c 	if (iommu_tce_check_gpa(stt->page_shift, gpa))
gpa               108 arch/powerpc/kvm/book3s_64_vio_hv.c 	unsigned long gpa = tce & ~(TCE_PCI_READ | TCE_PCI_WRITE);
gpa               117 arch/powerpc/kvm/book3s_64_vio_hv.c 	if (iommu_tce_check_gpa(stt->page_shift, gpa))
gpa               612 arch/powerpc/kvm/book3s_hv.c 	unsigned long gpa;
gpa               623 arch/powerpc/kvm/book3s_hv.c 		gpa = vpap->next_gpa;
gpa               627 arch/powerpc/kvm/book3s_hv.c 		if (gpa)
gpa               628 arch/powerpc/kvm/book3s_hv.c 			va = kvmppc_pin_guest_page(kvm, gpa, &nb);
gpa               630 arch/powerpc/kvm/book3s_hv.c 		if (gpa == vpap->next_gpa)
gpa               634 arch/powerpc/kvm/book3s_hv.c 			kvmppc_unpin_guest_page(kvm, va, gpa, false);
gpa               644 arch/powerpc/kvm/book3s_hv.c 		kvmppc_unpin_guest_page(kvm, va, gpa, false);
gpa               648 arch/powerpc/kvm/book3s_hv.c 		kvmppc_unpin_guest_page(kvm, vpap->pinned_addr, vpap->gpa,
gpa               650 arch/powerpc/kvm/book3s_hv.c 	vpap->gpa = gpa;
gpa              2422 arch/powerpc/kvm/book3s_hv.c 		kvmppc_unpin_guest_page(kvm, vpa->pinned_addr, vpa->gpa,
gpa               796 arch/powerpc/kvm/book3s_hv_nested.c 	unsigned long gpa;
gpa               800 arch/powerpc/kvm/book3s_hv_nested.c 	gpa = n_rmap & RMAP_NESTED_GPA_MASK;
gpa               807 arch/powerpc/kvm/book3s_hv_nested.c 	ptep = __find_linux_pte(gp->shadow_pgtable, gpa, NULL, &shift);
gpa               816 arch/powerpc/kvm/book3s_hv_nested.c 		kvmppc_radix_tlbie_page(kvm, gpa, shift, lpid);
gpa               846 arch/powerpc/kvm/book3s_hv_nested.c 	unsigned long gpa;
gpa               850 arch/powerpc/kvm/book3s_hv_nested.c 	gpa = n_rmap & RMAP_NESTED_GPA_MASK;
gpa               857 arch/powerpc/kvm/book3s_hv_nested.c 	ptep = __find_linux_pte(gp->shadow_pgtable, gpa, NULL, &shift);
gpa               860 arch/powerpc/kvm/book3s_hv_nested.c 		kvmppc_unmap_pte(kvm, ptep, gpa, shift, NULL, gp->shadow_lpid);
gpa               879 arch/powerpc/kvm/book3s_hv_nested.c 				  unsigned long gpa, unsigned long hpa,
gpa               887 arch/powerpc/kvm/book3s_hv_nested.c 	gfn = (gpa >> PAGE_SHIFT) - memslot->base_gfn;
gpa               916 arch/powerpc/kvm/book3s_hv_nested.c 					long gpa, int *shift_ret)
gpa               924 arch/powerpc/kvm/book3s_hv_nested.c 	ptep = __find_linux_pte(gp->shadow_pgtable, gpa, NULL, &shift);
gpa               928 arch/powerpc/kvm/book3s_hv_nested.c 		kvmppc_unmap_pte(kvm, ptep, gpa, shift, NULL, gp->shadow_lpid);
gpa              1273 arch/powerpc/kvm/book3s_hv_nested.c 	unsigned long n_gpa, gpa, gfn, perm = 0UL;
gpa              1330 arch/powerpc/kvm/book3s_hv_nested.c 	gpa = gpte.raddr;
gpa              1331 arch/powerpc/kvm/book3s_hv_nested.c 	gfn = gpa >> PAGE_SHIFT;
gpa              1344 arch/powerpc/kvm/book3s_hv_nested.c 		return kvmppc_hv_emulate_mmio(run, vcpu, gpa, ea, writing);
gpa              1365 arch/powerpc/kvm/book3s_hv_nested.c 	pte_p = __find_linux_pte(kvm->arch.pgtable, gpa, NULL, &shift);
gpa              1374 arch/powerpc/kvm/book3s_hv_nested.c 		ret = kvmppc_book3s_instantiate_page(vcpu, gpa, memslot,
gpa              1383 arch/powerpc/kvm/book3s_hv_nested.c 	gfn = (gpa & ~((1UL << shift) - 1)) >> PAGE_SHIFT;
gpa              1403 arch/powerpc/kvm/book3s_hv_nested.c 		pte = __pte(pte_val(pte) | (gpa & mask));
gpa               201 arch/powerpc/kvm/book3s_hv_rm_mmu.c 	unsigned long i, pa, gpa, gfn, psize;
gpa               230 arch/powerpc/kvm/book3s_hv_rm_mmu.c 	gpa = (ptel & HPTE_R_RPN) & ~(psize - 1);
gpa               231 arch/powerpc/kvm/book3s_hv_rm_mmu.c 	gfn = gpa >> PAGE_SHIFT;
gpa               287 arch/powerpc/kvm/book3s_hv_rm_mmu.c 			pa |= gpa & ~PAGE_MASK;
gpa               891 arch/powerpc/kvm/book3s_hv_rm_mmu.c static int kvmppc_get_hpa(struct kvm_vcpu *vcpu, unsigned long gpa,
gpa               902 arch/powerpc/kvm/book3s_hv_rm_mmu.c 	gfn = gpa >> PAGE_SHIFT;
gpa               923 arch/powerpc/kvm/book3s_hv_rm_mmu.c 	pa |= gpa & ~PAGE_MASK;
gpa               652 arch/powerpc/kvm/book3s_pr.c static bool kvmppc_visible_gpa(struct kvm_vcpu *vcpu, gpa_t gpa)
gpa               659 arch/powerpc/kvm/book3s_pr.c 	gpa &= ~0xFFFULL;
gpa               660 arch/powerpc/kvm/book3s_pr.c 	if (unlikely(mp_pa) && unlikely((mp_pa & KVM_PAM) == (gpa & KVM_PAM))) {
gpa               664 arch/powerpc/kvm/book3s_pr.c 	return kvm_is_visible_gfn(vcpu->kvm, gpa >> PAGE_SHIFT);
gpa               255 arch/powerpc/kvm/e500.h 	gpa_t gpa;
gpa               267 arch/powerpc/kvm/e500.h 	gpa = get_tlb_raddr(tlbe);
gpa               268 arch/powerpc/kvm/e500.h 	if (!gfn_to_memslot(vcpu->kvm, gpa >> PAGE_SHIFT))
gpa               588 arch/s390/kvm/gaccess.c static int deref_table(struct kvm *kvm, unsigned long gpa, unsigned long *val)
gpa               590 arch/s390/kvm/gaccess.c 	return kvm_read_guest(kvm, gpa, val, sizeof(*val));
gpa               615 arch/s390/kvm/gaccess.c 				     unsigned long *gpa, const union asce asce,
gpa               774 arch/s390/kvm/gaccess.c 	*gpa = raddr.addr;
gpa               834 arch/s390/kvm/gaccess.c 	unsigned long _len, nr_pages, gpa, idx;
gpa               858 arch/s390/kvm/gaccess.c 		gpa = *(pages + idx) + (ga & ~PAGE_MASK);
gpa               859 arch/s390/kvm/gaccess.c 		_len = min(PAGE_SIZE - (gpa & ~PAGE_MASK), len);
gpa               861 arch/s390/kvm/gaccess.c 			rc = kvm_write_guest(vcpu->kvm, gpa, data, _len);
gpa               863 arch/s390/kvm/gaccess.c 			rc = kvm_read_guest(vcpu->kvm, gpa, data, _len);
gpa               878 arch/s390/kvm/gaccess.c 	unsigned long _len, gpa;
gpa               882 arch/s390/kvm/gaccess.c 		gpa = kvm_s390_real_to_abs(vcpu, gra);
gpa               883 arch/s390/kvm/gaccess.c 		_len = min(PAGE_SIZE - (gpa & ~PAGE_MASK), len);
gpa               885 arch/s390/kvm/gaccess.c 			rc = write_guest_abs(vcpu, gpa, data, _len);
gpa               887 arch/s390/kvm/gaccess.c 			rc = read_guest_abs(vcpu, gpa, data, _len);
gpa               905 arch/s390/kvm/gaccess.c 			    unsigned long *gpa, enum gacc_mode mode)
gpa               923 arch/s390/kvm/gaccess.c 		rc = guest_translate(vcpu, gva, gpa, asce, mode, &prot);
gpa               927 arch/s390/kvm/gaccess.c 		*gpa = kvm_s390_real_to_abs(vcpu, gva);
gpa               928 arch/s390/kvm/gaccess.c 		if (kvm_is_error_gpa(vcpu->kvm, *gpa))
gpa               941 arch/s390/kvm/gaccess.c 	unsigned long gpa;
gpa               948 arch/s390/kvm/gaccess.c 		rc = guest_translate_address(vcpu, gva, ar, &gpa, mode);
gpa               124 arch/s390/kvm/gaccess.h 	unsigned long gpa = gra + kvm_s390_get_prefix(vcpu);
gpa               126 arch/s390/kvm/gaccess.h 	return kvm_write_guest(vcpu->kvm, gpa, data, len);
gpa               150 arch/s390/kvm/gaccess.h 	unsigned long gpa = gra + kvm_s390_get_prefix(vcpu);
gpa               152 arch/s390/kvm/gaccess.h 	return kvm_read_guest(vcpu->kvm, gpa, data, len);
gpa               162 arch/s390/kvm/gaccess.h 			    u8 ar, unsigned long *gpa, enum gacc_mode mode);
gpa               282 arch/s390/kvm/gaccess.h int write_guest_abs(struct kvm_vcpu *vcpu, unsigned long gpa, void *data,
gpa               285 arch/s390/kvm/gaccess.h 	return kvm_write_guest(vcpu->kvm, gpa, data, len);
gpa               305 arch/s390/kvm/gaccess.h int read_guest_abs(struct kvm_vcpu *vcpu, unsigned long gpa, void *data,
gpa               308 arch/s390/kvm/gaccess.h 	return kvm_read_guest(vcpu->kvm, gpa, data, len);
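The gaccess.h hits above pair two s390 addressing flavours: the *_lc helpers convert a real address to an absolute one by adding the CPU's prefix before accessing the lowcore, while the *_abs helpers take a guest absolute address directly. A minimal sketch of the lowcore write helper the first two gaccess.h fragments come from, assuming only the visible lines plus the file's usual __must_check annotation:

static inline __must_check
int write_guest_lc(struct kvm_vcpu *vcpu, unsigned long gra, void *data,
		   unsigned long len)
{
	/* Real address + prefix register = guest absolute address. */
	unsigned long gpa = gra + kvm_s390_get_prefix(vcpu);

	return kvm_write_guest(vcpu->kvm, gpa, data, len);
}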
gpa              3613 arch/s390/kvm/kvm-s390.c long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable)
gpa              3615 arch/s390/kvm/kvm-s390.c 	return gmap_fault(vcpu->arch.gmap, gpa,
gpa              4065 arch/s390/kvm/kvm-s390.c int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long gpa)
gpa              4074 arch/s390/kvm/kvm-s390.c 	if (gpa == KVM_S390_STORE_STATUS_NOADDR) {
gpa              4077 arch/s390/kvm/kvm-s390.c 		gpa = 0;
gpa              4078 arch/s390/kvm/kvm-s390.c 	} else if (gpa == KVM_S390_STORE_STATUS_PREFIXED) {
gpa              4081 arch/s390/kvm/kvm-s390.c 		gpa = px;
gpa              4083 arch/s390/kvm/kvm-s390.c 		gpa -= __LC_FPREGS_SAVE_AREA;
gpa              4088 arch/s390/kvm/kvm-s390.c 		rc = write_guest_abs(vcpu, gpa + __LC_FPREGS_SAVE_AREA,
gpa              4091 arch/s390/kvm/kvm-s390.c 		rc = write_guest_abs(vcpu, gpa + __LC_FPREGS_SAVE_AREA,
gpa              4094 arch/s390/kvm/kvm-s390.c 	rc |= write_guest_abs(vcpu, gpa + __LC_GPREGS_SAVE_AREA,
gpa              4096 arch/s390/kvm/kvm-s390.c 	rc |= write_guest_abs(vcpu, gpa + __LC_PSW_SAVE_AREA,
gpa              4098 arch/s390/kvm/kvm-s390.c 	rc |= write_guest_abs(vcpu, gpa + __LC_PREFIX_SAVE_AREA,
gpa              4100 arch/s390/kvm/kvm-s390.c 	rc |= write_guest_abs(vcpu, gpa + __LC_FP_CREG_SAVE_AREA,
gpa              4102 arch/s390/kvm/kvm-s390.c 	rc |= write_guest_abs(vcpu, gpa + __LC_TOD_PROGREG_SAVE_AREA,
gpa              4105 arch/s390/kvm/kvm-s390.c 	rc |= write_guest_abs(vcpu, gpa + __LC_CPU_TIMER_SAVE_AREA,
gpa              4108 arch/s390/kvm/kvm-s390.c 	rc |= write_guest_abs(vcpu, gpa + __LC_CLOCK_COMP_SAVE_AREA,
gpa              4110 arch/s390/kvm/kvm-s390.c 	rc |= write_guest_abs(vcpu, gpa + __LC_AREGS_SAVE_AREA,
gpa              4112 arch/s390/kvm/kvm-s390.c 	rc |= write_guest_abs(vcpu, gpa + __LC_CREGS_SAVE_AREA,
gpa               286 arch/s390/kvm/kvm-s390.h long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable);
gpa              1438 arch/s390/kvm/priv.c 	unsigned long hva, gpa;
gpa              1457 arch/s390/kvm/priv.c 	ret = guest_translate_address(vcpu, address1, ar, &gpa, GACC_STORE);
gpa              1461 arch/s390/kvm/priv.c 		ret = guest_translate_address(vcpu, address1, ar, &gpa,
gpa              1475 arch/s390/kvm/priv.c 	hva = gfn_to_hva_prot(vcpu->kvm, gpa_to_gfn(gpa), &writable);
gpa               643 arch/s390/kvm/vsie.c static int pin_guest_page(struct kvm *kvm, gpa_t gpa, hpa_t *hpa)
gpa               647 arch/s390/kvm/vsie.c 	page = gfn_to_page(kvm, gpa_to_gfn(gpa));
gpa               650 arch/s390/kvm/vsie.c 	*hpa = (hpa_t) page_to_virt(page) + (gpa & ~PAGE_MASK);
gpa               655 arch/s390/kvm/vsie.c static void unpin_guest_page(struct kvm *kvm, gpa_t gpa, hpa_t hpa)
gpa               659 arch/s390/kvm/vsie.c 	mark_page_dirty(kvm, gpa_to_gfn(gpa));
gpa               724 arch/s390/kvm/vsie.c 	gpa_t gpa;
gpa               727 arch/s390/kvm/vsie.c 	gpa = READ_ONCE(scb_o->scaol) & ~0xfUL;
gpa               729 arch/s390/kvm/vsie.c 		gpa |= (u64) READ_ONCE(scb_o->scaoh) << 32;
gpa               730 arch/s390/kvm/vsie.c 	if (gpa) {
gpa               731 arch/s390/kvm/vsie.c 		if (gpa < 2 * PAGE_SIZE)
gpa               733 arch/s390/kvm/vsie.c 		else if ((gpa & ~0x1fffUL) == kvm_s390_get_prefix(vcpu))
gpa               735 arch/s390/kvm/vsie.c 		else if ((gpa & PAGE_MASK) !=
gpa               736 arch/s390/kvm/vsie.c 			 ((gpa + sizeof(struct bsca_block) - 1) & PAGE_MASK))
gpa               739 arch/s390/kvm/vsie.c 			rc = pin_guest_page(vcpu->kvm, gpa, &hpa);
gpa               745 arch/s390/kvm/vsie.c 		vsie_page->sca_gpa = gpa;
gpa               750 arch/s390/kvm/vsie.c 	gpa = READ_ONCE(scb_o->itdba) & ~0xffUL;
gpa               751 arch/s390/kvm/vsie.c 	if (gpa && (scb_s->ecb & ECB_TE)) {
gpa               752 arch/s390/kvm/vsie.c 		if (gpa < 2 * PAGE_SIZE) {
gpa               757 arch/s390/kvm/vsie.c 		rc = pin_guest_page(vcpu->kvm, gpa, &hpa);
gpa               762 arch/s390/kvm/vsie.c 		vsie_page->itdba_gpa = gpa;
gpa               766 arch/s390/kvm/vsie.c 	gpa = READ_ONCE(scb_o->gvrd) & ~0x1ffUL;
gpa               767 arch/s390/kvm/vsie.c 	if (gpa && (scb_s->eca & ECA_VX) && !(scb_s->ecd & ECD_HOSTREGMGMT)) {
gpa               768 arch/s390/kvm/vsie.c 		if (gpa < 2 * PAGE_SIZE) {
gpa               776 arch/s390/kvm/vsie.c 		rc = pin_guest_page(vcpu->kvm, gpa, &hpa);
gpa               781 arch/s390/kvm/vsie.c 		vsie_page->gvrd_gpa = gpa;
gpa               785 arch/s390/kvm/vsie.c 	gpa = READ_ONCE(scb_o->riccbd) & ~0x3fUL;
gpa               786 arch/s390/kvm/vsie.c 	if (gpa && (scb_s->ecb3 & ECB3_RI)) {
gpa               787 arch/s390/kvm/vsie.c 		if (gpa < 2 * PAGE_SIZE) {
gpa               792 arch/s390/kvm/vsie.c 		rc = pin_guest_page(vcpu->kvm, gpa, &hpa);
gpa               798 arch/s390/kvm/vsie.c 		vsie_page->riccbd_gpa = gpa;
gpa               805 arch/s390/kvm/vsie.c 		gpa = READ_ONCE(scb_o->sdnxo) & ~0xfUL;
gpa               807 arch/s390/kvm/vsie.c 		if (!gpa || gpa < 2 * PAGE_SIZE) {
gpa               815 arch/s390/kvm/vsie.c 		if (gpa & ((1 << sdnxc) - 1)) {
gpa               822 arch/s390/kvm/vsie.c 		rc = pin_guest_page(vcpu->kvm, gpa, &hpa);
gpa               827 arch/s390/kvm/vsie.c 		vsie_page->sdnx_gpa = gpa;
gpa               838 arch/s390/kvm/vsie.c 		      gpa_t gpa)
gpa               843 arch/s390/kvm/vsie.c 		unpin_guest_page(vcpu->kvm, gpa, hpa);
gpa               854 arch/s390/kvm/vsie.c 		   gpa_t gpa)
gpa               859 arch/s390/kvm/vsie.c 	rc = pin_guest_page(vcpu->kvm, gpa, &hpa);
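The vsie.c fragments show the pin/unpin pair used for every nested control block (SCA, ITDB, GVRD, RICCB, SDNX) above. Assembling the visible lines into one sketch; the error check on the returned page is an assumption:

static int pin_guest_page(struct kvm *kvm, gpa_t gpa, hpa_t *hpa)
{
	struct page *page;

	page = gfn_to_page(kvm, gpa_to_gfn(gpa));
	if (is_error_page(page))		/* assumed error path */
		return -EINVAL;
	/* Keep the sub-page offset when forming the host virtual address. */
	*hpa = (hpa_t) page_to_virt(page) + (gpa & ~PAGE_MASK);
	return 0;
}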
gpa               389 arch/x86/include/asm/kvm_host.h 	gpa_t (*translate_gpa)(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access,
gpa              1287 arch/x86/include/asm/kvm_host.h int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
gpa              1440 arch/x86/include/asm/kvm_host.h gpa_t translate_nested_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access,
gpa              1464 arch/x86/include/asm/kvm_host.h static inline gpa_t translate_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access,
gpa              1467 arch/x86/include/asm/kvm_host.h 	return gpa;
gpa              1598 arch/x86/include/asm/kvm_host.h int __x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa, u32 size);
gpa              1599 arch/x86/include/asm/kvm_host.h int x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa, u32 size);
gpa                35 arch/x86/include/asm/kvm_page_track.h 	void (*track_write)(struct kvm_vcpu *vcpu, gpa_t gpa, const u8 *new,
gpa                72 arch/x86/include/asm/kvm_page_track.h void kvm_page_track_write(struct kvm_vcpu *vcpu, gpa_t gpa, const u8 *new,
gpa               524 arch/x86/include/asm/uv/uv_hub.h uv_gpa_in_mmr_space(unsigned long gpa)
gpa               526 arch/x86/include/asm/uv/uv_hub.h 	return (gpa >> 62) == 0x3UL;
gpa               530 arch/x86/include/asm/uv/uv_hub.h static inline unsigned long uv_gpa_to_soc_phys_ram(unsigned long gpa)
gpa               538 arch/x86/include/asm/uv/uv_hub.h 		gpa = ((gpa << uv_hub_info->m_shift) >> uv_hub_info->m_shift) |
gpa               539 arch/x86/include/asm/uv/uv_hub.h 			((gpa >> uv_hub_info->n_lshift) << uv_hub_info->m_val);
gpa               541 arch/x86/include/asm/uv/uv_hub.h 	paddr = gpa & uv_hub_info->gpa_mask;
gpa               548 arch/x86/include/asm/uv/uv_hub.h static inline unsigned long uv_gpa_to_gnode(unsigned long gpa)
gpa               553 arch/x86/include/asm/uv/uv_hub.h 		return gpa >> n_lshift;
gpa               555 arch/x86/include/asm/uv/uv_hub.h 	return uv_gam_range(gpa)->nasid >> 1;
gpa               559 arch/x86/include/asm/uv/uv_hub.h static inline int uv_gpa_to_pnode(unsigned long gpa)
gpa               561 arch/x86/include/asm/uv/uv_hub.h 	return uv_gpa_to_gnode(gpa) & uv_hub_info->pnode_mask;
gpa               565 arch/x86/include/asm/uv/uv_hub.h static inline unsigned long uv_gpa_to_offset(unsigned long gpa)
gpa               570 arch/x86/include/asm/uv/uv_hub.h 		return (gpa << m_shift) >> m_shift;
gpa               572 arch/x86/include/asm/uv/uv_hub.h 	return (gpa & uv_hub_info->gpa_mask) - uv_gam_range_base(gpa);
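The x86 uv_hub.h fragments bit-slice an SGI UV global physical address: the node id sits in the high bits, the per-node offset in the low bits, with the widths read from uv_hub_info at runtime. A self-contained toy that mirrors the uv_gpa_to_offset()/uv_gpa_to_gnode() arithmetic with a made-up field width (M_VAL = 46 offset bits is purely illustrative; it assumes 64-bit unsigned long):

#include <stdio.h>

#define M_VAL	46UL	/* hypothetical offset width, not a real UV constant */

static unsigned long toy_gpa_to_offset(unsigned long gpa)
{
	/* Shift the node bits off the top and back: keeps the low M_VAL bits,
	 * the same double-shift trick as (gpa << m_shift) >> m_shift above. */
	return (gpa << (64 - M_VAL)) >> (64 - M_VAL);
}

static unsigned long toy_gpa_to_gnode(unsigned long gpa)
{
	return gpa >> M_VAL;
}

int main(void)
{
	unsigned long gpa = (3UL << M_VAL) | 0x1000;	/* gnode 3, offset 0x1000 */

	printf("gnode %lu offset %#lx\n",
	       toy_gpa_to_gnode(gpa), toy_gpa_to_offset(gpa));
	return 0;
}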
gpa              1565 arch/x86/kvm/hyperv.c 		gpa_t gpa = param;
gpa              1567 arch/x86/kvm/hyperv.c 		if ((gpa & (__alignof__(param) - 1)) ||
gpa              1568 arch/x86/kvm/hyperv.c 		    offset_in_page(gpa) + sizeof(param) > PAGE_SIZE)
gpa              1571 arch/x86/kvm/hyperv.c 		ret = kvm_vcpu_read_guest(vcpu, gpa, &param, sizeof(param));
gpa              2691 arch/x86/kvm/lapic.c 	if (addr == ghc->gpa && len <= ghc->len)
gpa               461 arch/x86/kvm/mmu.c 	u64 gpa = gfn << PAGE_SHIFT;
gpa               465 arch/x86/kvm/mmu.c 	mask |= gpa | shadow_nonpresent_or_rsvd_mask;
gpa               466 arch/x86/kvm/mmu.c 	mask |= (gpa & shadow_nonpresent_or_rsvd_mask)
gpa               475 arch/x86/kvm/mmu.c 	u64 gpa = spte & shadow_nonpresent_or_rsvd_lower_gfn_mask;
gpa               477 arch/x86/kvm/mmu.c 	gpa |= (spte >> shadow_nonpresent_or_rsvd_mask_len)
gpa               480 arch/x86/kvm/mmu.c 	return gpa >> PAGE_SHIFT;
gpa              3325 arch/x86/kvm/mmu.c static int __direct_map(struct kvm_vcpu *vcpu, gpa_t gpa, int write,
gpa              3332 arch/x86/kvm/mmu.c 	gfn_t gfn = gpa >> PAGE_SHIFT;
gpa              3338 arch/x86/kvm/mmu.c 	trace_kvm_mmu_spte_requested(gpa, level, pfn);
gpa              3339 arch/x86/kvm/mmu.c 	for_each_shadow_entry(vcpu, gpa, it) {
gpa              3646 arch/x86/kvm/mmu.c static int nonpaging_map(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
gpa              3672 arch/x86/kvm/mmu.c 	if (fast_page_fault(vcpu, gpa, level, error_code))
gpa              3678 arch/x86/kvm/mmu.c 	if (try_async_pf(vcpu, prefault, gfn, gpa, &pfn, write, &map_writable))
gpa              3681 arch/x86/kvm/mmu.c 	if (handle_abnormal_pfn(vcpu, gpa, gfn, pfn, ACC_ALL, &r))
gpa              3692 arch/x86/kvm/mmu.c 	r = __direct_map(vcpu, gpa, write, map_writable, level, pfn,
gpa              4158 arch/x86/kvm/mmu.c static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa,
gpa              4161 arch/x86/kvm/mmu.c 	gfn_t gfn = gpa >> PAGE_SHIFT;
gpa              4165 arch/x86/kvm/mmu.c 	pgprintk("%s: gva %lx error %x\n", __func__, gpa, error_code);
gpa              4177 arch/x86/kvm/mmu.c 	return nonpaging_map(vcpu, gpa & PAGE_MASK,
gpa              4278 arch/x86/kvm/mmu.c static int tdp_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
gpa              4285 arch/x86/kvm/mmu.c 	gfn_t gfn = gpa >> PAGE_SHIFT;
gpa              4312 arch/x86/kvm/mmu.c 	if (fast_page_fault(vcpu, gpa, level, error_code))
gpa              4318 arch/x86/kvm/mmu.c 	if (try_async_pf(vcpu, prefault, gfn, gpa, &pfn, write, &map_writable))
gpa              4332 arch/x86/kvm/mmu.c 	r = __direct_map(vcpu, gpa, write, map_writable, level, pfn,
gpa              5329 arch/x86/kvm/mmu.c static u64 mmu_pte_write_fetch_gpte(struct kvm_vcpu *vcpu, gpa_t *gpa,
gpa              5342 arch/x86/kvm/mmu.c 		*gpa &= ~(gpa_t)7;
gpa              5347 arch/x86/kvm/mmu.c 		r = kvm_vcpu_read_guest_atomic(vcpu, *gpa, &gentry, *bytes);
gpa              5376 arch/x86/kvm/mmu.c static bool detect_write_misaligned(struct kvm_mmu_page *sp, gpa_t gpa,
gpa              5382 arch/x86/kvm/mmu.c 		 gpa, bytes, sp->role.word);
gpa              5384 arch/x86/kvm/mmu.c 	offset = offset_in_page(gpa);
gpa              5400 arch/x86/kvm/mmu.c static u64 *get_written_sptes(struct kvm_mmu_page *sp, gpa_t gpa, int *nspte)
gpa              5406 arch/x86/kvm/mmu.c 	page_offset = offset_in_page(gpa);
gpa              5431 arch/x86/kvm/mmu.c static void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
gpa              5435 arch/x86/kvm/mmu.c 	gfn_t gfn = gpa >> PAGE_SHIFT;
gpa              5451 arch/x86/kvm/mmu.c 	pgprintk("%s: gpa %llx bytes %d\n", __func__, gpa, bytes);
gpa              5462 arch/x86/kvm/mmu.c 	gentry = mmu_pte_write_fetch_gpte(vcpu, &gpa, &bytes);
gpa              5468 arch/x86/kvm/mmu.c 		if (detect_write_misaligned(sp, gpa, bytes) ||
gpa              5475 arch/x86/kvm/mmu.c 		spte = get_written_sptes(sp, gpa, &npte);
gpa              5501 arch/x86/kvm/mmu.c 	gpa_t gpa;
gpa              5507 arch/x86/kvm/mmu.c 	gpa = kvm_mmu_gva_to_gpa_read(vcpu, gva, NULL);
gpa              5509 arch/x86/kvm/mmu.c 	r = kvm_mmu_unprotect_page(vcpu->kvm, gpa >> PAGE_SHIFT);
gpa               106 arch/x86/kvm/mmutrace.h 		__field(__u64, gpa)
gpa               110 arch/x86/kvm/mmutrace.h 		__entry->gpa = ((u64)table_gfn << PAGE_SHIFT)
gpa               114 arch/x86/kvm/mmutrace.h 	TP_printk("gpa %llx", __entry->gpa)
gpa               223 arch/x86/kvm/page_track.c void kvm_page_track_write(struct kvm_vcpu *vcpu, gpa_t gpa, const u8 *new,
gpa               238 arch/x86/kvm/page_track.c 			n->track_write(vcpu, gpa, new, bytes, n);
gpa               953 arch/x86/kvm/paging_tmpl.h 	gpa_t gpa = UNMAPPED_GVA;
gpa               959 arch/x86/kvm/paging_tmpl.h 		gpa = gfn_to_gpa(walker.gfn);
gpa               960 arch/x86/kvm/paging_tmpl.h 		gpa |= addr & ~PAGE_MASK;
gpa               964 arch/x86/kvm/paging_tmpl.h 	return gpa;
gpa               974 arch/x86/kvm/paging_tmpl.h 	gpa_t gpa = UNMAPPED_GVA;
gpa               985 arch/x86/kvm/paging_tmpl.h 		gpa = gfn_to_gpa(walker.gfn);
gpa               986 arch/x86/kvm/paging_tmpl.h 		gpa |= vaddr & ~PAGE_MASK;
gpa               990 arch/x86/kvm/paging_tmpl.h 	return gpa;
gpa              3155 arch/x86/kvm/svm.c 	u64 gpa;
gpa              3163 arch/x86/kvm/svm.c 	gpa  = svm->nested.vmcb_iopm + (port / 8);
gpa              3169 arch/x86/kvm/svm.c 	if (kvm_vcpu_read_guest(&svm->vcpu, gpa, &val, iopm_len))
gpa               136 arch/x86/kvm/trace.h 	TP_PROTO(u64 gpa),
gpa               137 arch/x86/kvm/trace.h 	TP_ARGS(gpa),
gpa               140 arch/x86/kvm/trace.h 		__field(u64,	gpa)
gpa               144 arch/x86/kvm/trace.h 		__entry->gpa		= gpa;
gpa               147 arch/x86/kvm/trace.h 	TP_printk("fast mmio at gpa 0x%llx", __entry->gpa)
gpa               772 arch/x86/kvm/trace.h 	TP_PROTO(gva_t gva, gpa_t gpa, bool write, bool gpa_match),
gpa               773 arch/x86/kvm/trace.h 	TP_ARGS(gva, gpa, write, gpa_match),
gpa               777 arch/x86/kvm/trace.h 		__field(gpa_t, gpa)
gpa               784 arch/x86/kvm/trace.h 		__entry->gpa = gpa;
gpa               789 arch/x86/kvm/trace.h 	TP_printk("gva %#lx gpa %#llx %s %s", __entry->gva, __entry->gpa,
gpa               440 arch/x86/kvm/vmx/nested.c static bool page_address_valid(struct kvm_vcpu *vcpu, gpa_t gpa)
gpa               442 arch/x86/kvm/vmx/nested.c 	return PAGE_ALIGNED(gpa) && !(gpa >> cpuid_maxphyaddr(vcpu));
gpa               897 arch/x86/kvm/vmx/nested.c static u32 nested_vmx_load_msr(struct kvm_vcpu *vcpu, u64 gpa, u32 count)
gpa               907 arch/x86/kvm/vmx/nested.c 		if (kvm_vcpu_read_guest(vcpu, gpa + i * sizeof(e),
gpa               911 arch/x86/kvm/vmx/nested.c 				__func__, i, gpa + i * sizeof(e));
gpa               932 arch/x86/kvm/vmx/nested.c static int nested_vmx_store_msr(struct kvm_vcpu *vcpu, u64 gpa, u32 count)
gpa               944 arch/x86/kvm/vmx/nested.c 					gpa + i * sizeof(e),
gpa               948 arch/x86/kvm/vmx/nested.c 				__func__, i, gpa + i * sizeof(e));
gpa               964 arch/x86/kvm/vmx/nested.c 					 gpa + i * sizeof(e) +
gpa              3965 arch/x86/kvm/vmx/nested.c 	gpa_t gpa;
gpa              4025 arch/x86/kvm/vmx/nested.c 		gpa = vmcs12->vm_entry_msr_load_addr + (i * sizeof(g));
gpa              4026 arch/x86/kvm/vmx/nested.c 		if (kvm_vcpu_read_guest(vcpu, gpa, &g, sizeof(g))) {
gpa              4029 arch/x86/kvm/vmx/nested.c 				__func__, i, gpa);
gpa              4034 arch/x86/kvm/vmx/nested.c 			gpa = vmcs12->vm_exit_msr_load_addr + (j * sizeof(h));
gpa              4035 arch/x86/kvm/vmx/nested.c 			if (kvm_vcpu_read_guest(vcpu, gpa, &h, sizeof(h))) {
gpa              4038 arch/x86/kvm/vmx/nested.c 					__func__, j, gpa);
gpa              4909 arch/x86/kvm/vmx/nested.c 		u64 eptp, gpa;
gpa                22 arch/x86/kvm/vmx/ops.h void invept_error(unsigned long ext, u64 eptp, gpa_t gpa);
gpa               262 arch/x86/kvm/vmx/ops.h static inline void __invept(unsigned long ext, u64 eptp, gpa_t gpa)
gpa               265 arch/x86/kvm/vmx/ops.h 		u64 eptp, gpa;
gpa               266 arch/x86/kvm/vmx/ops.h 	} operand = {eptp, gpa};
gpa               268 arch/x86/kvm/vmx/ops.h 	vmx_asm2(invept, "r"(ext), "m"(operand), ext, eptp, gpa);
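The ops.h fragment packs the 128-bit INVEPT memory operand as {eptp, gpa} and hands it to the instruction via vmx_asm2(). A hedged usage sketch: the extent constants exist in arch/x86/include/asm/vmx.h, but the wrapper name below is illustrative, and current hardware ignores the gpa field for this invalidation type:

/* Illustrative only: flush EPT-derived translations for one EPTP.
 * VMX_EPT_EXTENT_CONTEXT is the architectural single-context type. */
static inline void ept_sync_one_context(u64 eptp)
{
	__invept(VMX_EPT_EXTENT_CONTEXT, eptp, 0);
}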
gpa               387 arch/x86/kvm/vmx/vmx.c noinline void invept_error(unsigned long ext, u64 eptp, gpa_t gpa)
gpa               390 arch/x86/kvm/vmx/vmx.c 			ext, eptp, gpa);
gpa              5150 arch/x86/kvm/vmx/vmx.c 	gpa_t gpa;
gpa              5166 arch/x86/kvm/vmx/vmx.c 	gpa = vmcs_read64(GUEST_PHYSICAL_ADDRESS);
gpa              5167 arch/x86/kvm/vmx/vmx.c 	trace_kvm_page_fault(gpa, exit_qualification);
gpa              5188 arch/x86/kvm/vmx/vmx.c 	return kvm_mmu_page_fault(vcpu, gpa, error_code, NULL, 0);
gpa              5193 arch/x86/kvm/vmx/vmx.c 	gpa_t gpa;
gpa              5199 arch/x86/kvm/vmx/vmx.c 	gpa = vmcs_read64(GUEST_PHYSICAL_ADDRESS);
gpa              5201 arch/x86/kvm/vmx/vmx.c 	    !kvm_io_bus_write(vcpu, KVM_FAST_MMIO_BUS, gpa, 0, NULL)) {
gpa              5202 arch/x86/kvm/vmx/vmx.c 		trace_kvm_fast_mmio(gpa);
gpa              5206 arch/x86/kvm/vmx/vmx.c 	return kvm_mmu_page_fault(vcpu, gpa, PFERR_RSVD_MASK, NULL, 0);
gpa              5636 arch/x86/kvm/vmx/vmx.c 		u64 gpa;
gpa              5638 arch/x86/kvm/vmx/vmx.c 		gpa = pml_buf[pml_idx];
gpa              5639 arch/x86/kvm/vmx/vmx.c 		WARN_ON(gpa & (PAGE_SIZE - 1));
gpa              5640 arch/x86/kvm/vmx/vmx.c 		kvm_vcpu_mark_page_dirty(vcpu, gpa >> PAGE_SHIFT);
gpa              7279 arch/x86/kvm/vmx/vmx.c 	gpa_t gpa, dst;
gpa              7298 arch/x86/kvm/vmx/vmx.c 		gpa = vmcs_read64(GUEST_PHYSICAL_ADDRESS) & ~0xFFFull;
gpa              7301 arch/x86/kvm/vmx/vmx.c 		if (kvm_write_guest_page(vcpu->kvm, gpa_to_gfn(dst), &gpa,
gpa              7302 arch/x86/kvm/vmx/vmx.c 					 offset_in_page(dst), sizeof(gpa)))
gpa              2611 arch/x86/kvm/x86.c 	gpa_t gpa = data & ~0x3f;
gpa              2625 arch/x86/kvm/x86.c 	if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.apf.data, gpa,
gpa              5342 arch/x86/kvm/x86.c gpa_t translate_nested_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access,
gpa              5351 arch/x86/kvm/x86.c 	t_gpa  = vcpu->arch.mmu->gva_to_gpa(vcpu, gpa, access, exception);
gpa              5394 arch/x86/kvm/x86.c 		gpa_t gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, addr, access,
gpa              5400 arch/x86/kvm/x86.c 		if (gpa == UNMAPPED_GVA)
gpa              5402 arch/x86/kvm/x86.c 		ret = kvm_vcpu_read_guest_page(vcpu, gpa >> PAGE_SHIFT, data,
gpa              5428 arch/x86/kvm/x86.c 	gpa_t gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, addr, access|PFERR_FETCH_MASK,
gpa              5430 arch/x86/kvm/x86.c 	if (unlikely(gpa == UNMAPPED_GVA))
gpa              5436 arch/x86/kvm/x86.c 	ret = kvm_vcpu_read_guest_page(vcpu, gpa >> PAGE_SHIFT, val,
gpa              5492 arch/x86/kvm/x86.c 		gpa_t gpa =  vcpu->arch.walk_mmu->gva_to_gpa(vcpu, addr,
gpa              5499 arch/x86/kvm/x86.c 		if (gpa == UNMAPPED_GVA)
gpa              5501 arch/x86/kvm/x86.c 		ret = kvm_vcpu_write_guest(vcpu, gpa, data, towrite);
gpa              5566 arch/x86/kvm/x86.c 			    gpa_t gpa, bool write)
gpa              5569 arch/x86/kvm/x86.c 	if ((gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
gpa              5572 arch/x86/kvm/x86.c 	if (vcpu_match_mmio_gpa(vcpu, gpa)) {
gpa              5573 arch/x86/kvm/x86.c 		trace_vcpu_match_mmio(gva, gpa, write, true);
gpa              5581 arch/x86/kvm/x86.c 				gpa_t *gpa, struct x86_exception *exception,
gpa              5595 arch/x86/kvm/x86.c 		*gpa = vcpu->arch.mmio_gfn << PAGE_SHIFT |
gpa              5597 arch/x86/kvm/x86.c 		trace_vcpu_match_mmio(gva, *gpa, write, false);
gpa              5601 arch/x86/kvm/x86.c 	*gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception);
gpa              5603 arch/x86/kvm/x86.c 	if (*gpa == UNMAPPED_GVA)
gpa              5606 arch/x86/kvm/x86.c 	return vcpu_is_mmio_gpa(vcpu, gva, *gpa, write);
gpa              5609 arch/x86/kvm/x86.c int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
gpa              5614 arch/x86/kvm/x86.c 	ret = kvm_vcpu_write_guest(vcpu, gpa, val, bytes);
gpa              5617 arch/x86/kvm/x86.c 	kvm_page_track_write(vcpu, gpa, val, bytes);
gpa              5624 arch/x86/kvm/x86.c 	int (*read_write_emulate)(struct kvm_vcpu *vcpu, gpa_t gpa,
gpa              5626 arch/x86/kvm/x86.c 	int (*read_write_mmio)(struct kvm_vcpu *vcpu, gpa_t gpa,
gpa              5628 arch/x86/kvm/x86.c 	int (*read_write_exit_mmio)(struct kvm_vcpu *vcpu, gpa_t gpa,
gpa              5637 arch/x86/kvm/x86.c 			       vcpu->mmio_fragments[0].gpa, val);
gpa              5645 arch/x86/kvm/x86.c static int read_emulate(struct kvm_vcpu *vcpu, gpa_t gpa,
gpa              5648 arch/x86/kvm/x86.c 	return !kvm_vcpu_read_guest(vcpu, gpa, val, bytes);
gpa              5651 arch/x86/kvm/x86.c static int write_emulate(struct kvm_vcpu *vcpu, gpa_t gpa,
gpa              5654 arch/x86/kvm/x86.c 	return emulator_write_phys(vcpu, gpa, val, bytes);
gpa              5657 arch/x86/kvm/x86.c static int write_mmio(struct kvm_vcpu *vcpu, gpa_t gpa, int bytes, void *val)
gpa              5659 arch/x86/kvm/x86.c 	trace_kvm_mmio(KVM_TRACE_MMIO_WRITE, bytes, gpa, val);
gpa              5660 arch/x86/kvm/x86.c 	return vcpu_mmio_write(vcpu, gpa, bytes, val);
gpa              5663 arch/x86/kvm/x86.c static int read_exit_mmio(struct kvm_vcpu *vcpu, gpa_t gpa,
gpa              5666 arch/x86/kvm/x86.c 	trace_kvm_mmio(KVM_TRACE_MMIO_READ_UNSATISFIED, bytes, gpa, NULL);
gpa              5670 arch/x86/kvm/x86.c static int write_exit_mmio(struct kvm_vcpu *vcpu, gpa_t gpa,
gpa              5699 arch/x86/kvm/x86.c 	gpa_t gpa;
gpa              5715 arch/x86/kvm/x86.c 		gpa = vcpu->arch.gpa_val;
gpa              5716 arch/x86/kvm/x86.c 		ret = vcpu_is_mmio_gpa(vcpu, addr, gpa, write);
gpa              5718 arch/x86/kvm/x86.c 		ret = vcpu_mmio_gva_to_gpa(vcpu, addr, &gpa, exception, write);
gpa              5723 arch/x86/kvm/x86.c 	if (!ret && ops->read_write_emulate(vcpu, gpa, val, bytes))
gpa              5729 arch/x86/kvm/x86.c 	handled = ops->read_write_mmio(vcpu, gpa, bytes, val);
gpa              5733 arch/x86/kvm/x86.c 	gpa += handled;
gpa              5739 arch/x86/kvm/x86.c 	frag->gpa = gpa;
gpa              5752 arch/x86/kvm/x86.c 	gpa_t gpa;
gpa              5786 arch/x86/kvm/x86.c 	gpa = vcpu->mmio_fragments[0].gpa;
gpa              5794 arch/x86/kvm/x86.c 	vcpu->run->mmio.phys_addr = gpa;
gpa              5796 arch/x86/kvm/x86.c 	return ops->read_write_exit_mmio(vcpu, gpa, val, bytes);
gpa              5838 arch/x86/kvm/x86.c 	gpa_t gpa;
gpa              5846 arch/x86/kvm/x86.c 	gpa = kvm_mmu_gva_to_gpa_write(vcpu, addr, NULL);
gpa              5848 arch/x86/kvm/x86.c 	if (gpa == UNMAPPED_GVA ||
gpa              5849 arch/x86/kvm/x86.c 	    (gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
gpa              5852 arch/x86/kvm/x86.c 	if (((gpa + bytes - 1) & PAGE_MASK) != (gpa & PAGE_MASK))
gpa              5855 arch/x86/kvm/x86.c 	if (kvm_vcpu_map(vcpu, gpa_to_gfn(gpa), &map))
gpa              5858 arch/x86/kvm/x86.c 	kaddr = map.hva + offset_in_page(gpa);
gpa              5882 arch/x86/kvm/x86.c 	kvm_page_track_write(vcpu, gpa, new, bytes);
gpa              6438 arch/x86/kvm/x86.c 	gpa_t gpa = cr2_or_gpa;
gpa              6452 arch/x86/kvm/x86.c 		gpa = kvm_mmu_gva_to_gpa_write(vcpu, cr2_or_gpa, NULL);
gpa              6458 arch/x86/kvm/x86.c 		if (gpa == UNMAPPED_GVA)
gpa              6468 arch/x86/kvm/x86.c 	pfn = gfn_to_pfn(vcpu->kvm, gpa_to_gfn(gpa));
gpa              6488 arch/x86/kvm/x86.c 			kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa));
gpa              6498 arch/x86/kvm/x86.c 	kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa));
gpa              6512 arch/x86/kvm/x86.c 	unsigned long last_retry_eip, last_retry_addr, gpa = cr2_or_gpa;
gpa              6548 arch/x86/kvm/x86.c 		gpa = kvm_mmu_gva_to_gpa_write(vcpu, cr2_or_gpa, NULL);
gpa              6550 arch/x86/kvm/x86.c 	kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa));
gpa              8489 arch/x86/kvm/x86.c 		frag->gpa += len;
gpa              8504 arch/x86/kvm/x86.c 	run->mmio.phys_addr = frag->gpa;
gpa              9027 arch/x86/kvm/x86.c 	gpa_t gpa;
gpa              9033 arch/x86/kvm/x86.c 	gpa = kvm_mmu_gva_to_gpa_system(vcpu, vaddr, NULL);
gpa              9035 arch/x86/kvm/x86.c 	tr->physical_address = gpa;
gpa              9036 arch/x86/kvm/x86.c 	tr->valid = gpa != UNMAPPED_GVA;
gpa              9622 arch/x86/kvm/x86.c int __x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa, u32 size)
gpa              9659 arch/x86/kvm/x86.c 		m.guest_phys_addr = gpa;
gpa              9674 arch/x86/kvm/x86.c int x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa, u32 size)
gpa              9679 arch/x86/kvm/x86.c 	r = __x86_set_memory_region(kvm, id, gpa, size);
gpa               232 arch/x86/kvm/x86.h static inline bool vcpu_match_mmio_gpa(struct kvm_vcpu *vcpu, gpa_t gpa)
gpa               235 arch/x86/kvm/x86.h 	      vcpu->arch.mmio_gfn == gpa >> PAGE_SHIFT)
gpa              1715 arch/x86/platform/uv/tlb_uv.c 	unsigned long gpa;
gpa              1733 arch/x86/platform/uv/tlb_uv.c 	gpa = uv_gpa(bau_desc);
gpa              1734 arch/x86/platform/uv/tlb_uv.c 	n = uv_gpa_to_gnode(gpa);
gpa              1735 arch/x86/platform/uv/tlb_uv.c 	m = ops.bau_gpa_to_offset(gpa);
gpa                53 arch/x86/xen/mmu_hvm.c 	a.gpa = __pa(mm->pgd);
gpa                64 arch/x86/xen/mmu_hvm.c 	a.gpa = 0x00;
gpa              1696 drivers/gpu/drm/i915/gvt/cmd_parser.c 	unsigned long gpa;
gpa              1699 drivers/gpu/drm/i915/gvt/cmd_parser.c 		gpa = intel_vgpu_gma_to_gpa(mm, gma);
gpa              1700 drivers/gpu/drm/i915/gvt/cmd_parser.c 		if (gpa == INTEL_GVT_INVALID_ADDR) {
gpa              1710 drivers/gpu/drm/i915/gvt/cmd_parser.c 		intel_gvt_hypervisor_read_gpa(vgpu, gpa, va + len, copy_len);
gpa               302 drivers/gpu/drm/i915/gvt/gtt.c 		unsigned long index, bool hypervisor_access, unsigned long gpa,
gpa               312 drivers/gpu/drm/i915/gvt/gtt.c 		ret = intel_gvt_hypervisor_read_gpa(vgpu, gpa +
gpa               327 drivers/gpu/drm/i915/gvt/gtt.c 		unsigned long index, bool hypervisor_access, unsigned long gpa,
gpa               337 drivers/gpu/drm/i915/gvt/gtt.c 		ret = intel_gvt_hypervisor_write_gpa(vgpu, gpa +
gpa               781 drivers/gpu/drm/i915/gvt/gtt.c 		u64 gpa, void *data, int bytes)
gpa               790 drivers/gpu/drm/i915/gvt/gtt.c 	ret = ppgtt_handle_guest_write_page_table_bytes(spt, gpa, data, bytes);
gpa              2074 drivers/gpu/drm/i915/gvt/gtt.c 	unsigned long gpa = INTEL_GVT_INVALID_ADDR;
gpa              2090 drivers/gpu/drm/i915/gvt/gtt.c 		gpa = (pte_ops->get_pfn(&e) << I915_GTT_PAGE_SHIFT)
gpa              2093 drivers/gpu/drm/i915/gvt/gtt.c 		trace_gma_translate(vgpu->id, "ggtt", 0, 0, gma, gpa);
gpa              2130 drivers/gpu/drm/i915/gvt/gtt.c 		gpa = (pte_ops->get_pfn(&e) << I915_GTT_PAGE_SHIFT) +
gpa              2133 drivers/gpu/drm/i915/gvt/gtt.c 				    mm->ppgtt_mm.root_entry_type, gma, gpa);
gpa              2136 drivers/gpu/drm/i915/gvt/gtt.c 	return gpa;
gpa                53 drivers/gpu/drm/i915/gvt/gtt.h 			 unsigned long gpa,
gpa                59 drivers/gpu/drm/i915/gvt/gtt.h 			 unsigned long gpa,
gpa               547 drivers/gpu/drm/i915/gvt/gvt.h int intel_vgpu_opregion_base_write_handler(struct intel_vgpu *vgpu, u32 gpa);
gpa                55 drivers/gpu/drm/i915/gvt/hypercall.h 	int (*read_gpa)(unsigned long handle, unsigned long gpa, void *buf,
gpa                57 drivers/gpu/drm/i915/gvt/hypercall.h 	int (*write_gpa)(unsigned long handle, unsigned long gpa, void *buf,
gpa              1702 drivers/gpu/drm/i915/gvt/kvmgt.c static void kvmgt_page_track_write(struct kvm_vcpu *vcpu, gpa_t gpa,
gpa              1709 drivers/gpu/drm/i915/gvt/kvmgt.c 	if (kvmgt_gfn_is_write_protected(info, gpa_to_gfn(gpa)))
gpa              1710 drivers/gpu/drm/i915/gvt/kvmgt.c 		intel_gvt_ops->write_protect_handler(info->vgpu, gpa,
gpa              1962 drivers/gpu/drm/i915/gvt/kvmgt.c static int kvmgt_rw_gpa(unsigned long handle, unsigned long gpa,
gpa              1983 drivers/gpu/drm/i915/gvt/kvmgt.c 	ret = write ? kvm_write_guest(kvm, gpa, buf, len) :
gpa              1984 drivers/gpu/drm/i915/gvt/kvmgt.c 		      kvm_read_guest(kvm, gpa, buf, len);
gpa              1995 drivers/gpu/drm/i915/gvt/kvmgt.c static int kvmgt_read_gpa(unsigned long handle, unsigned long gpa,
gpa              1998 drivers/gpu/drm/i915/gvt/kvmgt.c 	return kvmgt_rw_gpa(handle, gpa, buf, len, false);
gpa              2001 drivers/gpu/drm/i915/gvt/kvmgt.c static int kvmgt_write_gpa(unsigned long handle, unsigned long gpa,
gpa              2004 drivers/gpu/drm/i915/gvt/kvmgt.c 	return kvmgt_rw_gpa(handle, gpa, buf, len, true);
gpa                47 drivers/gpu/drm/i915/gvt/mmio.c int intel_vgpu_gpa_to_mmio_offset(struct intel_vgpu *vgpu, u64 gpa)
gpa                50 drivers/gpu/drm/i915/gvt/mmio.c 	return gpa - gttmmio_gpa;
gpa                85 drivers/gpu/drm/i915/gvt/mmio.h int intel_vgpu_gpa_to_mmio_offset(struct intel_vgpu *vgpu, u64 gpa);
gpa               193 drivers/gpu/drm/i915/gvt/mpt.h 		unsigned long gpa, void *buf, unsigned long len)
gpa               195 drivers/gpu/drm/i915/gvt/mpt.h 	return intel_gvt_host.mpt->read_gpa(vgpu->handle, gpa, buf, len);
gpa               209 drivers/gpu/drm/i915/gvt/mpt.h 		unsigned long gpa, void *buf, unsigned long len)
gpa               211 drivers/gpu/drm/i915/gvt/mpt.h 	return intel_gvt_host.mpt->write_gpa(vgpu->handle, gpa, buf, len);
gpa               295 drivers/gpu/drm/i915/gvt/opregion.c int intel_vgpu_opregion_base_write_handler(struct intel_vgpu *vgpu, u32 gpa)
gpa               305 drivers/gpu/drm/i915/gvt/opregion.c 			vgpu_opregion(vgpu)->gfn[i] = (gpa >> PAGE_SHIFT) + i;
gpa               316 drivers/gpu/drm/i915/gvt/opregion.c 			vgpu_opregion(vgpu)->gfn[i] = (gpa >> PAGE_SHIFT) + i;
gpa               159 drivers/gpu/drm/i915/gvt/page_track.c int intel_vgpu_page_track_handler(struct intel_vgpu *vgpu, u64 gpa,
gpa               167 drivers/gpu/drm/i915/gvt/page_track.c 	page_track = intel_vgpu_find_page_track(vgpu, gpa >> PAGE_SHIFT);
gpa               175 drivers/gpu/drm/i915/gvt/page_track.c 		intel_vgpu_disable_page_track(vgpu, gpa >> PAGE_SHIFT);
gpa               177 drivers/gpu/drm/i915/gvt/page_track.c 		ret = page_track->handler(page_track, gpa, data, bytes);
gpa               179 drivers/gpu/drm/i915/gvt/page_track.c 			gvt_err("guest page write error, gpa %llx\n", gpa);
gpa                32 drivers/gpu/drm/i915/gvt/page_track.h 			u64 gpa, void *data, int bytes);
gpa                53 drivers/gpu/drm/i915/gvt/page_track.h int intel_vgpu_page_track_handler(struct intel_vgpu *vgpu, u64 gpa,
gpa              1419 drivers/gpu/drm/i915/gvt/scheduler.c 	u64 gpa;
gpa              1422 drivers/gpu/drm/i915/gvt/scheduler.c 	gpa = ring_context_gpa + RING_CTX_OFF(pdps[0].val);
gpa              1426 drivers/gpu/drm/i915/gvt/scheduler.c 				gpa + i * 8, &pdp[7 - i], 4);
gpa               117 drivers/gpu/drm/i915/gvt/trace.h 		unsigned long gma, unsigned long gpa),
gpa               119 drivers/gpu/drm/i915/gvt/trace.h 	TP_ARGS(id, type, ring_id, root_entry_type, gma, gpa),
gpa               128 drivers/gpu/drm/i915/gvt/trace.h 			id, type, ring_id, root_entry_type, gma, gpa);
gpa               359 drivers/misc/sgi-gru/gru_instructions.h static inline void gru_vload_phys(void *cb, unsigned long gpa,
gpa               364 drivers/misc/sgi-gru/gru_instructions.h 	ins->baddr0 = (long)gpa | ((unsigned long)iaa << 62);
gpa               371 drivers/misc/sgi-gru/gru_instructions.h static inline void gru_vstore_phys(void *cb, unsigned long gpa,
gpa               376 drivers/misc/sgi-gru/gru_instructions.h 	ins->baddr0 = (long)gpa | ((unsigned long)iaa << 62);
gpa               253 drivers/misc/sgi-gru/grufault.c 		    int write, int atomic, unsigned long *gpa, int *pageshift)
gpa               279 drivers/misc/sgi-gru/grufault.c 	*gpa = uv_soc_phys_ram_to_gpa(paddr);
gpa               316 drivers/misc/sgi-gru/grufault.c 	unsigned long vaddr = 0, gpa;
gpa               332 drivers/misc/sgi-gru/grufault.c 		ret = gru_vtop(gts, vaddr, write, atomic, &gpa, &pageshift);
gpa               333 drivers/misc/sgi-gru/grufault.c 		if (ret || tfh_write_only(tfh, gpa, GAA_RAM, vaddr, asid, write,
gpa               339 drivers/misc/sgi-gru/grufault.c 			vaddr, asid, write, pageshift, gpa);
gpa               363 drivers/misc/sgi-gru/grufault.c 	unsigned long gpa = 0, vaddr = 0;
gpa               413 drivers/misc/sgi-gru/grufault.c 	ret = gru_vtop(gts, vaddr, write, atomic, &gpa, &pageshift);
gpa               434 drivers/misc/sgi-gru/grufault.c 	tfh_write_restart(tfh, gpa, GAA_RAM, vaddr, asid, write,
gpa               440 drivers/misc/sgi-gru/grufault.c 		indexway, write, pageshift, gpa);
gpa               904 drivers/misc/sgi-gru/grukservices.c int gru_read_gpa(unsigned long *value, unsigned long gpa)
gpa               913 drivers/misc/sgi-gru/grukservices.c 	iaa = gpa >> 62;
gpa               914 drivers/misc/sgi-gru/grukservices.c 	gru_vload_phys(cb, gpa, gru_get_tri(dsr), iaa, IMA);
gpa               131 drivers/misc/sgi-gru/grukservices.h int gru_read_gpa(unsigned long *value, unsigned long gpa);
gpa                47 drivers/misc/sgi-xp/xp_main.c unsigned long (*xp_socket_pa) (unsigned long gpa);
gpa                39 drivers/misc/sgi-xp/xp_uv.c xp_socket_pa_uv(unsigned long gpa)
gpa                41 drivers/misc/sgi-xp/xp_uv.c 	return uv_gpa_to_soc_phys_ram(gpa);
gpa               258 include/linux/kvm_host.h 	gpa_t gpa;
gpa               734 include/linux/kvm_host.h int kvm_read_guest_atomic(struct kvm *kvm, gpa_t gpa, void *data,
gpa               736 include/linux/kvm_host.h int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len);
gpa               741 include/linux/kvm_host.h int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
gpa               749 include/linux/kvm_host.h 			      gpa_t gpa, unsigned long len);
gpa               751 include/linux/kvm_host.h int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len);
gpa               761 include/linux/kvm_host.h int kvm_vcpu_map(struct kvm_vcpu *vcpu, gpa_t gpa, struct kvm_host_map *map);
gpa               772 include/linux/kvm_host.h int kvm_vcpu_read_guest_atomic(struct kvm_vcpu *vcpu, gpa_t gpa, void *data,
gpa               774 include/linux/kvm_host.h int kvm_vcpu_read_guest(struct kvm_vcpu *vcpu, gpa_t gpa, void *data,
gpa               778 include/linux/kvm_host.h int kvm_vcpu_write_guest(struct kvm_vcpu *vcpu, gpa_t gpa, const void *data,
gpa              1069 include/linux/kvm_host.h static inline gfn_t gpa_to_gfn(gpa_t gpa)
gpa              1071 include/linux/kvm_host.h 	return (gfn_t)(gpa >> PAGE_SHIFT);
gpa              1080 include/linux/kvm_host.h 						gpa_t gpa)
gpa              1082 include/linux/kvm_host.h 	return kvm_vcpu_gfn_to_page(vcpu, gpa_to_gfn(gpa));
gpa              1085 include/linux/kvm_host.h static inline bool kvm_is_error_gpa(struct kvm *kvm, gpa_t gpa)
gpa              1087 include/linux/kvm_host.h 	unsigned long hva = gfn_to_hva(kvm, gpa_to_gfn(gpa));
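The kvm_host.h inline helpers above reduce to page-shift arithmetic. A minimal user-space sketch of the gpa/gfn conversion, assuming 4 KiB pages; gfn_to_gpa(), the inverse, lives in the same header:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12

typedef uint64_t gpa_t;
typedef uint64_t gfn_t;

/* Same body as the kernel's gpa_to_gfn(): drop the in-page offset. */
static inline gfn_t gpa_to_gfn(gpa_t gpa)
{
	return (gfn_t)(gpa >> PAGE_SHIFT);
}

/* Inverse: frame number back to the page-aligned guest address. */
static inline gpa_t gfn_to_gpa(gfn_t gfn)
{
	return (gpa_t)gfn << PAGE_SHIFT;
}

int main(void)
{
	gpa_t gpa = 0x1234abcd;

	printf("gpa %#llx -> gfn %#llx -> page base %#llx\n",
	       (unsigned long long)gpa,
	       (unsigned long long)gpa_to_gfn(gpa),
	       (unsigned long long)gfn_to_gpa(gpa_to_gfn(gpa)));
	return 0;
}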
gpa                46 include/linux/kvm_types.h 	gpa_t gpa;
gpa               214 include/trace/events/kvm.h 	TP_PROTO(int type, int len, u64 gpa, void *val),
gpa               215 include/trace/events/kvm.h 	TP_ARGS(type, len, gpa, val),
gpa               220 include/trace/events/kvm.h 		__field(	u64,	gpa		)
gpa               227 include/trace/events/kvm.h 		__entry->gpa		= gpa;
gpa               236 include/trace/events/kvm.h 		  __entry->len, __entry->gpa, __entry->val)
gpa                41 include/xen/interface/hvm/hvm_op.h     aligned_u64 gpa;
gpa               358 kernel/rcu/tree_stall.h 	unsigned long gpa;
gpa               404 kernel/rcu/tree_stall.h 			gpa = READ_ONCE(rcu_state.gp_activity);
gpa               406 kernel/rcu/tree_stall.h 			       rcu_state.name, j - gpa, j, gpa,
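Note that in kernel/rcu/tree_stall.h, unlike everywhere else in this listing, "gpa" abbreviates grace-period activity: a jiffies timestamp read from rcu_state.gp_activity, so `j - gpa` is the time since the grace period last made progress. A standalone sketch of that age computation, with illustrative values:

#include <stdio.h>

int main(void)
{
	unsigned long j = 100200;	/* "now", in jiffies (illustrative) */
	unsigned long gpa = 100000;	/* last recorded GP activity */

	printf("%lu jiffies since last grace-period activity\n", j - gpa);
	return 0;
}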
gpa               108 tools/testing/selftests/kvm/include/kvm_util.h void *addr_gpa2hva(struct kvm_vm *vm, vm_paddr_t gpa);
gpa                12 tools/testing/selftests/kvm/lib/aarch64/ucall.c static bool ucall_mmio_init(struct kvm_vm *vm, vm_paddr_t gpa)
gpa                14 tools/testing/selftests/kvm/lib/aarch64/ucall.c 	if (kvm_userspace_memory_region_find(vm, gpa, gpa + 1))
gpa                17 tools/testing/selftests/kvm/lib/aarch64/ucall.c 	virt_pg_map(vm, gpa, gpa, 0);
gpa                19 tools/testing/selftests/kvm/lib/aarch64/ucall.c 	ucall_exit_mmio_addr = (vm_vaddr_t *)gpa;
gpa                27 tools/testing/selftests/kvm/lib/aarch64/ucall.c 	vm_paddr_t gpa, start, end, step, offset;
gpa                32 tools/testing/selftests/kvm/lib/aarch64/ucall.c 		gpa = (vm_paddr_t)arg;
gpa                33 tools/testing/selftests/kvm/lib/aarch64/ucall.c 		ret = ucall_mmio_init(vm, gpa);
gpa                34 tools/testing/selftests/kvm/lib/aarch64/ucall.c 		TEST_ASSERT(ret, "Can't set ucall mmio address to %lx", gpa);
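ucall_mmio_init() above only accepts a gpa that no memslot claims (so guest accesses there exit to userspace as MMIO), identity-maps it with virt_pg_map(vm, gpa, gpa, 0), and records it. A loose sketch of the probing idea in the surrounding code, where region_covers() is a hypothetical stand-in for kvm_userspace_memory_region_find():

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical: true if some memslot overlaps the candidate address. */
static bool region_covers(uint64_t gpa)
{
	return gpa < 0x40000000;	/* pretend guest RAM ends at 1 GiB */
}

int main(void)
{
	uint64_t found = 0;

	/* Step through candidates until one lies outside every memslot;
	 * the selftest walks a range of candidates in a similar spirit. */
	for (uint64_t gpa = 0x3fffe000; gpa < 0x40002000; gpa += 0x1000) {
		if (!region_covers(gpa)) {
			found = gpa;
			break;
		}
	}
	printf("ucall mmio gpa candidate: %#llx\n",
	       (unsigned long long)found);
	return 0;
}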
gpa              1028 tools/testing/selftests/kvm/lib/kvm_util.c void *addr_gpa2hva(struct kvm_vm *vm, vm_paddr_t gpa)
gpa              1033 tools/testing/selftests/kvm/lib/kvm_util.c 		if ((gpa >= region->region.guest_phys_addr)
gpa              1034 tools/testing/selftests/kvm/lib/kvm_util.c 			&& (gpa <= (region->region.guest_phys_addr
gpa              1037 tools/testing/selftests/kvm/lib/kvm_util.c 				+ (gpa - region->region.guest_phys_addr));
gpa              1040 tools/testing/selftests/kvm/lib/kvm_util.c 	TEST_ASSERT(false, "No vm physical memory at 0x%lx", gpa);
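addr_gpa2hva() above resolves a guest physical address by scanning the VM's regions for the one whose [guest_phys_addr, guest_phys_addr + size - 1] range contains the gpa, then adding the gpa's offset into that region to the region's host mapping. A sketch of the same lookup over a plain array, with struct and field names simplified from the selftest's:

#include <stdint.h>
#include <stdio.h>

struct region {
	uint64_t guest_phys_addr;
	uint64_t size;
	void *host_mem;
};

static void *gpa2hva(struct region *regions, int n, uint64_t gpa)
{
	for (int i = 0; i < n; i++) {
		struct region *r = &regions[i];

		/* Same inclusive containment test as addr_gpa2hva(). */
		if (gpa >= r->guest_phys_addr &&
		    gpa <= r->guest_phys_addr + r->size - 1)
			return (char *)r->host_mem +
			       (gpa - r->guest_phys_addr);
	}
	return NULL;	/* the selftest TEST_ASSERTs instead */
}

int main(void)
{
	static char backing[0x2000];
	struct region r = {
		.guest_phys_addr = 0x10000,
		.size = sizeof(backing),
		.host_mem = backing,
	};

	printf("hva for gpa 0x10010: %p\n", gpa2hva(&r, 1, 0x10010));
	return 0;
}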
gpa                70 tools/testing/selftests/kvm/lib/s390x/processor.c void virt_pg_map(struct kvm_vm *vm, uint64_t gva, uint64_t gpa,
gpa                84 tools/testing/selftests/kvm/lib/s390x/processor.c 	TEST_ASSERT((gpa % vm->page_size) == 0,
gpa                88 tools/testing/selftests/kvm/lib/s390x/processor.c 	TEST_ASSERT((gpa >> vm->page_shift) <= vm->max_gfn,
gpa               106 tools/testing/selftests/kvm/lib/s390x/processor.c 			"WARNING: PTE for gpa=0x%"PRIx64" already set!\n", gpa);
gpa               107 tools/testing/selftests/kvm/lib/s390x/processor.c 	entry[idx] = gpa;
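The s390x virt_pg_map() lines above show the usual preconditions before installing a mapping: the gpa must be page aligned, and its frame number must not exceed the VM's highest usable gfn. A standalone sketch of those two checks with illustrative values:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint64_t page_size = 4096, page_shift = 12, max_gfn = 0xfffff;
	uint64_t gpa = 0x5000;

	assert(gpa % page_size == 0);		/* page aligned */
	assert((gpa >> page_shift) <= max_gfn);	/* within guest memory */
	return 0;
}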
gpa               957 virt/kvm/arm/mmu.c 			gpa_t gpa = addr + (vm_start - memslot->userspace_addr);
gpa               958 virt/kvm/arm/mmu.c 			unmap_stage2_range(kvm, gpa, vm_end - vm_start);
gpa              2017 virt/kvm/arm/mmu.c 					    gpa_t gpa, u64 size,
gpa              2030 virt/kvm/arm/mmu.c 		gfn_t gpa;
gpa              2038 virt/kvm/arm/mmu.c 		gpa = hva_to_gfn_memslot(hva_start, memslot) << PAGE_SHIFT;
gpa              2039 virt/kvm/arm/mmu.c 		ret |= handler(kvm, gpa, (u64)(hva_end - hva_start), data);
gpa              2045 virt/kvm/arm/mmu.c static int kvm_unmap_hva_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *data)
gpa              2047 virt/kvm/arm/mmu.c 	unmap_stage2_range(kvm, gpa, size);
gpa              2062 virt/kvm/arm/mmu.c static int kvm_set_spte_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *data)
gpa              2074 virt/kvm/arm/mmu.c 	stage2_set_pte(kvm, NULL, gpa, pte, 0);
gpa              2101 virt/kvm/arm/mmu.c static int kvm_age_hva_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *data)
gpa              2108 virt/kvm/arm/mmu.c 	if (!stage2_get_leaf_entry(kvm, gpa, &pud, &pmd, &pte))
gpa              2119 virt/kvm/arm/mmu.c static int kvm_test_age_hva_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *data)
gpa              2126 virt/kvm/arm/mmu.c 	if (!stage2_get_leaf_entry(kvm, gpa, &pud, &pmd, &pte))
gpa              2334 virt/kvm/arm/mmu.c 			gpa_t gpa = mem->guest_phys_addr +
gpa              2347 virt/kvm/arm/mmu.c 			ret = kvm_phys_addr_ioremap(kvm, gpa, pa,
gpa              2393 virt/kvm/arm/mmu.c 	gpa_t gpa = slot->base_gfn << PAGE_SHIFT;
gpa              2397 virt/kvm/arm/mmu.c 	unmap_stage2_range(kvm, gpa, size);
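handle_hva_to_gpa() above works because each memslot maps a run of guest frames to a contiguous userspace range, so within one slot the hva-to-gpa translation is linear. A sketch of that per-slot translation; field names mirror struct kvm_memory_slot, and the function stands in for hva_to_gfn_memslot() combined with the `<< PAGE_SHIFT` seen above:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12

struct memslot {
	uint64_t base_gfn;		/* first guest frame of the slot */
	unsigned long userspace_addr;	/* hva of the slot's host mapping */
	unsigned long npages;
};

static uint64_t hva_to_gpa(const struct memslot *slot, unsigned long hva)
{
	uint64_t gfn = slot->base_gfn +
		       ((hva - slot->userspace_addr) >> PAGE_SHIFT);

	return gfn << PAGE_SHIFT;
}

int main(void)
{
	struct memslot slot = {
		.base_gfn = 0x100,		/* slot starts at gpa 0x100000 */
		.userspace_addr = 0x7f0000000000UL,
		.npages = 16,
	};

	printf("gpa %#llx\n", (unsigned long long)
	       hva_to_gpa(&slot, 0x7f0000002000UL));	/* -> 0x102000 */
	return 0;
}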
gpa              2085 virt/kvm/arm/vgic/vgic-its.c 	gpa_t gpa = base;
gpa              2095 virt/kvm/arm/vgic/vgic-its.c 		ret = kvm_read_guest_lock(kvm, gpa, entry, esz);
gpa              2105 virt/kvm/arm/vgic/vgic-its.c 		gpa += byte_offset;
gpa              2115 virt/kvm/arm/vgic/vgic-its.c 			      struct its_ite *ite, gpa_t gpa, int ite_esz)
gpa              2126 virt/kvm/arm/vgic/vgic-its.c 	return kvm_write_guest_lock(kvm, gpa, &val, ite_esz);
gpa              2208 virt/kvm/arm/vgic/vgic-its.c 		gpa_t gpa = base + ite->event_id * ite_esz;
gpa              2219 virt/kvm/arm/vgic/vgic-its.c 		ret = vgic_its_save_ite(its, device, ite, gpa, ite_esz);
gpa              2393 virt/kvm/arm/vgic/vgic-its.c 	gpa_t gpa;
gpa              2401 virt/kvm/arm/vgic/vgic-its.c 	gpa = entry & KVM_ITS_L1E_ADDR_MASK;
gpa              2403 virt/kvm/arm/vgic/vgic-its.c 	ret = scan_its_table(its, gpa, SZ_64K, dte_esz,
gpa              2445 virt/kvm/arm/vgic/vgic-its.c 			     gpa_t gpa, int esz)
gpa              2453 virt/kvm/arm/vgic/vgic-its.c 	return kvm_write_guest_lock(its->dev->kvm, gpa, &val, esz);
gpa              2456 virt/kvm/arm/vgic/vgic-its.c static int vgic_its_restore_cte(struct vgic_its *its, gpa_t gpa, int esz)
gpa              2465 virt/kvm/arm/vgic/vgic-its.c 	ret = kvm_read_guest_lock(kvm, gpa, &val, esz);
gpa              2497 virt/kvm/arm/vgic/vgic-its.c 	gpa_t gpa = GITS_BASER_ADDR_48_to_52(baser);
gpa              2509 virt/kvm/arm/vgic/vgic-its.c 		ret = vgic_its_save_cte(its, collection, gpa, cte_esz);
gpa              2512 virt/kvm/arm/vgic/vgic-its.c 		gpa += cte_esz;
gpa              2525 virt/kvm/arm/vgic/vgic-its.c 	ret = kvm_write_guest_lock(its->dev->kvm, gpa, &val, cte_esz);
gpa              2540 virt/kvm/arm/vgic/vgic-its.c 	gpa_t gpa;
gpa              2546 virt/kvm/arm/vgic/vgic-its.c 	gpa = GITS_BASER_ADDR_48_to_52(baser);
gpa              2551 virt/kvm/arm/vgic/vgic-its.c 		ret = vgic_its_restore_cte(its, gpa, cte_esz);
gpa              2554 virt/kvm/arm/vgic/vgic-its.c 		gpa += cte_esz;
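The vgic ITS save/restore code above walks its guest-memory tables with one pattern throughout: start at the table base, read or write an esz-byte entry through kvm_read_guest_lock()/kvm_write_guest_lock(), and advance the gpa by one entry per step (the `gpa += cte_esz` lines). A sketch of that walk, with read_entry() as a hypothetical stand-in for the locked guest read:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical stand-in for kvm_read_guest_lock(). */
static int read_entry(uint64_t gpa, void *buf, int esz)
{
	memset(buf, 0, esz);
	printf("read %d-byte entry at gpa %#llx\n",
	       esz, (unsigned long long)gpa);
	return 0;
}

static int scan_table(uint64_t base, int nr_entries, int esz)
{
	uint64_t gpa = base;
	uint64_t entry;

	for (int i = 0; i < nr_entries; i++) {
		if (read_entry(gpa, &entry, esz))
			return -1;
		gpa += esz;	/* next entry, as in "gpa += cte_esz" */
	}
	return 0;
}

int main(void)
{
	return scan_table(0x80000, 4, 8);	/* illustrative table base */
}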
gpa              2070 virt/kvm/kvm_main.c int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len)
gpa              2072 virt/kvm/kvm_main.c 	gfn_t gfn = gpa >> PAGE_SHIFT;
gpa              2074 virt/kvm/kvm_main.c 	int offset = offset_in_page(gpa);
gpa              2090 virt/kvm/kvm_main.c int kvm_vcpu_read_guest(struct kvm_vcpu *vcpu, gpa_t gpa, void *data, unsigned long len)
gpa              2092 virt/kvm/kvm_main.c 	gfn_t gfn = gpa >> PAGE_SHIFT;
gpa              2094 virt/kvm/kvm_main.c 	int offset = offset_in_page(gpa);
gpa              2127 virt/kvm/kvm_main.c int kvm_read_guest_atomic(struct kvm *kvm, gpa_t gpa, void *data,
gpa              2130 virt/kvm/kvm_main.c 	gfn_t gfn = gpa >> PAGE_SHIFT;
gpa              2132 virt/kvm/kvm_main.c 	int offset = offset_in_page(gpa);
gpa              2138 virt/kvm/kvm_main.c int kvm_vcpu_read_guest_atomic(struct kvm_vcpu *vcpu, gpa_t gpa,
gpa              2141 virt/kvm/kvm_main.c 	gfn_t gfn = gpa >> PAGE_SHIFT;
gpa              2143 virt/kvm/kvm_main.c 	int offset = offset_in_page(gpa);
gpa              2183 virt/kvm/kvm_main.c int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
gpa              2186 virt/kvm/kvm_main.c 	gfn_t gfn = gpa >> PAGE_SHIFT;
gpa              2188 virt/kvm/kvm_main.c 	int offset = offset_in_page(gpa);
gpa              2204 virt/kvm/kvm_main.c int kvm_vcpu_write_guest(struct kvm_vcpu *vcpu, gpa_t gpa, const void *data,
gpa              2207 virt/kvm/kvm_main.c 	gfn_t gfn = gpa >> PAGE_SHIFT;
gpa              2209 virt/kvm/kvm_main.c 	int offset = offset_in_page(gpa);
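Every kvm_read_guest()/kvm_write_guest() variant above opens the same way: `gfn = gpa >> PAGE_SHIFT` and `offset = offset_in_page(gpa)`, followed by a loop, because a guest buffer may straddle page frames and each frame can resolve to a different memslot. A user-space sketch of that split-and-loop pattern, with copy_page() as a hypothetical stand-in for the per-gfn copy helpers:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

/* Hypothetical stand-in for the per-page copy done through a memslot. */
static int copy_page(uint64_t gfn, void *data, int offset, unsigned long len)
{
	printf("copy gfn %#llx offset %d len %lu\n",
	       (unsigned long long)gfn, offset, len);
	return 0;
}

static int read_guest(uint64_t gpa, void *data, unsigned long len)
{
	uint64_t gfn = gpa >> PAGE_SHIFT;	/* frame number */
	int offset = gpa & (PAGE_SIZE - 1);	/* offset_in_page(gpa) */

	while (len) {
		unsigned long seg = PAGE_SIZE - offset;	/* stay in page */

		if (seg > len)
			seg = len;
		if (copy_page(gfn, data, offset, seg))
			return -1;
		offset = 0;		/* later pages start at offset 0 */
		data = (char *)data + seg;
		len -= seg;
		gfn++;
	}
	return 0;
}

int main(void)
{
	char buf[32];

	/* A 32-byte read crossing a page boundary: two copy calls. */
	return read_guest(0x1ff0, buf, sizeof(buf));
}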
gpa              2227 virt/kvm/kvm_main.c 				       gpa_t gpa, unsigned long len)
gpa              2229 virt/kvm/kvm_main.c 	int offset = offset_in_page(gpa);
gpa              2230 virt/kvm/kvm_main.c 	gfn_t start_gfn = gpa >> PAGE_SHIFT;
gpa              2231 virt/kvm/kvm_main.c 	gfn_t end_gfn = (gpa + len - 1) >> PAGE_SHIFT;
gpa              2236 virt/kvm/kvm_main.c 	ghc->gpa = gpa;
gpa              2264 virt/kvm/kvm_main.c 			      gpa_t gpa, unsigned long len)
gpa              2267 virt/kvm/kvm_main.c 	return __kvm_gfn_to_hva_cache_init(slots, ghc, gpa, len);
gpa              2277 virt/kvm/kvm_main.c 	gpa_t gpa = ghc->gpa + offset;
gpa              2282 virt/kvm/kvm_main.c 		__kvm_gfn_to_hva_cache_init(slots, ghc, ghc->gpa, ghc->len);
gpa              2288 virt/kvm/kvm_main.c 		return kvm_write_guest(kvm, gpa, data, len);
gpa              2293 virt/kvm/kvm_main.c 	mark_page_dirty_in_slot(ghc->memslot, gpa >> PAGE_SHIFT);
gpa              2315 virt/kvm/kvm_main.c 		__kvm_gfn_to_hva_cache_init(slots, ghc, ghc->gpa, ghc->len);
gpa              2321 virt/kvm/kvm_main.c 		return kvm_read_guest(kvm, ghc->gpa, data, len);
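The gfn_to_hva cache above is only usable when the whole buffer stays within one memslot; __kvm_gfn_to_hva_cache_init() therefore first computes the inclusive gfn range of [gpa, gpa + len), and the cached read/write paths fall back to plain kvm_read_guest()/kvm_write_guest() when that range spans slots or the slot generation changed. A sketch of just the range computation:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12

int main(void)
{
	uint64_t gpa = 0x1ff8;		/* 8 bytes below a page boundary */
	unsigned long len = 16;		/* straddles into the next page */
	uint64_t start_gfn = gpa >> PAGE_SHIFT;
	uint64_t end_gfn = (gpa + len - 1) >> PAGE_SHIFT;  /* inclusive */

	/* start_gfn 1, end_gfn 2: the buffer crosses a page, so both
	 * frames must live in the same memslot for the cache to apply. */
	printf("start_gfn %llu end_gfn %llu\n",
	       (unsigned long long)start_gfn, (unsigned long long)end_gfn);
	return 0;
}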
gpa              2339 virt/kvm/kvm_main.c int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len)
gpa              2341 virt/kvm/kvm_main.c 	gfn_t gfn = gpa >> PAGE_SHIFT;
gpa              2343 virt/kvm/kvm_main.c 	int offset = offset_in_page(gpa);