pgd               241 arch/alpha/include/asm/mmu_context.h 		  = ((unsigned long)mm->pgd - IDENT_ADDR) >> PAGE_SHIFT;
pgd               255 arch/alpha/include/asm/mmu_context.h 	  = ((unsigned long)mm->pgd - IDENT_ADDR) >> PAGE_SHIFT;
pgd                76 arch/alpha/include/asm/mmzone.h #define pgd_page(pgd)		(pfn_to_page(pgd_val(pgd) >> 32))
pgd                33 arch/alpha/include/asm/page.h typedef struct { unsigned long pgd; } pgd_t;
pgd                38 arch/alpha/include/asm/page.h #define pgd_val(x)	((x).pgd)
pgd                30 arch/alpha/include/asm/pgalloc.h pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
pgd                32 arch/alpha/include/asm/pgalloc.h 	pgd_set(pgd, pmd);
pgd                38 arch/alpha/include/asm/pgalloc.h pgd_free(struct mm_struct *mm, pgd_t *pgd)
pgd                40 arch/alpha/include/asm/pgalloc.h 	free_page((unsigned long)pgd);
pgd               241 arch/alpha/include/asm/pgtable.h #define pgd_page(pgd)	(mem_map + ((pgd_val(pgd) & _PFN_MASK) >> 32))
pgd               244 arch/alpha/include/asm/pgtable.h extern inline unsigned long pgd_page_vaddr(pgd_t pgd)
pgd               245 arch/alpha/include/asm/pgtable.h { return PAGE_OFFSET + ((pgd_val(pgd) & _PFN_MASK) >> (32-PAGE_SHIFT)); }
pgd               259 arch/alpha/include/asm/pgtable.h extern inline int pgd_none(pgd_t pgd)		{ return !pgd_val(pgd); }
pgd               260 arch/alpha/include/asm/pgtable.h extern inline int pgd_bad(pgd_t pgd)		{ return (pgd_val(pgd) & ~_PFN_MASK) != _PAGE_TABLE; }
pgd               261 arch/alpha/include/asm/pgtable.h extern inline int pgd_present(pgd_t pgd)	{ return pgd_val(pgd) & _PAGE_VALID; }
pgd               288 arch/alpha/include/asm/pgtable.h #define pgd_offset(mm, address)	((mm)->pgd+pgd_index(address))
pgd                51 arch/alpha/mm/fault.c 	pcb->ptbr = ((unsigned long) next_mm->pgd - IDENT_ADDR) >> PAGE_SHIFT;
pgd               241 arch/alpha/mm/fault.c 		pgd_t *pgd, *pgd_k;
pgd               243 arch/alpha/mm/fault.c 		pgd = current->active_mm->pgd + index;
pgd               245 arch/alpha/mm/fault.c 		if (!pgd_present(*pgd) && pgd_present(*pgd_k)) {
pgd               246 arch/alpha/mm/fault.c 			pgd_val(*pgd) = pgd_val(*pgd_k);
pgd               148 arch/alpha/mm/init.c 	pgd_t *pgd;
pgd               186 arch/alpha/mm/init.c 	pgd = pgd_offset_k(VMALLOC_START);
pgd               187 arch/alpha/mm/init.c 	pgd_set(pgd, (pmd_t *)two_pages);
pgd               188 arch/alpha/mm/init.c 	pmd = pmd_offset(pgd, VMALLOC_START);
pgd               217 arch/alpha/mm/init.c 				if (pmd != pmd_offset(pgd, vaddr)) {
pgd               219 arch/alpha/mm/init.c 					pmd = pmd_offset(pgd, vaddr);
pgd               149 arch/arc/include/asm/mmu_context.h 	write_aux_reg(ARC_REG_SCRATCH_DATA0, next->pgd);
pgd                38 arch/arc/include/asm/page.h 	unsigned long pgd;
pgd                45 arch/arc/include/asm/page.h #define pgd_val(x)      ((x).pgd)
pgd                71 arch/arc/include/asm/pgalloc.h static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
pgd                73 arch/arc/include/asm/pgalloc.h 	free_pages((unsigned long)pgd, __get_order_pgd());
pgd               339 arch/arc/include/asm/pgtable.h #define pgd_offset(mm, addr)	(((mm)->pgd)+pgd_index(addr))
pgd                41 arch/arc/kernel/asm-offsets.c 	DEFINE(MM_PGD, offsetof(struct mm_struct, pgd));
pgd                32 arch/arc/mm/fault.c 	pgd_t *pgd, *pgd_k;
pgd                36 arch/arc/mm/fault.c 	pgd = pgd_offset_fast(current->active_mm, address);
pgd                42 arch/arc/mm/fault.c 	pud = pud_offset(pgd, address);
pgd                70 arch/arm/include/asm/kvm_host.h 	pgd_t *pgd;
pgd                66 arch/arm/include/asm/mmu_context.h 		cpu_switch_mm(mm->pgd, mm);
pgd                86 arch/arm/include/asm/mmu_context.h 			cpu_switch_mm(mm->pgd, mm);
pgd                53 arch/arm/include/asm/pgalloc.h extern void pgd_free(struct mm_struct *mm, pgd_t *pgd);
pgd                23 arch/arm/include/asm/pgtable-2level-types.h typedef struct { pmdval_t pgd[2]; } pgd_t;
pgd                28 arch/arm/include/asm/pgtable-2level-types.h #define pgd_val(x)	((x).pgd[0])
pgd                26 arch/arm/include/asm/pgtable-3level-types.h typedef struct { pgdval_t pgd; } pgd_t;
pgd                31 arch/arm/include/asm/pgtable-3level-types.h #define pgd_val(x)	((x).pgd)
pgd                20 arch/arm/include/asm/pgtable-nommu.h #define pgd_present(pgd)	(1)
pgd                21 arch/arm/include/asm/pgtable-nommu.h #define pgd_none(pgd)		(0)
pgd                22 arch/arm/include/asm/pgtable-nommu.h #define pgd_bad(pgd)		(0)
pgd                55 arch/arm/include/asm/pgtable.h #define pgd_ERROR(pgd)		__pgd_error(__FILE__, __LINE__, pgd)
pgd               182 arch/arm/include/asm/pgtable.h #define pgd_offset(mm, addr)	((mm)->pgd + pgd_index(addr))
pgd               154 arch/arm/include/asm/proc-fns.h #define cpu_switch_mm(pgd,mm) cpu_do_switch_mm(virt_to_phys(pgd),mm)
pgd               185 arch/arm/include/asm/proc-fns.h #define cpu_switch_mm(pgd,mm)	{ }
pgd                19 arch/arm/include/asm/stage2_pgtable.h #define stage2_pgd_none(kvm, pgd)		pgd_none(pgd)
pgd                20 arch/arm/include/asm/stage2_pgtable.h #define stage2_pgd_clear(kvm, pgd)		pgd_clear(pgd)
pgd                21 arch/arm/include/asm/stage2_pgtable.h #define stage2_pgd_present(kvm, pgd)		pgd_present(pgd)
pgd                22 arch/arm/include/asm/stage2_pgtable.h #define stage2_pgd_populate(kvm, pgd, pud)	pgd_populate(NULL, pgd, pud)
pgd                23 arch/arm/include/asm/stage2_pgtable.h #define stage2_pud_offset(kvm, pgd, address)	pud_offset(pgd, address)
pgd                92 arch/arm/kernel/smp.c static unsigned long get_arch_pgd(pgd_t *pgd)
pgd                95 arch/arm/kernel/smp.c 	return __phys_to_pfn(virt_to_phys(pgd));
pgd                97 arch/arm/kernel/smp.c 	return virt_to_phys(pgd);
pgd               395 arch/arm/kernel/smp.c 	cpu_switch_mm(mm->pgd, mm);
pgd                37 arch/arm/kernel/suspend.c 		cpu_switch_mm(mm->pgd, mm);
pgd               754 arch/arm/kernel/traps.c void __pgd_error(const char *file, int line, pgd_t pgd)
pgd               756 arch/arm/kernel/traps.c 	pr_err("%s:%d: bad pgd %08llx.\n", file, line, (long long)pgd_val(pgd));
pgd                26 arch/arm/lib/uaccess_with_memcpy.c 	pgd_t *pgd;
pgd                32 arch/arm/lib/uaccess_with_memcpy.c 	pgd = pgd_offset(current->mm, addr);
pgd                33 arch/arm/lib/uaccess_with_memcpy.c 	if (unlikely(pgd_none(*pgd) || pgd_bad(*pgd)))
pgd                36 arch/arm/lib/uaccess_with_memcpy.c 	pud = pud_offset(pgd, addr);
pgd               276 arch/arm/mm/context.c 	cpu_switch_mm(mm->pgd, mm);
pgd               358 arch/arm/mm/dump.c static void walk_pud(struct pg_state *st, pgd_t *pgd, unsigned long start)
pgd               360 arch/arm/mm/dump.c 	pud_t *pud = pud_offset(pgd, 0);
pgd               377 arch/arm/mm/dump.c 	pgd_t *pgd = pgd_offset(mm, 0UL);
pgd               381 arch/arm/mm/dump.c 	for (i = 0; i < PTRS_PER_PGD; i++, pgd++) {
pgd               383 arch/arm/mm/dump.c 		if (!pgd_none(*pgd)) {
pgd               384 arch/arm/mm/dump.c 			walk_pud(st, pgd, addr);
pgd               386 arch/arm/mm/dump.c 			note_page(st, addr, 1, pgd_val(*pgd), NULL);
pgd                93 arch/arm/mm/fault-armv.c 	pgd_t *pgd;
pgd                99 arch/arm/mm/fault-armv.c 	pgd = pgd_offset(vma->vm_mm, address);
pgd               100 arch/arm/mm/fault-armv.c 	if (pgd_none_or_clear_bad(pgd))
pgd               103 arch/arm/mm/fault-armv.c 	pud = pud_offset(pgd, address);
pgd                36 arch/arm/mm/fault.c 	pgd_t *pgd;
pgd                41 arch/arm/mm/fault.c 	printk("%spgd = %p\n", lvl, mm->pgd);
pgd                42 arch/arm/mm/fault.c 	pgd = pgd_offset(mm, addr);
pgd                43 arch/arm/mm/fault.c 	printk("%s[%08lx] *pgd=%08llx", lvl, addr, (long long)pgd_val(*pgd));
pgd                50 arch/arm/mm/fault.c 		if (pgd_none(*pgd))
pgd                53 arch/arm/mm/fault.c 		if (pgd_bad(*pgd)) {
pgd                58 arch/arm/mm/fault.c 		pud = pud_offset(pgd, addr);
pgd               410 arch/arm/mm/fault.c 	pgd_t *pgd, *pgd_k;
pgd               422 arch/arm/mm/fault.c 	pgd = cpu_get_pgd() + index;
pgd               423 arch/arm/mm/fault.c 	pgd_k = init_mm.pgd + index;
pgd               427 arch/arm/mm/fault.c 	if (!pgd_present(*pgd))
pgd               428 arch/arm/mm/fault.c 		set_pgd(pgd, *pgd_k);
pgd               430 arch/arm/mm/fault.c 	pud = pud_offset(pgd, addr);
pgd                68 arch/arm/mm/idmap.c static void idmap_add_pud(pgd_t *pgd, unsigned long addr, unsigned long end,
pgd                71 arch/arm/mm/idmap.c 	pud_t *pud = pud_offset(pgd, addr);
pgd                80 arch/arm/mm/idmap.c static void identity_mapping_add(pgd_t *pgd, const char *text_start,
pgd                95 arch/arm/mm/idmap.c 	pgd += pgd_index(addr);
pgd                98 arch/arm/mm/idmap.c 		idmap_add_pud(pgd, addr, next, prot);
pgd                99 arch/arm/mm/idmap.c 	} while (pgd++, addr = next, addr != end);
pgd               144 arch/arm/mm/ioremap.c 	pgd_t *pgd;
pgd               149 arch/arm/mm/ioremap.c 	pgd = pgd_offset_k(addr);
pgd               150 arch/arm/mm/ioremap.c 	pud = pud_offset(pgd, addr);
pgd               192 arch/arm/mm/ioremap.c 	pgd_t *pgd;
pgd               202 arch/arm/mm/ioremap.c 	pgd = pgd_offset_k(addr);
pgd               203 arch/arm/mm/ioremap.c 	pud = pud_offset(pgd, addr);
pgd               224 arch/arm/mm/ioremap.c 	pgd_t *pgd;
pgd               234 arch/arm/mm/ioremap.c 	pgd = pgd_offset_k(virt);
pgd               235 arch/arm/mm/ioremap.c 	pud = pud_offset(pgd, addr);
pgd               377 arch/arm/mm/mmu.c 	pgd_t *pgd = pgd_offset_k(addr);
pgd               378 arch/arm/mm/mmu.c 	pud_t *pud = pud_offset(pgd, addr);
pgd               830 arch/arm/mm/mmu.c static void __init alloc_init_pud(pgd_t *pgd, unsigned long addr,
pgd               835 arch/arm/mm/mmu.c 	pud_t *pud = pud_offset(pgd, addr);
pgd               853 arch/arm/mm/mmu.c 	pgd_t *pgd;
pgd               889 arch/arm/mm/mmu.c 	pgd = pgd_offset(mm, addr);
pgd               892 arch/arm/mm/mmu.c 		pud_t *pud = pud_offset(pgd, addr);
pgd               902 arch/arm/mm/mmu.c 		pgd += SUPERSECTION_SIZE >> PGDIR_SHIFT;
pgd               914 arch/arm/mm/mmu.c 	pgd_t *pgd;
pgd               938 arch/arm/mm/mmu.c 	pgd = pgd_offset(mm, addr);
pgd               943 arch/arm/mm/mmu.c 		alloc_init_pud(pgd, addr, next, phys, type, alloc, ng);
pgd               947 arch/arm/mm/mmu.c 	} while (pgd++, addr != end);
pgd              1516 arch/arm/mm/mmu.c typedef void pgtables_remap(long long offset, unsigned long pgd, void *bdata);
pgd                21 arch/arm/mm/pgd.c #define __pgd_free(pgd)	kfree(pgd)
pgd                24 arch/arm/mm/pgd.c #define __pgd_free(pgd)	free_pages((unsigned long)pgd, 2)
pgd               118 arch/arm/mm/pgd.c 	pgd_t *pgd;
pgd               126 arch/arm/mm/pgd.c 	pgd = pgd_base + pgd_index(0);
pgd               127 arch/arm/mm/pgd.c 	if (pgd_none_or_clear_bad(pgd))
pgd               130 arch/arm/mm/pgd.c 	pud = pud_offset(pgd, 0);
pgd               147 arch/arm/mm/pgd.c 	pgd_clear(pgd);
pgd               154 arch/arm/mm/pgd.c 	for (pgd = pgd_base; pgd < pgd_base + PTRS_PER_PGD; pgd++) {
pgd               155 arch/arm/mm/pgd.c 		if (pgd_none_or_clear_bad(pgd))
pgd               157 arch/arm/mm/pgd.c 		if (pgd_val(*pgd) & L_PGD_SWAPPER)
pgd               159 arch/arm/mm/pgd.c 		pud = pud_offset(pgd, 0);
pgd               166 arch/arm/mm/pgd.c 		pgd_clear(pgd);
pgd                69 arch/arm64/include/asm/kvm_host.h 	pgd_t *pgd;
pgd               133 arch/arm64/include/asm/mmu.h 	.pgd = init_pg_dir,
pgd                49 arch/arm64/include/asm/mmu_context.h static inline void cpu_switch_mm(pgd_t *pgd, struct mm_struct *mm)
pgd                51 arch/arm64/include/asm/mmu_context.h 	BUG_ON(pgd == swapper_pg_dir);
pgd                53 arch/arm64/include/asm/mmu_context.h 	cpu_do_switch_mm(virt_to_phys(pgd),mm);
pgd               122 arch/arm64/include/asm/mmu_context.h 		cpu_switch_mm(mm->pgd, mm);
pgd               192 arch/arm64/include/asm/mmu_context.h 		ttbr = virt_to_phys(mm->pgd) | ASID(mm) << 48;
pgd                38 arch/arm64/include/asm/pgtable-types.h typedef struct { pgdval_t pgd; } pgd_t;
pgd                39 arch/arm64/include/asm/pgtable-types.h #define pgd_val(x)	((x).pgd)
pgd               296 arch/arm64/include/asm/pgtable.h static inline pte_t pgd_pte(pgd_t pgd)
pgd               298 arch/arm64/include/asm/pgtable.h 	return __pte(pgd_val(pgd));
pgd               404 arch/arm64/include/asm/pgtable.h #define __pgd_to_phys(pgd)	__pte_to_phys(pgd_pte(pgd))
pgd               461 arch/arm64/include/asm/pgtable.h extern void set_swapper_pgd(pgd_t *pgdp, pgd_t pgd);
pgd               589 arch/arm64/include/asm/pgtable.h #define pgd_none(pgd)		(!pgd_val(pgd))
pgd               590 arch/arm64/include/asm/pgtable.h #define pgd_bad(pgd)		(!(pgd_val(pgd) & 2))
pgd               591 arch/arm64/include/asm/pgtable.h #define pgd_present(pgd)	(pgd_val(pgd))
pgd               593 arch/arm64/include/asm/pgtable.h static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
pgd               596 arch/arm64/include/asm/pgtable.h 		set_swapper_pgd(pgdp, pgd);
pgd               600 arch/arm64/include/asm/pgtable.h 	WRITE_ONCE(*pgdp, pgd);
pgd               610 arch/arm64/include/asm/pgtable.h static inline phys_addr_t pgd_page_paddr(pgd_t pgd)
pgd               612 arch/arm64/include/asm/pgtable.h 	return __pgd_to_phys(pgd);
pgd               622 arch/arm64/include/asm/pgtable.h #define pud_set_fixmap_offset(pgd, addr)	pud_set_fixmap(pud_offset_phys(pgd, addr))
pgd               625 arch/arm64/include/asm/pgtable.h #define pgd_page(pgd)		pfn_to_page(__phys_to_pfn(__pgd_to_phys(pgd)))
pgd               632 arch/arm64/include/asm/pgtable.h #define pgd_page_paddr(pgd)	({ BUILD_BUG(); 0;})
pgd               643 arch/arm64/include/asm/pgtable.h #define pgd_ERROR(pgd)		__pgd_error(__FILE__, __LINE__, pgd_val(pgd))
pgd               648 arch/arm64/include/asm/pgtable.h #define pgd_offset_raw(pgd, addr)	((pgd) + pgd_index(addr))
pgd               650 arch/arm64/include/asm/pgtable.h #define pgd_offset(mm, addr)	(pgd_offset_raw((mm)->pgd, (addr)))
pgd               693 arch/arm64/include/asm/pgtable.h static inline int pgd_devmap(pgd_t pgd)
pgd                71 arch/arm64/include/asm/stage2_pgtable.h static inline bool stage2_pgd_none(struct kvm *kvm, pgd_t pgd)
pgd                74 arch/arm64/include/asm/stage2_pgtable.h 		return pgd_none(pgd);
pgd                85 arch/arm64/include/asm/stage2_pgtable.h static inline bool stage2_pgd_present(struct kvm *kvm, pgd_t pgd)
pgd                88 arch/arm64/include/asm/stage2_pgtable.h 		return pgd_present(pgd);
pgd                93 arch/arm64/include/asm/stage2_pgtable.h static inline void stage2_pgd_populate(struct kvm *kvm, pgd_t *pgd, pud_t *pud)
pgd                96 arch/arm64/include/asm/stage2_pgtable.h 		pgd_populate(NULL, pgd, pud);
pgd               100 arch/arm64/include/asm/stage2_pgtable.h 				       pgd_t *pgd, unsigned long address)
pgd               103 arch/arm64/include/asm/stage2_pgtable.h 		return pud_offset(pgd, address);
pgd               105 arch/arm64/include/asm/stage2_pgtable.h 		return (pud_t *)pgd;
pgd               237 arch/arm64/mm/context.c 		cpu_switch_mm(mm->pgd, mm);
pgd               352 arch/arm64/mm/dump.c 		pgd_t pgd = READ_ONCE(*pgdp);
pgd               355 arch/arm64/mm/dump.c 		if (pgd_none(pgd)) {
pgd               356 arch/arm64/mm/dump.c 			note_page(st, addr, 1, pgd_val(pgd));
pgd               358 arch/arm64/mm/dump.c 			BUG_ON(pgd_bad(pgd));
pgd               120 arch/arm64/mm/fault.c 		return __pa_symbol(mm->pgd);
pgd               122 arch/arm64/mm/fault.c 	return (unsigned long)virt_to_phys(mm->pgd);
pgd               132 arch/arm64/mm/fault.c 	pgd_t pgd;
pgd               155 arch/arm64/mm/fault.c 	pgd = READ_ONCE(*pgdp);
pgd               156 arch/arm64/mm/fault.c 	pr_alert("[%016lx] pgd=%016llx", addr, pgd_val(pgd));
pgd               163 arch/arm64/mm/fault.c 		if (pgd_none(pgd) || pgd_bad(pgd))
pgd                63 arch/arm64/mm/mmu.c void set_swapper_pgd(pgd_t *pgdp, pgd_t pgd)
pgd                69 arch/arm64/mm/mmu.c 	WRITE_ONCE(*fixmap_pgdp, pgd);
pgd               292 arch/arm64/mm/mmu.c 	pgd_t pgd = READ_ONCE(*pgdp);
pgd               294 arch/arm64/mm/mmu.c 	if (pgd_none(pgd)) {
pgd               299 arch/arm64/mm/mmu.c 		pgd = READ_ONCE(*pgdp);
pgd               301 arch/arm64/mm/mmu.c 	BUG_ON(pgd_bad(pgd));
pgd               407 arch/arm64/mm/mmu.c 	__create_pgd_mapping(init_mm.pgd, phys, virt, size, prot, NULL,
pgd               422 arch/arm64/mm/mmu.c 	__create_pgd_mapping(mm->pgd, phys, virt, size, prot,
pgd               435 arch/arm64/mm/mmu.c 	__create_pgd_mapping(init_mm.pgd, phys, virt, size, prot, NULL,
pgd               680 arch/arm64/mm/mmu.c 	init_mm.pgd = swapper_pg_dir;
pgd               781 arch/arm64/mm/mmu.c 	pgd_t pgd = READ_ONCE(*pgdp);
pgd               783 arch/arm64/mm/mmu.c 	BUG_ON(pgd_none(pgd) || pgd_bad(pgd));
pgd               811 arch/arm64/mm/mmu.c 	pgd_t *pgdp, pgd;
pgd               817 arch/arm64/mm/mmu.c 	pgd = READ_ONCE(*pgdp);
pgd               819 arch/arm64/mm/mmu.c 	    !(pgd_none(pgd) || pgd_page_paddr(pgd) == __pa_symbol(bm_pud))) {
pgd               828 arch/arm64/mm/mmu.c 		if (pgd_none(pgd))
pgd                30 arch/arm64/mm/pgd.c void pgd_free(struct mm_struct *mm, pgd_t *pgd)
pgd                33 arch/arm64/mm/pgd.c 		free_page((unsigned long)pgd);
pgd                35 arch/arm64/mm/pgd.c 		kmem_cache_free(pgd_cache, pgd);
pgd                23 arch/c6x/include/asm/pgtable.h #define pgd_present(pgd)	(1)
pgd                24 arch/c6x/include/asm/pgtable.h #define pgd_none(pgd)		(0)
pgd                25 arch/c6x/include/asm/pgtable.h #define pgd_bad(pgd)		(0)
pgd                92 arch/csky/abiv1/inc/abi/ckmmu.h static inline void setup_pgd(unsigned long pgd, bool kernel)
pgd                94 arch/csky/abiv1/inc/abi/ckmmu.h 	cpwcr("cpcr29", pgd | BIT(0));
pgd               103 arch/csky/abiv2/inc/abi/ckmmu.h static inline void setup_pgd(unsigned long pgd, bool kernel)
pgd               106 arch/csky/abiv2/inc/abi/ckmmu.h 		mtcr("cr<28, 15>", pgd | BIT(0));
pgd               108 arch/csky/abiv2/inc/abi/ckmmu.h 		mtcr("cr<29, 15>", pgd | BIT(0));
pgd                17 arch/csky/include/asm/mmu_context.h #define TLBMISS_HANDLER_SETUP_PGD(pgd) \
pgd                18 arch/csky/include/asm/mmu_context.h 	setup_pgd(__pa(pgd), false)
pgd                20 arch/csky/include/asm/mmu_context.h #define TLBMISS_HANDLER_SETUP_PGD_KERNEL(pgd) \
pgd                21 arch/csky/include/asm/mmu_context.h 	setup_pgd(__pa(pgd), true)
pgd                44 arch/csky/include/asm/mmu_context.h 	TLBMISS_HANDLER_SETUP_PGD(next->pgd);
pgd                62 arch/csky/include/asm/page.h typedef struct { unsigned long pgd; } pgd_t;
pgd                66 arch/csky/include/asm/page.h #define pgd_val(x)	((x).pgd)
pgd                45 arch/csky/include/asm/pgalloc.h static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
pgd                47 arch/csky/include/asm/pgalloc.h 	free_pages((unsigned long)pgd, PGD_ORDER);
pgd               290 arch/csky/include/asm/pgtable.h 	return mm->pgd + pgd_index(address);
pgd                78 arch/csky/mm/fault.c 		pgd_t *pgd, *pgd_k;
pgd                86 arch/csky/mm/fault.c 		pgd = (pgd_t *)pgd_base + offset;
pgd                87 arch/csky/mm/fault.c 		pgd_k = init_mm.pgd + offset;
pgd                91 arch/csky/mm/fault.c 		set_pgd(pgd, *pgd_k);
pgd                93 arch/csky/mm/fault.c 		pud = (pud_t *)pgd;
pgd               124 arch/csky/mm/highmem.c 	pgd_t *pgd;
pgd               135 arch/csky/mm/highmem.c 	pgd = pgd_base + i;
pgd               137 arch/csky/mm/highmem.c 	for ( ; (i < PTRS_PER_PGD) && (vaddr != end); pgd++, i++) {
pgd               138 arch/csky/mm/highmem.c 		pud = (pud_t *)pgd;
pgd               166 arch/csky/mm/highmem.c 	pgd_t *pgd;
pgd               186 arch/csky/mm/highmem.c 	pgd = swapper_pg_dir + __pgd_offset(vaddr);
pgd               187 arch/csky/mm/highmem.c 	pud = (pud_t *)pgd;
pgd                66 arch/hexagon/include/asm/mmu_context.h 			next->pgd[l1] = init_mm.pgd[l1];
pgd                70 arch/hexagon/include/asm/page.h typedef struct { unsigned long pgd; } pgd_t;
pgd                75 arch/hexagon/include/asm/page.h #define pgd_val(x)     ((x).pgd)
pgd                23 arch/hexagon/include/asm/pgalloc.h 	pgd_t *pgd;
pgd                25 arch/hexagon/include/asm/pgalloc.h 	pgd = (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
pgd                35 arch/hexagon/include/asm/pgalloc.h 	memcpy(pgd, swapper_pg_dir, PTRS_PER_PGD*sizeof(pgd_t));
pgd                39 arch/hexagon/include/asm/pgalloc.h 	mm->context.ptbase = __pa(pgd);
pgd                41 arch/hexagon/include/asm/pgalloc.h 	return pgd;
pgd                44 arch/hexagon/include/asm/pgalloc.h static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
pgd                46 arch/hexagon/include/asm/pgalloc.h 	free_page((unsigned long) pgd);
pgd                88 arch/hexagon/include/asm/pgalloc.h 	pmdindex = (pgd_t *)pmd - mm->pgd;
pgd                89 arch/hexagon/include/asm/pgalloc.h 	ppmd = (pmd_t *)current->active_mm->pgd + pmdindex;
pgd               232 arch/hexagon/include/asm/pgtable.h #define pgd_offset(mm, addr) ((mm)->pgd + pgd_index(addr))
pgd                71 arch/hexagon/mm/init.c 	init_mm.context.ptbase = __pa(init_mm.pgd);
pgd               193 arch/ia64/include/asm/mmu_context.h 	ia64_set_kr(IA64_KR_PT_BASE, __pa(next->pgd));
pgd               181 arch/ia64/include/asm/page.h   typedef struct { unsigned long pgd; } pgd_t;
pgd               190 arch/ia64/include/asm/page.h # define pgd_val(x)	((x).pgd)
pgd                32 arch/ia64/include/asm/pgalloc.h static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
pgd                34 arch/ia64/include/asm/pgalloc.h 	free_page((unsigned long)pgd);
pgd               286 arch/ia64/include/asm/pgtable.h #define pgd_none(pgd)			(!pgd_val(pgd))
pgd               287 arch/ia64/include/asm/pgtable.h #define pgd_bad(pgd)			(!ia64_phys_addr_valid(pgd_val(pgd)))
pgd               288 arch/ia64/include/asm/pgtable.h #define pgd_present(pgd)		(pgd_val(pgd) != 0UL)
pgd               290 arch/ia64/include/asm/pgtable.h #define pgd_page_vaddr(pgd)		((unsigned long) __va(pgd_val(pgd) & _PFN_MASK))
pgd               291 arch/ia64/include/asm/pgtable.h #define pgd_page(pgd)			virt_to_page((pgd_val(pgd) + PAGE_OFFSET))
pgd               375 arch/ia64/include/asm/pgtable.h 	return mm->pgd + pgd_index(address);
pgd               381 arch/ia64/include/asm/pgtable.h 	(init_mm.pgd + (((addr) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1)))
pgd                31 arch/ia64/mm/fault.c 	pgd_t *pgd;
pgd                36 arch/ia64/mm/fault.c 	pgd = pgd_offset_k(address);
pgd                37 arch/ia64/mm/fault.c 	if (pgd_none(*pgd) || pgd_bad(*pgd))
pgd                40 arch/ia64/mm/fault.c 	pud = pud_offset(pgd, address);
pgd                32 arch/ia64/mm/hugetlbpage.c 	pgd_t *pgd;
pgd                37 arch/ia64/mm/hugetlbpage.c 	pgd = pgd_offset(mm, taddr);
pgd                38 arch/ia64/mm/hugetlbpage.c 	pud = pud_alloc(mm, pgd, taddr);
pgd                51 arch/ia64/mm/hugetlbpage.c 	pgd_t *pgd;
pgd                56 arch/ia64/mm/hugetlbpage.c 	pgd = pgd_offset(mm, taddr);
pgd                57 arch/ia64/mm/hugetlbpage.c 	if (pgd_present(*pgd)) {
pgd                58 arch/ia64/mm/hugetlbpage.c 		pud = pud_offset(pgd, taddr);
pgd               210 arch/ia64/mm/init.c 	pgd_t *pgd;
pgd               215 arch/ia64/mm/init.c 	pgd = pgd_offset_k(address);		/* note: this is NOT pgd_offset()! */
pgd               218 arch/ia64/mm/init.c 		pud = pud_alloc(&init_mm, pgd, address);
pgd               384 arch/ia64/mm/init.c 		pgd_t *pgd;
pgd               389 arch/ia64/mm/init.c 		pgd = pgd_offset_k(end_address);
pgd               390 arch/ia64/mm/init.c 		if (pgd_none(*pgd)) {
pgd               395 arch/ia64/mm/init.c 		pud = pud_offset(pgd, end_address);
pgd               432 arch/ia64/mm/init.c 	pgd_t *pgd;
pgd               445 arch/ia64/mm/init.c 		pgd = pgd_offset_k(address);
pgd               446 arch/ia64/mm/init.c 		if (pgd_none(*pgd)) {
pgd               450 arch/ia64/mm/init.c 			pgd_populate(&init_mm, pgd, pud);
pgd               452 arch/ia64/mm/init.c 		pud = pud_offset(pgd, address);
pgd                26 arch/m68k/include/asm/mcf_pgalloc.h extern inline pmd_t *pmd_alloc_kernel(pgd_t *pgd, unsigned long address)
pgd                28 arch/m68k/include/asm/mcf_pgalloc.h 	return (pmd_t *) pgd;
pgd                86 arch/m68k/include/asm/mcf_pgalloc.h static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
pgd                88 arch/m68k/include/asm/mcf_pgalloc.h 	free_page((unsigned long) pgd);
pgd               201 arch/m68k/include/asm/mcf_pgtable.h static inline int pgd_none(pgd_t pgd) { return 0; }
pgd               202 arch/m68k/include/asm/mcf_pgtable.h static inline int pgd_bad(pgd_t pgd) { return 0; }
pgd               203 arch/m68k/include/asm/mcf_pgtable.h static inline int pgd_present(pgd_t pgd) { return 1; }
pgd               335 arch/m68k/include/asm/mcf_pgtable.h #define pgd_offset(mm, address)	((mm)->pgd + pgd_index(address))
pgd               345 arch/m68k/include/asm/mcf_pgtable.h static inline pmd_t *pmd_offset(pgd_t *pgd, unsigned long address)
pgd               347 arch/m68k/include/asm/mcf_pgtable.h 	return (pmd_t *) pgd;
pgd                70 arch/m68k/include/asm/mmu_context.h static inline void set_context(mm_context_t context, pgd_t *pgd)
pgd                79 arch/m68k/include/asm/mmu_context.h 	set_context(tsk->mm->context, next->pgd);
pgd                90 arch/m68k/include/asm/mmu_context.h 	set_context(mm->context, mm->pgd);
pgd               102 arch/m68k/include/asm/mmu_context.h 	pgd_t *pgd;
pgd               126 arch/m68k/include/asm/mmu_context.h 	pgd = pgd_offset(mm, mmuar);
pgd               127 arch/m68k/include/asm/mmu_context.h 	if (pgd_none(*pgd))
pgd               130 arch/m68k/include/asm/mmu_context.h 	pmd = pmd_offset(pgd, mmuar);
pgd               220 arch/m68k/include/asm/mmu_context.h 	mm->context = virt_to_phys(mm->pgd);
pgd               298 arch/m68k/include/asm/mmu_context.h 	next_mm->context = virt_to_phys(next_mm->pgd);
pgd                87 arch/m68k/include/asm/motorola_pgalloc.h static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
pgd                89 arch/m68k/include/asm/motorola_pgalloc.h 	pmd_free(mm, (pmd_t *)pgd);
pgd               109 arch/m68k/include/asm/motorola_pgalloc.h static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
pgd               111 arch/m68k/include/asm/motorola_pgalloc.h 	pgd_set(pgd, pmd);
pgd               127 arch/m68k/include/asm/motorola_pgtable.h #define __pgd_page(pgd) ((unsigned long)__va(pgd_val(pgd) & _TABLE_MASK))
pgd               150 arch/m68k/include/asm/motorola_pgtable.h #define pgd_none(pgd)		(!pgd_val(pgd))
pgd               151 arch/m68k/include/asm/motorola_pgtable.h #define pgd_bad(pgd)		((pgd_val(pgd) & _DESCTYPE_MASK) != _PAGE_TABLE)
pgd               152 arch/m68k/include/asm/motorola_pgtable.h #define pgd_present(pgd)	(pgd_val(pgd) & _PAGE_TABLE)
pgd               154 arch/m68k/include/asm/motorola_pgtable.h #define pgd_page(pgd)		(mem_map + ((unsigned long)(__va(pgd_val(pgd)) - PAGE_OFFSET) >> PAGE_SHIFT))
pgd               199 arch/m68k/include/asm/motorola_pgtable.h 	return mm->pgd + pgd_index(address);
pgd                26 arch/m68k/include/asm/page.h typedef struct { unsigned long pgd; } pgd_t;
pgd                32 arch/m68k/include/asm/page.h #define pgd_val(x)	((x).pgd)
pgd                19 arch/m68k/include/asm/pgtable_no.h #define pgd_present(pgd)	(1)
pgd                20 arch/m68k/include/asm/pgtable_no.h #define pgd_none(pgd)		(0)
pgd                21 arch/m68k/include/asm/pgtable_no.h #define pgd_bad(pgd)		(0)
pgd                46 arch/m68k/include/asm/sun3_pgalloc.h static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
pgd                48 arch/m68k/include/asm/sun3_pgalloc.h         free_page((unsigned long) pgd);
pgd               148 arch/m68k/include/asm/sun3_pgtable.h static inline int pgd_none (pgd_t pgd) { return 0; }
pgd               149 arch/m68k/include/asm/sun3_pgtable.h static inline int pgd_bad (pgd_t pgd) { return 0; }
pgd               150 arch/m68k/include/asm/sun3_pgtable.h static inline int pgd_present (pgd_t pgd) { return 1; }
pgd               192 arch/m68k/include/asm/sun3_pgtable.h ((mm)->pgd + pgd_index(address))
pgd               198 arch/m68k/include/asm/sun3_pgtable.h static inline pmd_t *pmd_offset (pgd_t *pgd, unsigned long address)
pgd               200 arch/m68k/include/asm/sun3_pgtable.h 	return (pmd_t *) pgd;
pgd               467 arch/m68k/kernel/sys_m68k.c 		pgd_t *pgd;
pgd               474 arch/m68k/kernel/sys_m68k.c 		pgd = pgd_offset(mm, (unsigned long)mem);
pgd               475 arch/m68k/kernel/sys_m68k.c 		if (!pgd_present(*pgd))
pgd               477 arch/m68k/kernel/sys_m68k.c 		pmd = pmd_offset(pgd, (unsigned long)mem);
pgd                77 arch/m68k/mm/fault.c 		regs->sr, regs->pc, address, error_code, mm ? mm->pgd : NULL);
pgd                94 arch/m68k/mm/mcfmmu.c 	pgd_t *pgd;
pgd               110 arch/m68k/mm/mcfmmu.c 	pgd = pgd_offset(mm, mmuar);
pgd               111 arch/m68k/mm/mcfmmu.c 	if (pgd_none(*pgd))  {
pgd               116 arch/m68k/mm/mcfmmu.c 	pmd = pmd_offset(pgd, mmuar);
pgd               371 arch/m68k/sun3/mmu_emu.c 			crp = current->mm->pgd;
pgd                82 arch/m68k/sun3x/dvma.c 	pgd_t *pgd;
pgd                92 arch/m68k/sun3x/dvma.c 	pgd = pgd_offset_k(vaddr);
pgd                98 arch/m68k/sun3x/dvma.c 		if((pmd = pmd_alloc(&init_mm, pgd, vaddr)) == NULL) {
pgd                57 arch/microblaze/include/asm/mmu_context_mm.h extern void set_context(mm_context_t context, pgd_t *pgd);
pgd               123 arch/microblaze/include/asm/mmu_context_mm.h 	tsk->thread.pgdir = next->pgd;
pgd               125 arch/microblaze/include/asm/mmu_context_mm.h 	set_context(next->context, next->pgd);
pgd               135 arch/microblaze/include/asm/mmu_context_mm.h 	current->thread.pgdir = mm->pgd;
pgd               137 arch/microblaze/include/asm/mmu_context_mm.h 	set_context(mm->context, mm->pgd);
pgd                94 arch/microblaze/include/asm/page.h typedef struct { unsigned long pgd; } pgd_t;
pgd               107 arch/microblaze/include/asm/page.h #   define pgd_val(x)      ((x).pgd)
pgd                34 arch/microblaze/include/asm/pgalloc.h static inline void free_pgd(pgd_t *pgd)
pgd                36 arch/microblaze/include/asm/pgalloc.h 	free_page((unsigned long)pgd);
pgd                39 arch/microblaze/include/asm/pgalloc.h #define pgd_free(mm, pgd)	free_pgd(pgd)
pgd                22 arch/microblaze/include/asm/pgtable.h #define pgd_present(pgd)	(1) /* pages are always present on non MMU */
pgd                23 arch/microblaze/include/asm/pgtable.h #define pgd_none(pgd)		(0)
pgd                24 arch/microblaze/include/asm/pgtable.h #define pgd_bad(pgd)		(0)
pgd               321 arch/microblaze/include/asm/pgtable.h static inline int pgd_none(pgd_t pgd)		{ return 0; }
pgd               322 arch/microblaze/include/asm/pgtable.h static inline int pgd_bad(pgd_t pgd)		{ return 0; }
pgd               323 arch/microblaze/include/asm/pgtable.h static inline int pgd_present(pgd_t pgd)	{ return 1; }
pgd               325 arch/microblaze/include/asm/pgtable.h #define pgd_page(pgd) \
pgd               326 arch/microblaze/include/asm/pgtable.h 	((unsigned long) __va(pgd_val(pgd) & PAGE_MASK))
pgd               480 arch/microblaze/include/asm/pgtable.h #define pgd_offset(mm, address)	 ((mm)->pgd + pgd_index(address))
pgd               190 arch/microblaze/mm/pgtable.c 	pgd_t	*pgd;
pgd               195 arch/microblaze/mm/pgtable.c 	pgd = pgd_offset(mm, addr & PAGE_MASK);
pgd               196 arch/microblaze/mm/pgtable.c 	if (pgd) {
pgd               197 arch/microblaze/mm/pgtable.c 		pmd = pmd_offset(pgd, addr & PAGE_MASK);
pgd               918 arch/mips/include/asm/kvm_host.h void kvm_mips_flush_gva_pt(pgd_t *pgd, enum kvm_mips_flush flags);
pgd                28 arch/mips/include/asm/mmu_context.h #define htw_set_pwbase(pgd)						\
pgd                31 arch/mips/include/asm/mmu_context.h 		write_c0_pwbase(pgd);					\
pgd                40 arch/mips/include/asm/mmu_context.h #define TLBMISS_HANDLER_SETUP_PGD(pgd)					\
pgd                42 arch/mips/include/asm/mmu_context.h 	tlbmiss_handler_setup_pgd((unsigned long)(pgd));		\
pgd                43 arch/mips/include/asm/mmu_context.h 	htw_set_pwbase((unsigned long)pgd);				\
pgd               147 arch/mips/include/asm/page.h typedef struct { unsigned long pgd; } pgd_t;
pgd               148 arch/mips/include/asm/page.h #define pgd_val(x)	((x).pgd)
pgd                50 arch/mips/include/asm/pgalloc.h static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
pgd                52 arch/mips/include/asm/pgalloc.h 	free_pages((unsigned long)pgd, PGD_ORDER);
pgd                99 arch/mips/include/asm/pgalloc.h static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
pgd               101 arch/mips/include/asm/pgalloc.h 	set_pgd(pgd, __pgd((unsigned long)pud));
pgd               210 arch/mips/include/asm/pgtable-32.h #define pgd_offset(mm, addr)	((mm)->pgd + pgd_index(addr))
pgd               191 arch/mips/include/asm/pgtable-64.h static inline int pgd_none(pgd_t pgd)
pgd               193 arch/mips/include/asm/pgtable-64.h 	return pgd_val(pgd) == (unsigned long)invalid_pud_table;
pgd               196 arch/mips/include/asm/pgtable-64.h static inline int pgd_bad(pgd_t pgd)
pgd               198 arch/mips/include/asm/pgtable-64.h 	if (unlikely(pgd_val(pgd) & ~PAGE_MASK))
pgd               204 arch/mips/include/asm/pgtable-64.h static inline int pgd_present(pgd_t pgd)
pgd               206 arch/mips/include/asm/pgtable-64.h 	return pgd_val(pgd) != (unsigned long)invalid_pud_table;
pgd               216 arch/mips/include/asm/pgtable-64.h static inline unsigned long pgd_page_vaddr(pgd_t pgd)
pgd               218 arch/mips/include/asm/pgtable-64.h 	return pgd_val(pgd);
pgd               221 arch/mips/include/asm/pgtable-64.h #define pgd_phys(pgd)		virt_to_phys((void *)pgd_val(pgd))
pgd               222 arch/mips/include/asm/pgtable-64.h #define pgd_page(pgd)		(pfn_to_page(pgd_phys(pgd) >> PAGE_SHIFT))
pgd               224 arch/mips/include/asm/pgtable-64.h static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
pgd               226 arch/mips/include/asm/pgtable-64.h 	return (pud_t *)pgd_page_vaddr(*pgd) + pud_index(address);
pgd               229 arch/mips/include/asm/pgtable-64.h static inline void set_pgd(pgd_t *pgd, pgd_t pgdval)
pgd               231 arch/mips/include/asm/pgtable-64.h 	*pgd = pgdval;
pgd               333 arch/mips/include/asm/pgtable-64.h #define pgd_offset(mm, addr)	((mm)->pgd + pgd_index(addr))
pgd               188 arch/mips/kernel/asm-offsets.c 	OFFSET(MM_PGD, mm_struct, pgd);
pgd              1011 arch/mips/kvm/emulate.c 		kvm_mips_flush_gva_pt(kern_mm->pgd, KMF_KERN);
pgd               323 arch/mips/kvm/entry.c 	UASM_i_LW(&p, A0, offsetof(struct kvm, arch.gpa_mm.pgd), S0);
pgd               413 arch/mips/kvm/entry.c 	UASM_i_LW(&p, A0, (int)offsetof(struct mm_struct, pgd) -
pgd               146 arch/mips/kvm/mips.c 	kvm->arch.gpa_mm.pgd = kvm_pgd_alloc();
pgd               147 arch/mips/kvm/mips.c 	if (!kvm->arch.gpa_mm.pgd)
pgd               176 arch/mips/kvm/mips.c 	pgd_free(NULL, kvm->arch.gpa_mm.pgd);
pgd               136 arch/mips/kvm/mmu.c static pte_t *kvm_mips_walk_pgd(pgd_t *pgd, struct kvm_mmu_memory_cache *cache,
pgd               142 arch/mips/kvm/mmu.c 	pgd += pgd_index(addr);
pgd               143 arch/mips/kvm/mmu.c 	if (pgd_none(*pgd)) {
pgd               148 arch/mips/kvm/mmu.c 	pud = pud_offset(pgd, addr);
pgd               177 arch/mips/kvm/mmu.c 	return kvm_mips_walk_pgd(kvm->arch.gpa_mm.pgd, cache, addr);
pgd               258 arch/mips/kvm/mmu.c static bool kvm_mips_flush_gpa_pgd(pgd_t *pgd, unsigned long start_gpa,
pgd               269 arch/mips/kvm/mmu.c 		if (!pgd_present(pgd[i]))
pgd               272 arch/mips/kvm/mmu.c 		pud = pud_offset(pgd + i, 0);
pgd               277 arch/mips/kvm/mmu.c 			pgd_clear(pgd + i);
pgd               301 arch/mips/kvm/mmu.c 	return kvm_mips_flush_gpa_pgd(kvm->arch.gpa_mm.pgd,
pgd               377 arch/mips/kvm/mmu.c static int kvm_mips_##name##_pgd(pgd_t *pgd, unsigned long start,	\
pgd               388 arch/mips/kvm/mmu.c 		if (!pgd_present(pgd[i]))				\
pgd               391 arch/mips/kvm/mmu.c 		pud = pud_offset(pgd + i, 0);				\
pgd               425 arch/mips/kvm/mmu.c 	return kvm_mips_mkclean_pgd(kvm->arch.gpa_mm.pgd,
pgd               463 arch/mips/kvm/mmu.c 	return kvm_mips_mkold_pgd(kvm->arch.gpa_mm.pgd,
pgd               799 arch/mips/kvm/mmu.c 		pgdp = vcpu->arch.guest_kernel_mm.pgd;
pgd               801 arch/mips/kvm/mmu.c 		pgdp = vcpu->arch.guest_user_mm.pgd;
pgd               814 arch/mips/kvm/mmu.c 	pgdp = vcpu->arch.guest_kernel_mm.pgd;
pgd               822 arch/mips/kvm/mmu.c 		pgdp = vcpu->arch.guest_user_mm.pgd;
pgd               916 arch/mips/kvm/mmu.c static bool kvm_mips_flush_gva_pgd(pgd_t *pgd, unsigned long start_gva,
pgd               927 arch/mips/kvm/mmu.c 		if (!pgd_present(pgd[i]))
pgd               930 arch/mips/kvm/mmu.c 		pud = pud_offset(pgd + i, 0);
pgd               935 arch/mips/kvm/mmu.c 			pgd_clear(pgd + i);
pgd               944 arch/mips/kvm/mmu.c void kvm_mips_flush_gva_pt(pgd_t *pgd, enum kvm_mips_flush flags)
pgd               950 arch/mips/kvm/mmu.c 			kvm_mips_flush_gva_pgd(pgd, 0, 0x7fffffff);
pgd               953 arch/mips/kvm/mmu.c 			kvm_mips_flush_gva_pgd(pgd, 0, 0x3fffffff);
pgd               956 arch/mips/kvm/mmu.c 		kvm_mips_flush_gva_pgd(pgd, 0, 0x3fffffff);
pgd               960 arch/mips/kvm/mmu.c 			kvm_mips_flush_gva_pgd(pgd, 0x60000000, 0x7fffffff);
pgd               549 arch/mips/kvm/trap_emul.c 	kern_mm->pgd = pgd_alloc(kern_mm);
pgd               550 arch/mips/kvm/trap_emul.c 	if (!kern_mm->pgd)
pgd               553 arch/mips/kvm/trap_emul.c 	user_mm->pgd = pgd_alloc(user_mm);
pgd               554 arch/mips/kvm/trap_emul.c 	if (!user_mm->pgd) {
pgd               555 arch/mips/kvm/trap_emul.c 		pgd_free(kern_mm, kern_mm->pgd);
pgd               562 arch/mips/kvm/trap_emul.c static void kvm_mips_emul_free_gva_pt(pgd_t *pgd)
pgd               573 arch/mips/kvm/trap_emul.c 		if (pgd_none(pgd[i]))
pgd               579 arch/mips/kvm/trap_emul.c 		pud = pud_offset(pgd + i, 0);
pgd               602 arch/mips/kvm/trap_emul.c 	pgd_free(NULL, pgd);
pgd               607 arch/mips/kvm/trap_emul.c 	kvm_mips_emul_free_gva_pt(vcpu->arch.guest_kernel_mm.pgd);
pgd               608 arch/mips/kvm/trap_emul.c 	kvm_mips_emul_free_gva_pt(vcpu->arch.guest_user_mm.pgd);
pgd              1098 arch/mips/kvm/trap_emul.c 		kvm_mips_flush_gva_pt(kern_mm->pgd, KMF_GPA | KMF_KERN);
pgd              1099 arch/mips/kvm/trap_emul.c 		kvm_mips_flush_gva_pt(user_mm->pgd, KMF_GPA | KMF_USER);
pgd              1111 arch/mips/kvm/trap_emul.c 			TLBMISS_HANDLER_SETUP_PGD(mm->pgd);
pgd              1212 arch/mips/kvm/trap_emul.c 			kvm_mips_flush_gva_pt(user_mm->pgd, KMF_USER);
pgd               263 arch/mips/mm/context.c 	TLBMISS_HANDLER_SETUP_PGD(mm->pgd);
pgd               296 arch/mips/mm/fault.c 		pgd_t *pgd, *pgd_k;
pgd               301 arch/mips/mm/fault.c 		pgd = (pgd_t *) pgd_current[raw_smp_processor_id()] + offset;
pgd               302 arch/mips/mm/fault.c 		pgd_k = init_mm.pgd + offset;
pgd               306 arch/mips/mm/fault.c 		set_pgd(pgd, *pgd_k);
pgd               308 arch/mips/mm/fault.c 		pud = pud_offset(pgd, address);
pgd                27 arch/mips/mm/hugetlbpage.c 	pgd_t *pgd;
pgd                31 arch/mips/mm/hugetlbpage.c 	pgd = pgd_offset(mm, addr);
pgd                32 arch/mips/mm/hugetlbpage.c 	pud = pud_alloc(mm, pgd, addr);
pgd                42 arch/mips/mm/hugetlbpage.c 	pgd_t *pgd;
pgd                46 arch/mips/mm/hugetlbpage.c 	pgd = pgd_offset(mm, addr);
pgd                47 arch/mips/mm/hugetlbpage.c 	if (pgd_present(*pgd)) {
pgd                48 arch/mips/mm/hugetlbpage.c 		pud = pud_offset(pgd, addr);
pgd               234 arch/mips/mm/init.c 	pgd_t *pgd;
pgd               245 arch/mips/mm/init.c 	pgd = pgd_base + i;
pgd               247 arch/mips/mm/init.c 	for ( ; (i < PTRS_PER_PGD) && (vaddr < end); pgd++, i++) {
pgd               248 arch/mips/mm/init.c 		pud = (pud_t *)pgd;
pgd                58 arch/mips/mm/pgtable-32.c 	pgd_t *pgd;
pgd                84 arch/mips/mm/pgtable-32.c 	pgd = swapper_pg_dir + __pgd_offset(vaddr);
pgd                85 arch/mips/mm/pgtable-32.c 	pud = pud_offset(pgd, vaddr);
pgd                24 arch/nds32/include/asm/pgalloc.h extern void pgd_free(struct mm_struct *mm, pgd_t * pgd);
pgd                42 arch/nds32/include/asm/pgtable.h #define pgd_ERROR(pgd)		__pgd_error(__FILE__, __LINE__, pgd_val(pgd))
pgd               349 arch/nds32/include/asm/pgtable.h #define pgd_none(pgd)		(0)
pgd               350 arch/nds32/include/asm/pgtable.h #define pgd_bad(pgd)		(0)
pgd               351 arch/nds32/include/asm/pgtable.h #define pgd_present(pgd)  	(1)
pgd               365 arch/nds32/include/asm/pgtable.h #define pgd_offset(mm, address)	((mm)->pgd + pgd_index(address))
pgd                25 arch/nds32/mm/fault.c 	pgd_t *pgd;
pgd                29 arch/nds32/mm/fault.c 	pr_alert("pgd = %p\n", mm->pgd);
pgd                30 arch/nds32/mm/fault.c 	pgd = pgd_offset(mm, addr);
pgd                31 arch/nds32/mm/fault.c 	pr_alert("[%08lx] *pgd=%08lx", addr, pgd_val(*pgd));
pgd                36 arch/nds32/mm/fault.c 		if (pgd_none(*pgd))
pgd                39 arch/nds32/mm/fault.c 		if (pgd_bad(*pgd)) {
pgd                44 arch/nds32/mm/fault.c 		pmd = pmd_offset(pgd, addr);
pgd               361 arch/nds32/mm/fault.c 		pgd_t *pgd, *pgd_k;
pgd               366 arch/nds32/mm/fault.c 		pgd = (pgd_t *) __va(__nds32__mfsr(NDS32_SR_L1_PPTB)) + index;
pgd               367 arch/nds32/mm/fault.c 		pgd_k = init_mm.pgd + index;
pgd               372 arch/nds32/mm/fault.c 		pud = pud_offset(pgd, addr);
pgd               102 arch/nds32/mm/init.c 	pgd_t *pgd;
pgd               113 arch/nds32/mm/init.c 	pgd = swapper_pg_dir + pgd_index(vaddr);
pgd               114 arch/nds32/mm/init.c 	pud = pud_offset(pgd, vaddr);
pgd               128 arch/nds32/mm/init.c 	pgd = swapper_pg_dir + pgd_index(vaddr);
pgd               129 arch/nds32/mm/init.c 	pud = pud_offset(pgd, vaddr);
pgd                41 arch/nds32/mm/mm-nds32.c void pgd_free(struct mm_struct *mm, pgd_t * pgd)
pgd                46 arch/nds32/mm/mm-nds32.c 	if (!pgd)
pgd                49 arch/nds32/mm/mm-nds32.c 	pmd = (pmd_t *) pgd;
pgd                60 arch/nds32/mm/mm-nds32.c 	dec_zone_page_state(virt_to_page((unsigned long *)pgd), NR_PAGETABLE);
pgd                65 arch/nds32/mm/mm-nds32.c 	free_pages((unsigned long)pgd, 0);
pgd                76 arch/nds32/mm/mm-nds32.c 	pgd_t *pgd;
pgd                80 arch/nds32/mm/mm-nds32.c 	if (current->mm && current->mm->pgd)
pgd                81 arch/nds32/mm/mm-nds32.c 		pgd = current->mm->pgd;
pgd                83 arch/nds32/mm/mm-nds32.c 		pgd = init_mm.pgd;
pgd                87 arch/nds32/mm/mm-nds32.c 		pmd = pmd_offset(pgd + i, i << PGDIR_SHIFT);
pgd                34 arch/nds32/mm/proc.c 	pgd_t *pgd;
pgd                39 arch/nds32/mm/proc.c 	pgd = pgd_offset(mm, addr);
pgd                40 arch/nds32/mm/proc.c 	if (!pgd_none(*pgd)) {
pgd                41 arch/nds32/mm/proc.c 		pud = pud_offset(pgd, addr);
pgd               532 arch/nds32/mm/proc.c 	__nds32__mtsr_isb(__pa(mm->pgd), NDS32_SR_L1_PPTB);
pgd                62 arch/nios2/include/asm/page.h typedef struct { unsigned long pgd; } pgd_t;
pgd                66 arch/nios2/include/asm/page.h #define pgd_val(x)	((x).pgd)
pgd                37 arch/nios2/include/asm/pgalloc.h static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
pgd                39 arch/nios2/include/asm/pgalloc.h 	free_pages((unsigned long)pgd, PGD_ORDER);
pgd               103 arch/nios2/include/asm/pgtable.h 	pmdptr->pud.pgd.pgd = pmdval.pud.pgd.pgd;
pgd               108 arch/nios2/include/asm/pgtable.h #define pgd_offset(mm, addr)	((mm)->pgd + pgd_index(addr))
pgd               247 arch/nios2/mm/fault.c 		pgd_t *pgd, *pgd_k;
pgd               252 arch/nios2/mm/fault.c 		pgd = pgd_current + offset;
pgd               253 arch/nios2/mm/fault.c 		pgd_k = init_mm.pgd + offset;
pgd               257 arch/nios2/mm/fault.c 		set_pgd(pgd, *pgd_k);
pgd               259 arch/nios2/mm/fault.c 		pud = pud_offset(pgd, address);
pgd                94 arch/nios2/mm/mmu_context.c 	pgd_current = next->pgd;
pgd               110 arch/nios2/mm/mmu_context.c 	pgd_current = next->pgd;
pgd                37 arch/nios2/mm/pgtable.c static void pgd_init(pgd_t *pgd)
pgd                39 arch/nios2/mm/pgtable.c 	unsigned long *p = (unsigned long *) pgd;
pgd                52 arch/openrisc/include/asm/page.h 	unsigned long pgd;
pgd                60 arch/openrisc/include/asm/page.h #define pgd_val(x)	((x).pgd)
pgd                64 arch/openrisc/include/asm/pgalloc.h static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
pgd                66 arch/openrisc/include/asm/pgalloc.h 	free_page((unsigned long)pgd);
pgd               376 arch/openrisc/include/asm/pgtable.h #define pgd_offset(mm, address) ((mm)->pgd+pgd_index(address))
pgd                95 arch/openrisc/kernel/smp.c 	current_pgd[cpu] = init_mm.pgd;
pgd               298 arch/openrisc/mm/fault.c 		pgd_t *pgd, *pgd_k;
pgd               310 arch/openrisc/mm/fault.c 		pgd = (pgd_t *)current_pgd[smp_processor_id()] + offset;
pgd               311 arch/openrisc/mm/fault.c 		pgd_k = init_mm.pgd + offset;
pgd               325 arch/openrisc/mm/fault.c 		pud = pud_offset(pgd, address);
pgd               147 arch/openrisc/mm/init.c 	current_pgd[smp_processor_id()] = init_mm.pgd;
pgd               147 arch/openrisc/mm/tlb.c 	current_pgd[smp_processor_id()] = next->pgd;
pgd                57 arch/parisc/include/asm/mmu_context.h 		mtctl(__pa(next->pgd), 25);
pgd                46 arch/parisc/include/asm/page.h typedef struct { __u32 pgd; } pgd_t;
pgd                52 arch/parisc/include/asm/page.h #define pgd_val(x)	((x).pgd + 0)
pgd                61 arch/parisc/include/asm/page.h #define __pgd_val_set(x,n) (x).pgd = (n)
pgd                26 arch/parisc/include/asm/pgalloc.h 	pgd_t *pgd = (pgd_t *)__get_free_pages(GFP_KERNEL,
pgd                28 arch/parisc/include/asm/pgalloc.h 	pgd_t *actual_pgd = pgd;
pgd                30 arch/parisc/include/asm/pgalloc.h 	if (likely(pgd != NULL)) {
pgd                31 arch/parisc/include/asm/pgalloc.h 		memset(pgd, 0, PAGE_SIZE<<PGD_ALLOC_ORDER);
pgd                40 arch/parisc/include/asm/pgalloc.h 			+ (__u32)(__pa((unsigned long)pgd) >> PxD_VALUE_SHIFT));
pgd                43 arch/parisc/include/asm/pgalloc.h 		__pgd_val_set(*pgd, PxD_FLAG_ATTACHED);
pgd                50 arch/parisc/include/asm/pgalloc.h static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
pgd                53 arch/parisc/include/asm/pgalloc.h 	pgd -= PTRS_PER_PGD;
pgd                55 arch/parisc/include/asm/pgalloc.h 	free_pages((unsigned long)pgd, PGD_ALLOC_ORDER);
pgd                62 arch/parisc/include/asm/pgalloc.h static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
pgd                64 arch/parisc/include/asm/pgalloc.h 	__pgd_val_set(*pgd, (PxD_FLAG_PRESENT | PxD_FLAG_VALID) +
pgd                93 arch/parisc/include/asm/pgtable.h 		spin_lock_irqsave(pgd_spinlock((mm)->pgd), flags);\
pgd                97 arch/parisc/include/asm/pgtable.h 		spin_unlock_irqrestore(pgd_spinlock((mm)->pgd), flags);\
pgd               346 arch/parisc/include/asm/pgtable.h #define pgd_page_vaddr(pgd) ((unsigned long) __va(pgd_address(pgd)))
pgd               347 arch/parisc/include/asm/pgtable.h #define pgd_page(pgd)	virt_to_page((void *)pgd_page_vaddr(pgd))
pgd               354 arch/parisc/include/asm/pgtable.h static inline void pgd_clear(pgd_t *pgd) {
pgd               356 arch/parisc/include/asm/pgtable.h 	if(pgd_flag(*pgd) & PxD_FLAG_ATTACHED)
pgd               361 arch/parisc/include/asm/pgtable.h 	__pgd_val_set(*pgd, 0);
pgd               369 arch/parisc/include/asm/pgtable.h static inline int pgd_none(pgd_t pgd)		{ return 0; }
pgd               370 arch/parisc/include/asm/pgtable.h static inline int pgd_bad(pgd_t pgd)		{ return 0; }
pgd               371 arch/parisc/include/asm/pgtable.h static inline int pgd_present(pgd_t pgd)	{ return 1; }
pgd               445 arch/parisc/include/asm/pgtable.h ((mm)->pgd + ((address) >> PGDIR_SHIFT))
pgd               490 arch/parisc/include/asm/pgtable.h static inline spinlock_t *pgd_spinlock(pgd_t *pgd)
pgd               492 arch/parisc/include/asm/pgtable.h 	if (unlikely(pgd == swapper_pg_dir))
pgd               494 arch/parisc/include/asm/pgtable.h 	return (spinlock_t *)((char *)pgd + (PAGE_SIZE << (PGD_ALLOC_ORDER - 1)));
pgd               506 arch/parisc/include/asm/pgtable.h 	spin_lock_irqsave(pgd_spinlock(vma->vm_mm->pgd), flags);
pgd               509 arch/parisc/include/asm/pgtable.h 		spin_unlock_irqrestore(pgd_spinlock(vma->vm_mm->pgd), flags);
pgd               514 arch/parisc/include/asm/pgtable.h 	spin_unlock_irqrestore(pgd_spinlock(vma->vm_mm->pgd), flags);
pgd               524 arch/parisc/include/asm/pgtable.h 	spin_lock_irqsave(pgd_spinlock(mm->pgd), flags);
pgd               528 arch/parisc/include/asm/pgtable.h 	spin_unlock_irqrestore(pgd_spinlock(mm->pgd), flags);
pgd               536 arch/parisc/include/asm/pgtable.h 	spin_lock_irqsave(pgd_spinlock(mm->pgd), flags);
pgd               539 arch/parisc/include/asm/pgtable.h 	spin_unlock_irqrestore(pgd_spinlock(mm->pgd), flags);
pgd               532 arch/parisc/kernel/cache.c static inline pte_t *get_ptep(pgd_t *pgd, unsigned long addr)
pgd               536 arch/parisc/kernel/cache.c 	if (!pgd_none(*pgd)) {
pgd               537 arch/parisc/kernel/cache.c 		pud_t *pud = pud_offset(pgd, addr);
pgd               550 arch/parisc/kernel/cache.c 	pgd_t *pgd;
pgd               572 arch/parisc/kernel/cache.c 	pgd = mm->pgd;
pgd               579 arch/parisc/kernel/cache.c 			pte_t *ptep = get_ptep(pgd, addr);
pgd               598 arch/parisc/kernel/cache.c 	pgd_t *pgd;
pgd               617 arch/parisc/kernel/cache.c 	pgd = vma->vm_mm->pgd;
pgd               620 arch/parisc/kernel/cache.c 		pte_t *ptep = get_ptep(pgd, addr);
pgd                16 arch/parisc/mm/fixmap.c 	pgd_t *pgd = pgd_offset_k(vaddr);
pgd                17 arch/parisc/mm/fixmap.c 	pmd_t *pmd = pmd_offset(pgd, vaddr);
pgd                21 arch/parisc/mm/fixmap.c 		pmd = pmd_alloc(NULL, pgd, vaddr);
pgd                34 arch/parisc/mm/fixmap.c 	pgd_t *pgd = pgd_offset_k(vaddr);
pgd                35 arch/parisc/mm/fixmap.c 	pmd_t *pmd = pmd_offset(pgd, vaddr);
pgd                51 arch/parisc/mm/hugetlbpage.c 	pgd_t *pgd;
pgd                63 arch/parisc/mm/hugetlbpage.c 	pgd = pgd_offset(mm, addr);
pgd                64 arch/parisc/mm/hugetlbpage.c 	pud = pud_alloc(mm, pgd, addr);
pgd                76 arch/parisc/mm/hugetlbpage.c 	pgd_t *pgd;
pgd                83 arch/parisc/mm/hugetlbpage.c 	pgd = pgd_offset(mm, addr);
pgd                84 arch/parisc/mm/hugetlbpage.c 	if (!pgd_none(*pgd)) {
pgd                85 arch/parisc/mm/hugetlbpage.c 		pud = pud_offset(pgd, addr);
pgd               142 arch/parisc/mm/hugetlbpage.c 	spin_lock_irqsave(pgd_spinlock((mm)->pgd), flags);
pgd               144 arch/parisc/mm/hugetlbpage.c 	spin_unlock_irqrestore(pgd_spinlock((mm)->pgd), flags);
pgd               154 arch/parisc/mm/hugetlbpage.c 	spin_lock_irqsave(pgd_spinlock((mm)->pgd), flags);
pgd               157 arch/parisc/mm/hugetlbpage.c 	spin_unlock_irqrestore(pgd_spinlock((mm)->pgd), flags);
pgd               169 arch/parisc/mm/hugetlbpage.c 	spin_lock_irqsave(pgd_spinlock((mm)->pgd), flags);
pgd               172 arch/parisc/mm/hugetlbpage.c 	spin_unlock_irqrestore(pgd_spinlock((mm)->pgd), flags);
pgd               183 arch/parisc/mm/hugetlbpage.c 	spin_lock_irqsave(pgd_spinlock((mm)->pgd), flags);
pgd               188 arch/parisc/mm/hugetlbpage.c 	spin_unlock_irqrestore(pgd_spinlock((mm)->pgd), flags);
pgd                14 arch/powerpc/include/asm/book3s/32/pgalloc.h static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
pgd                16 arch/powerpc/include/asm/book3s/32/pgalloc.h 	kmem_cache_free(PGT_CACHE(PGD_INDEX_SIZE), pgd);
pgd               357 arch/powerpc/include/asm/book3s/32/pgtable.h #define pgd_offset(mm, address)	 ((mm)->pgd + pgd_index(address))
pgd               137 arch/powerpc/include/asm/book3s/64/hash.h static inline int hash__pgd_bad(pgd_t pgd)
pgd               139 arch/powerpc/include/asm/book3s/64/hash.h 	return (pgd_val(pgd) == 0);
pgd                39 arch/powerpc/include/asm/book3s/64/pgalloc.h static inline void radix__pgd_free(struct mm_struct *mm, pgd_t *pgd)
pgd                42 arch/powerpc/include/asm/book3s/64/pgalloc.h 	free_page((unsigned long)pgd);
pgd                44 arch/powerpc/include/asm/book3s/64/pgalloc.h 	free_pages((unsigned long)pgd, 4);
pgd                50 arch/powerpc/include/asm/book3s/64/pgalloc.h 	pgd_t *pgd;
pgd                55 arch/powerpc/include/asm/book3s/64/pgalloc.h 	pgd = kmem_cache_alloc(PGT_CACHE(PGD_INDEX_SIZE),
pgd                57 arch/powerpc/include/asm/book3s/64/pgalloc.h 	if (unlikely(!pgd))
pgd                58 arch/powerpc/include/asm/book3s/64/pgalloc.h 		return pgd;
pgd                65 arch/powerpc/include/asm/book3s/64/pgalloc.h 	kmemleak_no_scan(pgd);
pgd                76 arch/powerpc/include/asm/book3s/64/pgalloc.h 	memset(pgd, 0, PGD_TABLE_SIZE);
pgd                78 arch/powerpc/include/asm/book3s/64/pgalloc.h 	return pgd;
pgd                81 arch/powerpc/include/asm/book3s/64/pgalloc.h static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
pgd                84 arch/powerpc/include/asm/book3s/64/pgalloc.h 		return radix__pgd_free(mm, pgd);
pgd                85 arch/powerpc/include/asm/book3s/64/pgalloc.h 	kmem_cache_free(PGT_CACHE(PGD_INDEX_SIZE), pgd);
pgd                88 arch/powerpc/include/asm/book3s/64/pgalloc.h static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
pgd                90 arch/powerpc/include/asm/book3s/64/pgalloc.h 	*pgd =  __pgd(__pgtable_ptr_val(pud) | PGD_VAL_BITS);
pgd                29 arch/powerpc/include/asm/book3s/64/pgtable-4k.h static inline int pgd_huge(pgd_t pgd)
pgd                35 arch/powerpc/include/asm/book3s/64/pgtable-4k.h 		return !!(pgd_raw(pgd) & cpu_to_be64(_PAGE_PTE));
pgd                33 arch/powerpc/include/asm/book3s/64/pgtable-64k.h static inline int pgd_huge(pgd_t pgd)
pgd                38 arch/powerpc/include/asm/book3s/64/pgtable-64k.h 	return !!(pgd_raw(pgd) & cpu_to_be64(_PAGE_PTE));
pgd               952 arch/powerpc/include/asm/book3s/64/pgtable.h #define pgd_write(pgd)		pte_write(pgd_pte(pgd))
pgd               959 arch/powerpc/include/asm/book3s/64/pgtable.h static inline int pgd_none(pgd_t pgd)
pgd               961 arch/powerpc/include/asm/book3s/64/pgtable.h 	return !pgd_raw(pgd);
pgd               964 arch/powerpc/include/asm/book3s/64/pgtable.h static inline int pgd_present(pgd_t pgd)
pgd               966 arch/powerpc/include/asm/book3s/64/pgtable.h 	return !!(pgd_raw(pgd) & cpu_to_be64(_PAGE_PRESENT));
pgd               969 arch/powerpc/include/asm/book3s/64/pgtable.h static inline pte_t pgd_pte(pgd_t pgd)
pgd               971 arch/powerpc/include/asm/book3s/64/pgtable.h 	return __pte_raw(pgd_raw(pgd));
pgd               979 arch/powerpc/include/asm/book3s/64/pgtable.h static inline int pgd_bad(pgd_t pgd)
pgd               982 arch/powerpc/include/asm/book3s/64/pgtable.h 		return radix__pgd_bad(pgd);
pgd               983 arch/powerpc/include/asm/book3s/64/pgtable.h 	return hash__pgd_bad(pgd);
pgd               987 arch/powerpc/include/asm/book3s/64/pgtable.h static inline bool pgd_access_permitted(pgd_t pgd, bool write)
pgd               989 arch/powerpc/include/asm/book3s/64/pgtable.h 	return pte_access_permitted(pgd_pte(pgd), write);
pgd               992 arch/powerpc/include/asm/book3s/64/pgtable.h extern struct page *pgd_page(pgd_t pgd);
pgd               999 arch/powerpc/include/asm/book3s/64/pgtable.h #define pgd_page_vaddr(pgd)	__va(pgd_val(pgd) & ~PGD_MASKED_BITS)
pgd              1011 arch/powerpc/include/asm/book3s/64/pgtable.h #define pgd_offset(mm, address)	 ((mm)->pgd + pgd_index(address))
pgd              1321 arch/powerpc/include/asm/book3s/64/pgtable.h static inline int pgd_devmap(pgd_t pgd)
pgd              1372 arch/powerpc/include/asm/book3s/64/pgtable.h static inline bool pgd_is_leaf(pgd_t pgd)
pgd              1374 arch/powerpc/include/asm/book3s/64/pgtable.h 	return !!(pgd_raw(pgd) & cpu_to_be64(_PAGE_PTE));
pgd               230 arch/powerpc/include/asm/book3s/64/radix.h static inline int radix__pgd_bad(pgd_t pgd)
pgd               232 arch/powerpc/include/asm/book3s/64/radix.h 	return !!(pgd_val(pgd) & RADIX_PGD_BAD_BITS);
pgd               211 arch/powerpc/include/asm/kvm_book3s.h extern void kvmppc_free_pgtable_radix(struct kvm *kvm, pgd_t *pgd,
pgd                58 arch/powerpc/include/asm/mmu_context.h extern void set_context(unsigned long id, pgd_t *pgd);
pgd               228 arch/powerpc/include/asm/mmu_context.h 	get_paca()->pgd = NULL;
pgd               362 arch/powerpc/include/asm/nohash/32/pgtable.h #define pgd_offset(mm, address)	 ((mm)->pgd + pgd_index(address))
pgd                56 arch/powerpc/include/asm/nohash/64/pgtable-4k.h #define pgd_none(pgd)		(!pgd_val(pgd))
pgd                57 arch/powerpc/include/asm/nohash/64/pgtable-4k.h #define pgd_bad(pgd)		(pgd_val(pgd) == 0)
pgd                58 arch/powerpc/include/asm/nohash/64/pgtable-4k.h #define pgd_present(pgd)	(pgd_val(pgd) != 0)
pgd                59 arch/powerpc/include/asm/nohash/64/pgtable-4k.h #define pgd_page_vaddr(pgd)	(pgd_val(pgd) & ~PGD_MASKED_BITS)
pgd                68 arch/powerpc/include/asm/nohash/64/pgtable-4k.h static inline pte_t pgd_pte(pgd_t pgd)
pgd                70 arch/powerpc/include/asm/nohash/64/pgtable-4k.h 	return __pte(pgd_val(pgd));
pgd                77 arch/powerpc/include/asm/nohash/64/pgtable-4k.h extern struct page *pgd_page(pgd_t pgd);
pgd               178 arch/powerpc/include/asm/nohash/64/pgtable.h #define pgd_write(pgd)		pte_write(pgd_pte(pgd))
pgd               191 arch/powerpc/include/asm/nohash/64/pgtable.h #define pgd_offset(mm, address)	 ((mm)->pgd + pgd_index(address))
pgd                26 arch/powerpc/include/asm/nohash/pgalloc.h static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
pgd                28 arch/powerpc/include/asm/nohash/pgalloc.h 	kmem_cache_free(PGT_CACHE(PGD_INDEX_SIZE), pgd);
pgd               287 arch/powerpc/include/asm/nohash/pgtable.h static inline int pgd_huge(pgd_t pgd)
pgd               127 arch/powerpc/include/asm/paca.h 	pgd_t *pgd __aligned(0x40); /* Current PGD */
pgd               317 arch/powerpc/include/asm/page.h #define pgd_huge(pgd)		(0)
pgd                53 arch/powerpc/include/asm/pgtable-be-types.h typedef struct { __be64 pgd; } pgd_t;
pgd                58 arch/powerpc/include/asm/pgtable-be-types.h 	return be64_to_cpu(x.pgd);
pgd                63 arch/powerpc/include/asm/pgtable-be-types.h 	return x.pgd;
pgd                36 arch/powerpc/include/asm/pgtable-types.h typedef struct { unsigned long pgd; } pgd_t;
pgd                40 arch/powerpc/include/asm/pgtable-types.h 	return x.pgd;
pgd               150 arch/powerpc/include/asm/pgtable.h static inline bool pgd_is_leaf(pgd_t pgd)
pgd                31 arch/powerpc/include/asm/pte-walk.h 	pgd_t *pgdir = init_mm.pgd;
pgd                44 arch/powerpc/include/asm/pte-walk.h 	VM_WARN(pgdir != current->mm->pgd,
pgd               205 arch/powerpc/kernel/asm-offsets.c 	OFFSET(PACAPGD, paca_struct, pgd);
pgd               361 arch/powerpc/kernel/asm-offsets.c 	OFFSET(MM_PGD, mm_struct, pgd);
pgd                41 arch/powerpc/kernel/mce_power.c 	ptep = __find_linux_pte(mm->pgd, addr, NULL, &shift);
pgd               299 arch/powerpc/kvm/book3s_64_mmu_hv.c 				current->mm->pgd, false, pte_idx_ret);
pgd               624 arch/powerpc/kvm/book3s_64_mmu_hv.c 			ptep = find_current_mm_pte(current->mm->pgd,
pgd               497 arch/powerpc/kvm/book3s_64_mmu_radix.c void kvmppc_free_pgtable_radix(struct kvm *kvm, pgd_t *pgd, unsigned int lpid)
pgd               501 arch/powerpc/kvm/book3s_64_mmu_radix.c 	for (ig = 0; ig < PTRS_PER_PGD; ++ig, ++pgd) {
pgd               504 arch/powerpc/kvm/book3s_64_mmu_radix.c 		if (!pgd_present(*pgd))
pgd               506 arch/powerpc/kvm/book3s_64_mmu_radix.c 		pud = pud_offset(pgd, 0);
pgd               508 arch/powerpc/kvm/book3s_64_mmu_radix.c 		pgd_clear(pgd);
pgd               568 arch/powerpc/kvm/book3s_64_mmu_radix.c 	pgd_t *pgd;
pgd               575 arch/powerpc/kvm/book3s_64_mmu_radix.c 	pgd = pgtable + pgd_index(gpa);
pgd               577 arch/powerpc/kvm/book3s_64_mmu_radix.c 	if (pgd_present(*pgd))
pgd               578 arch/powerpc/kvm/book3s_64_mmu_radix.c 		pud = pud_offset(pgd, gpa);
pgd               599 arch/powerpc/kvm/book3s_64_mmu_radix.c 	if (pgd_none(*pgd)) {
pgd               602 arch/powerpc/kvm/book3s_64_mmu_radix.c 		pgd_populate(kvm->mm, pgd, new_pud);
pgd               605 arch/powerpc/kvm/book3s_64_mmu_radix.c 	pud = pud_offset(pgd, gpa);
pgd              1199 arch/powerpc/kvm/book3s_64_mmu_radix.c 	pgd_t pgd, *pgdp;
pgd              1272 arch/powerpc/kvm/book3s_64_mmu_radix.c 		pgd = READ_ONCE(*pgdp);
pgd              1273 arch/powerpc/kvm/book3s_64_mmu_radix.c 		if (!(pgd_val(pgd) & _PAGE_PRESENT)) {
pgd              1278 arch/powerpc/kvm/book3s_64_mmu_radix.c 		pudp = pud_offset(&pgd, gpa);
pgd              4286 arch/powerpc/kvm/book3s_hv.c 	vcpu->arch.pgdir = current->mm->pgd;
pgd               778 arch/powerpc/kvm/booke.c 	vcpu->arch.pgdir = current->mm->pgd;
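
The book3s_64_mmu_radix.c walk above is the usual demand-populate shape: test the slot with pgd_none(), allocate and hook in a lower table with pgd_populate() only when it is empty, then descend with pud_offset(). A compact model of that walk over a two-level tree; the 9-bit indices and 4 KiB leaf size are assumptions of the sketch, not the kernel's radix geometry:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define BITS 9
    #define N    (1u << BITS)

    typedef struct { uint64_t *next; } top_entry;

    static uint64_t *walk(top_entry *top, uint64_t gpa)
    {
        top_entry *e = &top[(gpa >> (BITS + 12)) & (N - 1)];

        if (!e->next) {                             /* like pgd_none(*pgd) */
            e->next = calloc(N, sizeof(uint64_t));  /* like pgd_populate() */
            if (!e->next)
                return NULL;
        }
        return &e->next[(gpa >> 12) & (N - 1)];     /* like pud_offset()   */
    }

    int main(void)
    {
        static top_entry top[N];
        uint64_t *slot = walk(top, 0x40001000);

        if (!slot)
            return 1;
        *slot = 0xabc;
        printf("%#llx\n", (unsigned long long)*walk(top, 0x40001000));
        return 0;
    }
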
pgd               202 arch/powerpc/mm/book3s64/hash_tlb.c 	BUG_ON(!mm->pgd);
pgd               215 arch/powerpc/mm/book3s64/hash_tlb.c 		pte_t *ptep = find_current_mm_pte(mm->pgd, start, &is_thp,
pgd              1292 arch/powerpc/mm/book3s64/hash_utils.c 	DBG_LOW(" mm=%p, mm->pgdir=%p, vsid=%016lx\n", mm, mm->pgd, vsid);
pgd              1301 arch/powerpc/mm/book3s64/hash_utils.c 	pgdir = mm->pgd;
pgd              1538 arch/powerpc/mm/book3s64/hash_utils.c 		" trap=%lx\n", mm, mm->pgd, ea, access, trap);
pgd              1541 arch/powerpc/mm/book3s64/hash_utils.c 	pgdir = mm->pgd;
pgd              1664 arch/powerpc/mm/book3s64/hash_utils.c 	if (!mm || !mm->pgd)
pgd              1668 arch/powerpc/mm/book3s64/hash_utils.c 	ptep = find_linux_pte(mm->pgd, address, NULL, NULL);
pgd               167 arch/powerpc/mm/book3s64/mmu_context.c 	process_tb[index].prtb0 = cpu_to_be64(rts_field | __pa(mm->pgd) | RADIX_PGD_INDEX_SIZE);
pgd               370 arch/powerpc/mm/book3s64/radix_pgtable.c 	process_tb->prtb0 = cpu_to_be64(rts_field | __pa(init_mm.pgd) | RADIX_PGD_INDEX_SIZE);
pgd               395 arch/powerpc/mm/book3s64/radix_pgtable.c 	dw0 = rts_field | __pa(init_mm.pgd) | RADIX_PGD_INDEX_SIZE | PATB_HR;
pgd               844 arch/powerpc/mm/book3s64/radix_pgtable.c 	pgd_t *pgd;
pgd               851 arch/powerpc/mm/book3s64/radix_pgtable.c 		pgd = pgd_offset_k(addr);
pgd               852 arch/powerpc/mm/book3s64/radix_pgtable.c 		if (!pgd_present(*pgd))
pgd               855 arch/powerpc/mm/book3s64/radix_pgtable.c 		if (pgd_is_leaf(*pgd)) {
pgd               856 arch/powerpc/mm/book3s64/radix_pgtable.c 			split_kernel_mapping(addr, end, PGDIR_SIZE, (pte_t *)pgd);
pgd               860 arch/powerpc/mm/book3s64/radix_pgtable.c 		pud_base = (pud_t *)pgd_page_vaddr(*pgd);
pgd                56 arch/powerpc/mm/book3s64/subpage_prot.c 	pgd_t *pgd;
pgd                62 arch/powerpc/mm/book3s64/subpage_prot.c 	pgd = pgd_offset(mm, addr);
pgd                63 arch/powerpc/mm/book3s64/subpage_prot.c 	if (pgd_none(*pgd))
pgd                65 arch/powerpc/mm/book3s64/subpage_prot.c 	pud = pud_offset(pgd, addr);
pgd                33 arch/powerpc/mm/copro_fault.c 	if (mm->pgd == NULL)
pgd                41 arch/powerpc/mm/hugetlbpage.c 	return __find_linux_pte(mm->pgd, addr, NULL, NULL);
pgd               393 arch/powerpc/mm/hugetlbpage.c static void hugetlb_free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
pgd               403 arch/powerpc/mm/hugetlbpage.c 		pud = pud_offset(pgd, addr);
pgd               438 arch/powerpc/mm/hugetlbpage.c 	pud = pud_offset(pgd, start);
pgd               439 arch/powerpc/mm/hugetlbpage.c 	pgd_clear(pgd);
pgd               451 arch/powerpc/mm/hugetlbpage.c 	pgd_t *pgd;
pgd               473 arch/powerpc/mm/hugetlbpage.c 		pgd = pgd_offset(tlb->mm, addr);
pgd               474 arch/powerpc/mm/hugetlbpage.c 		if (!is_hugepd(__hugepd(pgd_val(*pgd)))) {
pgd               475 arch/powerpc/mm/hugetlbpage.c 			if (pgd_none_or_clear_bad(pgd))
pgd               477 arch/powerpc/mm/hugetlbpage.c 			hugetlb_free_pud_range(tlb, pgd, addr, next, floor, ceiling);
pgd               486 arch/powerpc/mm/hugetlbpage.c 			more = addr + (1 << hugepd_shift(*(hugepd_t *)pgd));
pgd               490 arch/powerpc/mm/hugetlbpage.c 			free_hugepd_range(tlb, (hugepd_t *)pgd, PGDIR_SHIFT,
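
hugetlb_free_pgd_range() above tears a range down from the top: skip slots rejected by pgd_none_or_clear_bad(), free the lower level, then pgd_clear() the slot (with an extra branch when the entry is itself a huge-page directory, the is_hugepd() case). The skeleton of that loop, modelled on a plain pointer table with illustrative sizes:

    #include <stdio.h>
    #include <stdlib.h>

    #define N 512

    static void free_all(void **top)
    {
        int freed = 0;

        for (int i = 0; i < N; i++) {
            if (!top[i])        /* like pgd_none_or_clear_bad(pgd) */
                continue;
            free(top[i]);       /* lower table released ...        */
            top[i] = NULL;      /* ... and the slot pgd_clear()ed  */
            freed++;
        }
        printf("freed %d lower tables\n", freed);
    }

    int main(void)
    {
        static void *top[N];

        top[7]  = malloc(64);
        top[42] = malloc(64);
        free_all(top);
        return 0;
    }
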
pgd                20 arch/powerpc/mm/mmu_context.c 	tsk->thread.pgdir = mm->pgd;
pgd                27 arch/powerpc/mm/mmu_context.c 	get_paca()->pgd = mm->pgd;
pgd               182 arch/powerpc/mm/nohash/8xx.c void set_context(unsigned long id, pgd_t *pgd)
pgd               190 arch/powerpc/mm/nohash/8xx.c 		abatron_pteptrs[1] = pgd;
pgd               197 arch/powerpc/mm/nohash/8xx.c 	mtspr(SPRN_M_TWB, __pa(pgd) - offset);
pgd               360 arch/powerpc/mm/nohash/mmu_context.c 	set_context(id, next->pgd);
pgd               267 arch/powerpc/mm/pgtable.c 	pgd_t *pgd;
pgd               273 arch/powerpc/mm/pgtable.c 	pgd = mm->pgd + pgd_index(addr);
pgd               274 arch/powerpc/mm/pgtable.c 	BUG_ON(pgd_none(*pgd));
pgd               275 arch/powerpc/mm/pgtable.c 	pud = pud_offset(pgd, addr);
pgd               315 arch/powerpc/mm/pgtable.c 	pgd_t pgd, *pgdp;
pgd               329 arch/powerpc/mm/pgtable.c 	pgd  = READ_ONCE(*pgdp);
pgd               336 arch/powerpc/mm/pgtable.c 	if (pgd_none(pgd))
pgd               339 arch/powerpc/mm/pgtable.c 	if (pgd_is_leaf(pgd)) {
pgd               344 arch/powerpc/mm/pgtable.c 	if (is_hugepd(__hugepd(pgd_val(pgd)))) {
pgd               345 arch/powerpc/mm/pgtable.c 		hpdp = (hugepd_t *)&pgd;
pgd               355 arch/powerpc/mm/pgtable.c 	pudp = pud_offset(&pgd, ea);
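
The mm/pgtable.c walk above is lockless: the entry is snapshotted once with READ_ONCE(*pgdp) into a local pgd, and every subsequent test (pgd_none(), pgd_is_leaf(), is_hugepd()) plus the descent through pud_offset(&pgd, ea) uses that stable copy, never the live slot, so a concurrent update cannot be half-observed. The same snapshot-then-classify shape in miniature; the flag bits are invented:

    #include <stdint.h>
    #include <stdio.h>

    #define F_PRESENT 0x1ULL
    #define F_LEAF    0x2ULL    /* stand-in for the _PAGE_PTE leaf test */

    static const char *classify(const volatile uint64_t *slot)
    {
        uint64_t e = *slot;     /* one read, like READ_ONCE(*pgdp) */

        if (!(e & F_PRESENT))
            return "none";      /* like pgd_none(pgd)    */
        if (e & F_LEAF)
            return "leaf";      /* like pgd_is_leaf(pgd) */
        return "table";         /* descend via the local copy, &pgd */
    }

    int main(void)
    {
        uint64_t slot = F_PRESENT | F_LEAF;

        printf("%s\n", classify(&slot));
        return 0;
    }
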
pgd               135 arch/powerpc/mm/pgtable_32.c         pgd_t	*pgd;
pgd               141 arch/powerpc/mm/pgtable_32.c         pgd = pgd_offset(mm, addr & PAGE_MASK);
pgd               142 arch/powerpc/mm/pgtable_32.c         if (pgd) {
pgd               143 arch/powerpc/mm/pgtable_32.c 		pud = pud_offset(pgd, addr & PAGE_MASK);
pgd               104 arch/powerpc/mm/pgtable_64.c struct page *pgd_page(pgd_t pgd)
pgd               106 arch/powerpc/mm/pgtable_64.c 	if (pgd_is_leaf(pgd)) {
pgd               107 arch/powerpc/mm/pgtable_64.c 		VM_WARN_ON(!pgd_huge(pgd));
pgd               108 arch/powerpc/mm/pgtable_64.c 		return pte_page(pgd_pte(pgd));
pgd               110 arch/powerpc/mm/pgtable_64.c 	return virt_to_page(pgd_page_vaddr(pgd));
pgd               420 arch/powerpc/mm/ptdump/hashpagetable.c static void walk_pud(struct pg_state *st, pgd_t *pgd, unsigned long start)
pgd               422 arch/powerpc/mm/ptdump/hashpagetable.c 	pud_t *pud = pud_offset(pgd, 0);
pgd               436 arch/powerpc/mm/ptdump/hashpagetable.c 	pgd_t *pgd = pgd_offset_k(0UL);
pgd               444 arch/powerpc/mm/ptdump/hashpagetable.c 	for (i = 0; i < PTRS_PER_PGD; i++, pgd++) {
pgd               446 arch/powerpc/mm/ptdump/hashpagetable.c 		if (!pgd_none(*pgd))
pgd               448 arch/powerpc/mm/ptdump/hashpagetable.c 			walk_pud(st, pgd, addr);
pgd               281 arch/powerpc/mm/ptdump/ptdump.c static void walk_pud(struct pg_state *st, pgd_t *pgd, unsigned long start)
pgd               283 arch/powerpc/mm/ptdump/ptdump.c 	pud_t *pud = pud_offset(pgd, 0);
pgd               301 arch/powerpc/mm/ptdump/ptdump.c 	pgd_t *pgd = pgd_offset_k(addr);
pgd               307 arch/powerpc/mm/ptdump/ptdump.c 	for (i = pgd_index(addr); i < PTRS_PER_PGD; i++, pgd++, addr += PGDIR_SIZE) {
pgd               308 arch/powerpc/mm/ptdump/ptdump.c 		if (!pgd_none(*pgd) && !pgd_is_leaf(*pgd))
pgd               310 arch/powerpc/mm/ptdump/ptdump.c 			walk_pud(st, pgd, addr);
pgd               312 arch/powerpc/mm/ptdump/ptdump.c 			note_page(st, addr, 1, pgd_val(*pgd), PGDIR_SIZE);
pgd               122 arch/powerpc/perf/callchain.c 	pgdir = current->mm->pgd;
pgd              2487 arch/powerpc/xmon/xmon.c 	DUMP(p, pgd, "%-*px");
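
The two powerpc ptdump walkers just above share one scan shape: step a cursor PGDIR_SIZE at a time across the top level, recurse into entries that are present but not leaves, and report everything else to note_page(). A toy version of that dump loop with a four-slot table and made-up flag bits:

    #include <stdio.h>

    #define PRESENT 0x1u
    #define LEAF    0x2u
    #define SLOTS   4
    #define SPAN    0x40000000UL        /* stand-in for PGDIR_SIZE */

    int main(void)
    {
        unsigned top[SLOTS] = { 0, PRESENT | LEAF, PRESENT, 0 };
        unsigned long addr = 0;

        for (int i = 0; i < SLOTS; i++, addr += SPAN) {
            if (!(top[i] & PRESENT))             /* like pgd_none()  */
                continue;
            if (top[i] & LEAF)
                printf("%#lx: leaf\n", addr);    /* like note_page() */
            else
                printf("%#lx: descend\n", addr); /* like walk_pud()  */
        }
        return 0;
    }
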
pgd                63 arch/riscv/include/asm/page.h 	unsigned long pgd;
pgd                78 arch/riscv/include/asm/page.h #define pgd_val(x)	((x).pgd)
pgd                44 arch/riscv/include/asm/pgalloc.h 	pgd_t *pgd;
pgd                46 arch/riscv/include/asm/pgalloc.h 	pgd = (pgd_t *)__get_free_page(GFP_KERNEL);
pgd                47 arch/riscv/include/asm/pgalloc.h 	if (likely(pgd != NULL)) {
pgd                48 arch/riscv/include/asm/pgalloc.h 		memset(pgd, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
pgd                50 arch/riscv/include/asm/pgalloc.h 		memcpy(pgd + USER_PTRS_PER_PGD,
pgd                51 arch/riscv/include/asm/pgalloc.h 			init_mm.pgd + USER_PTRS_PER_PGD,
pgd                54 arch/riscv/include/asm/pgalloc.h 	return pgd;
pgd                57 arch/riscv/include/asm/pgalloc.h static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
pgd                59 arch/riscv/include/asm/pgalloc.h 	free_page((unsigned long)pgd);
pgd               153 arch/riscv/include/asm/pgtable.h static inline unsigned long _pgd_pfn(pgd_t pgd)
pgd               155 arch/riscv/include/asm/pgtable.h 	return pgd_val(pgd) >> _PAGE_PFN_SHIFT;
pgd               163 arch/riscv/include/asm/pgtable.h 	return mm->pgd + pgd_index(addr);
pgd                61 arch/riscv/mm/context.c 	csr_write(CSR_SATP, virt_to_pfn(next->pgd) | SATP_MODE);
pgd               215 arch/riscv/mm/fault.c 		pgd_t *pgd, *pgd_k;
pgd               235 arch/riscv/mm/fault.c 		pgd = (pgd_t *)pfn_to_virt(csr_read(CSR_SATP)) + index;
pgd               236 arch/riscv/mm/fault.c 		pgd_k = init_mm.pgd + index;
pgd               240 arch/riscv/mm/fault.c 		set_pgd(pgd, *pgd_k);
pgd               242 arch/riscv/mm/fault.c 		p4d = p4d_offset(pgd, addr);
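
Two riscv idioms sit side by side here: pgd_alloc() gives every new table a private user half plus a copy of init_mm's kernel half, and the fault path later repairs a table that predates a kernel mapping by copying the one missing entry across with set_pgd(pgd, *pgd_k). Both in one small model; the 512/256 split stands in for PTRS_PER_PGD and USER_PTRS_PER_PGD and is an assumption of the sketch:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define N      512
    #define USER_N 256                  /* stand-in for USER_PTRS_PER_PGD */

    static uint64_t kernel_top[N];      /* stand-in for init_mm.pgd */

    static void pgd_alloc_model(uint64_t *pgd)
    {
        memset(pgd, 0, USER_N * sizeof(*pgd));       /* user half empty    */
        memcpy(pgd + USER_N, kernel_top + USER_N,    /* kernel half copied */
               (N - USER_N) * sizeof(*pgd));
    }

    static void vmalloc_fault_model(uint64_t *pgd, unsigned index)
    {
        pgd[index] = kernel_top[index];  /* like set_pgd(pgd, *pgd_k) */
    }

    int main(void)
    {
        static uint64_t mine[N];

        pgd_alloc_model(mine);
        kernel_top[300] = 0x123;         /* kernel maps something new    */
        vmalloc_fault_model(mine, 300);  /* fault handler syncs the slot */
        printf("%#llx\n", (unsigned long long)mine[300]);
        return 0;
    }
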
pgd                47 arch/s390/include/asm/mmu_context.h 		mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
pgd                52 arch/s390/include/asm/mmu_context.h 		mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
pgd                57 arch/s390/include/asm/mmu_context.h 		mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
pgd                62 arch/s390/include/asm/mmu_context.h 		mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
pgd                65 arch/s390/include/asm/mmu_context.h 	crst_table_init((unsigned long *) mm->pgd, pgd_entry_type(mm));
pgd                85 arch/s390/include/asm/page.h typedef struct { unsigned long pgd; } pgd_t;
pgd                94 arch/s390/include/asm/page.h #define pgd_val(x)      ((x).pgd)
pgd               102 arch/s390/include/asm/pgalloc.h static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, p4d_t *p4d)
pgd               104 arch/s390/include/asm/pgalloc.h 	pgd_val(*pgd) = _REGION1_ENTRY | __pa(p4d);
pgd               133 arch/s390/include/asm/pgalloc.h static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
pgd               136 arch/s390/include/asm/pgalloc.h 		pgtable_pmd_page_dtor(virt_to_page(pgd));
pgd               137 arch/s390/include/asm/pgalloc.h 	crst_table_free(mm, (unsigned long *) pgd);
pgd               598 arch/s390/include/asm/pgtable.h static inline int pgd_folded(pgd_t pgd)
pgd               600 arch/s390/include/asm/pgtable.h 	return (pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R1;
pgd               603 arch/s390/include/asm/pgtable.h static inline int pgd_present(pgd_t pgd)
pgd               605 arch/s390/include/asm/pgtable.h 	if (pgd_folded(pgd))
pgd               607 arch/s390/include/asm/pgtable.h 	return (pgd_val(pgd) & _REGION_ENTRY_ORIGIN) != 0UL;
pgd               610 arch/s390/include/asm/pgtable.h static inline int pgd_none(pgd_t pgd)
pgd               612 arch/s390/include/asm/pgtable.h 	if (pgd_folded(pgd))
pgd               614 arch/s390/include/asm/pgtable.h 	return (pgd_val(pgd) & _REGION_ENTRY_INVALID) != 0UL;
pgd               617 arch/s390/include/asm/pgtable.h static inline int pgd_bad(pgd_t pgd)
pgd               619 arch/s390/include/asm/pgtable.h 	if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R1)
pgd               621 arch/s390/include/asm/pgtable.h 	return (pgd_val(pgd) & ~_REGION_ENTRY_BITS) != 0;
pgd               624 arch/s390/include/asm/pgtable.h static inline unsigned long pgd_pfn(pgd_t pgd)
pgd               629 arch/s390/include/asm/pgtable.h 	return (pgd_val(pgd) & origin_mask) >> PAGE_SHIFT;
pgd               890 arch/s390/include/asm/pgtable.h static inline void pgd_clear(pgd_t *pgd)
pgd               892 arch/s390/include/asm/pgtable.h 	if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R1)
pgd               893 arch/s390/include/asm/pgtable.h 		pgd_val(*pgd) = _REGION1_ENTRY_EMPTY;
pgd              1222 arch/s390/include/asm/pgtable.h #define pgd_deref(pgd) (pgd_val(pgd) & _REGION_ENTRY_ORIGIN)
pgd              1235 arch/s390/include/asm/pgtable.h static inline pgd_t *pgd_offset_raw(pgd_t *pgd, unsigned long address)
pgd              1241 arch/s390/include/asm/pgtable.h 	rste = pgd_val(*pgd);
pgd              1244 arch/s390/include/asm/pgtable.h 	return pgd + ((address >> shift) & (PTRS_PER_PGD - 1));
pgd              1247 arch/s390/include/asm/pgtable.h #define pgd_offset(mm, address) pgd_offset_raw(READ_ONCE((mm)->pgd), address)
pgd              1250 arch/s390/include/asm/pgtable.h static inline p4d_t *p4d_offset(pgd_t *pgd, unsigned long address)
pgd              1252 arch/s390/include/asm/pgtable.h 	if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) >= _REGION_ENTRY_TYPE_R1)
pgd              1253 arch/s390/include/asm/pgtable.h 		return (p4d_t *) pgd_deref(*pgd) + p4d_index(address);
pgd              1254 arch/s390/include/asm/pgtable.h 	return (p4d_t *) pgd;
pgd              1294 arch/s390/include/asm/pgtable.h #define pgd_page(pgd) pfn_to_page(pgd_pfn(pgd))
pgd               213 arch/s390/mm/dump_pagetables.c 			   pgd_t *pgd, unsigned long addr)
pgd               219 arch/s390/mm/dump_pagetables.c 	if ((pgd_val(*pgd) & PAGE_MASK) == __pa(kasan_early_shadow_p4d)) {
pgd               225 arch/s390/mm/dump_pagetables.c 	p4d = p4d_offset(pgd, addr);
pgd               240 arch/s390/mm/dump_pagetables.c 	pgd_t *pgd;
pgd               246 arch/s390/mm/dump_pagetables.c 		pgd = pgd_offset_k(addr);
pgd               247 arch/s390/mm/dump_pagetables.c 		if (!pgd_none(*pgd))
pgd               248 arch/s390/mm/dump_pagetables.c 			walk_p4d_level(m, &st, pgd, addr);
pgd               545 arch/s390/mm/gmap.c 	pgd_t *pgd;
pgd               582 arch/s390/mm/gmap.c 	pgd = pgd_offset(mm, vmaddr);
pgd               583 arch/s390/mm/gmap.c 	VM_BUG_ON(pgd_none(*pgd));
pgd               584 arch/s390/mm/gmap.c 	p4d = p4d_offset(pgd, vmaddr);
pgd                94 arch/s390/mm/init.c 	init_mm.pgd = swapper_pg_dir;
pgd               102 arch/s390/mm/init.c 	init_mm.context.asce = (__pa(init_mm.pgd) & PAGE_MASK) | asce_bits;
pgd               105 arch/s390/mm/init.c 	crst_table_init((unsigned long *) init_mm.pgd, pgd_type);
pgd               107 arch/s390/mm/init.c 	kasan_copy_shadow(init_mm.pgd);
pgd               204 arch/s390/mm/kasan_init.c static void __init kasan_set_pgd(pgd_t *pgd, unsigned long asce_type)
pgd               209 arch/s390/mm/kasan_init.c 	S390_lowcore.kernel_asce = (__pa(pgd) & PAGE_MASK) | asce_bits;
pgd               313 arch/s390/mm/kasan_init.c 	init_mm.pgd = early_pg_dir;
pgd               141 arch/s390/mm/page-states.c static void mark_kernel_p4d(pgd_t *pgd, unsigned long addr, unsigned long end)
pgd               148 arch/s390/mm/page-states.c 	p4d = p4d_offset(pgd, addr);
pgd               166 arch/s390/mm/page-states.c 	pgd_t *pgd;
pgd               170 arch/s390/mm/page-states.c 	pgd = pgd_offset_k(addr);
pgd               173 arch/s390/mm/page-states.c 		if (pgd_none(*pgd))
pgd               175 arch/s390/mm/page-states.c 		if (!pgd_folded(*pgd)) {
pgd               176 arch/s390/mm/page-states.c 			page = virt_to_page(pgd_val(*pgd));
pgd               180 arch/s390/mm/page-states.c 		mark_kernel_p4d(pgd, addr, next);
pgd               181 arch/s390/mm/page-states.c 	} while (pgd++, addr = next, addr != MODULES_END);
pgd               262 arch/s390/mm/pageattr.c static int walk_p4d_level(pgd_t *pgd, unsigned long addr, unsigned long end,
pgd               269 arch/s390/mm/pageattr.c 	p4dp = p4d_offset(pgd, addr);
pgd               341 arch/s390/mm/pageattr.c 	pgd_t *pgd;
pgd               349 arch/s390/mm/pageattr.c 		pgd = pgd_offset_k(address);
pgd               350 arch/s390/mm/pageattr.c 		p4d = p4d_offset(pgd, address);
pgd                92 arch/s390/mm/pgalloc.c 	unsigned long *table, *pgd;
pgd               106 arch/s390/mm/pgalloc.c 		pgd = (unsigned long *) mm->pgd;
pgd               109 arch/s390/mm/pgalloc.c 			p4d_populate(mm, (p4d_t *) table, (pud_t *) pgd);
pgd               110 arch/s390/mm/pgalloc.c 			mm->pgd = (pgd_t *) table;
pgd               112 arch/s390/mm/pgalloc.c 			mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
pgd               117 arch/s390/mm/pgalloc.c 			pgd_populate(mm, (pgd_t *) table, (p4d_t *) pgd);
pgd               118 arch/s390/mm/pgalloc.c 			mm->pgd = (pgd_t *) table;
pgd               120 arch/s390/mm/pgalloc.c 			mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
pgd               133 arch/s390/mm/pgalloc.c 	pgd_t *pgd;
pgd               143 arch/s390/mm/pgalloc.c 	pgd = mm->pgd;
pgd               145 arch/s390/mm/pgalloc.c 	mm->pgd = (pgd_t *) (pgd_val(*pgd) & _REGION_ENTRY_ORIGIN);
pgd               147 arch/s390/mm/pgalloc.c 	mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
pgd               149 arch/s390/mm/pgalloc.c 	crst_table_free(mm, (unsigned long *) pgd);
pgd               416 arch/s390/mm/pgtable.c 	pgd_t *pgd;
pgd               421 arch/s390/mm/pgtable.c 	pgd = pgd_offset(mm, addr);
pgd               422 arch/s390/mm/pgtable.c 	p4d = p4d_alloc(mm, pgd, addr);
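
The s390 block is dominated by variable table depth: pgd_folded() and friends test the region type stored in the entry itself, pgd_offset_raw() derives its index shift from that same value, and the pgalloc.c fragment grows an address space in place by allocating a new top table that points down at the old one before swapping mm->pgd and the ASCE. A loose sketch of a walker keyed off a level tag kept in the entry; the tag encoding and shift arithmetic are invented for the sketch, not s390's:

    #include <stdint.h>
    #include <stdio.h>

    #define TYPE_MASK 0x3u      /* stand-in for _REGION_ENTRY_TYPE_MASK */

    static unsigned top_index(uint64_t first_entry, uint64_t addr)
    {
        unsigned type  = first_entry & TYPE_MASK;  /* taller table ...        */
        unsigned shift = 20 + 11 * type;           /* ... decodes higher bits */

        return (addr >> shift) & 0x7ff;  /* like ... & (PTRS_PER_PGD - 1) */
    }

    int main(void)
    {
        /* the same walker serves a short (folded) and a tall table */
        printf("%u %u\n",
               top_index(0, 0x123456789abcULL),
               top_index(2, 0x123456789abcULL));
        return 0;
    }
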
pgd               129 arch/sh/include/asm/mmu_context.h 		set_TTB(next->pgd);
pgd               146 arch/sh/include/asm/mmu_context.h #define set_TTB(pgd)			do { } while (0)
pgd                51 arch/sh/include/asm/mmu_context_32.h static inline void set_TTB(pgd_t *pgd)
pgd                53 arch/sh/include/asm/mmu_context_32.h 	__raw_writel((unsigned long)pgd, MMU_TTB);
pgd                72 arch/sh/include/asm/mmu_context_64.h #define set_TTB(pgd)	(mmu_pdtp_cache = (pgd))
pgd                80 arch/sh/include/asm/page.h typedef struct { unsigned long long pgd; } pgd_t;
pgd                88 arch/sh/include/asm/page.h typedef struct { unsigned long pgd; } pgd_t;
pgd                94 arch/sh/include/asm/page.h typedef struct { unsigned long pgd; } pgd_t;
pgd                99 arch/sh/include/asm/page.h #define pgd_val(x)	((x).pgd)
pgd                 9 arch/sh/include/asm/pgalloc.h extern void pgd_free(struct mm_struct *mm, pgd_t *pgd);
pgd               145 arch/sh/include/asm/pgtable.h 				  pgd_t *pgd);
pgd               409 arch/sh/include/asm/pgtable_32.h #define pgd_offset(mm, address)	((mm)->pgd + pgd_index(address))
pgd                50 arch/sh/include/asm/pgtable_64.h #define pgd_offset(mm, address) ((mm)->pgd+pgd_index(address))
pgd               211 arch/sh/mm/cache-sh4.c 	pgd_t *pgd;
pgd               226 arch/sh/mm/cache-sh4.c 	pgd = pgd_offset(vma->vm_mm, address);
pgd               227 arch/sh/mm/cache-sh4.c 	pud = pud_offset(pgd, address);
pgd               385 arch/sh/mm/cache-sh5.c 	pgd_t *pgd;
pgd               396 arch/sh/mm/cache-sh5.c 	pgd = pgd_offset(mm, addr);
pgd               397 arch/sh/mm/cache-sh5.c 	if (pgd_bad(*pgd))
pgd               400 arch/sh/mm/cache-sh5.c 	pud = pud_offset(pgd, addr);
pgd                39 arch/sh/mm/fault.c 	pgd_t *pgd;
pgd                42 arch/sh/mm/fault.c 		pgd = mm->pgd;
pgd                44 arch/sh/mm/fault.c 		pgd = get_TTB();
pgd                46 arch/sh/mm/fault.c 		if (unlikely(!pgd))
pgd                47 arch/sh/mm/fault.c 			pgd = swapper_pg_dir;
pgd                50 arch/sh/mm/fault.c 	printk(KERN_ALERT "pgd = %p\n", pgd);
pgd                51 arch/sh/mm/fault.c 	pgd += pgd_index(addr);
pgd                53 arch/sh/mm/fault.c 	       (u32)(sizeof(*pgd) * 2), (u64)pgd_val(*pgd));
pgd                60 arch/sh/mm/fault.c 		if (pgd_none(*pgd))
pgd                63 arch/sh/mm/fault.c 		if (pgd_bad(*pgd)) {
pgd                68 arch/sh/mm/fault.c 		pud = pud_offset(pgd, addr);
pgd               106 arch/sh/mm/fault.c static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address)
pgd               113 arch/sh/mm/fault.c 	pgd += index;
pgd               114 arch/sh/mm/fault.c 	pgd_k = init_mm.pgd + index;
pgd               119 arch/sh/mm/fault.c 	pud = pud_offset(pgd, address);
pgd                28 arch/sh/mm/hugetlbpage.c 	pgd_t *pgd;
pgd                33 arch/sh/mm/hugetlbpage.c 	pgd = pgd_offset(mm, addr);
pgd                34 arch/sh/mm/hugetlbpage.c 	if (pgd) {
pgd                35 arch/sh/mm/hugetlbpage.c 		pud = pud_alloc(mm, pgd, addr);
pgd                49 arch/sh/mm/hugetlbpage.c 	pgd_t *pgd;
pgd                54 arch/sh/mm/hugetlbpage.c 	pgd = pgd_offset(mm, addr);
pgd                55 arch/sh/mm/hugetlbpage.c 	if (pgd) {
pgd                56 arch/sh/mm/hugetlbpage.c 		pud = pud_offset(pgd, addr);
pgd                47 arch/sh/mm/init.c 	pgd_t *pgd;
pgd                51 arch/sh/mm/init.c 	pgd = pgd_offset_k(addr);
pgd                52 arch/sh/mm/init.c 	if (pgd_none(*pgd)) {
pgd                53 arch/sh/mm/init.c 		pgd_ERROR(*pgd);
pgd                57 arch/sh/mm/init.c 	pud = pud_alloc(NULL, pgd, addr);
pgd               167 arch/sh/mm/init.c 	pgd_t *pgd;
pgd               178 arch/sh/mm/init.c 	pgd = pgd_base + i;
pgd               180 arch/sh/mm/init.c 	for ( ; (i < PTRS_PER_PGD) && (vaddr != end); pgd++, i++) {
pgd               181 arch/sh/mm/init.c 		pud = (pud_t *)pgd;
pgd                14 arch/sh/mm/pgtable.c 	pgd_t *pgd = x;
pgd                16 arch/sh/mm/pgtable.c 	memcpy(pgd + USER_PTRS_PER_PGD,
pgd                38 arch/sh/mm/pgtable.c void pgd_free(struct mm_struct *mm, pgd_t *pgd)
pgd                40 arch/sh/mm/pgtable.c 	kmem_cache_free(pgd_cachep, pgd);
pgd                25 arch/sh/mm/tlbex_32.c 	pgd_t *pgd;
pgd                37 arch/sh/mm/tlbex_32.c 		pgd = pgd_offset_k(address);
pgd                42 arch/sh/mm/tlbex_32.c 		pgd = pgd_offset(current->mm, address);
pgd                45 arch/sh/mm/tlbex_32.c 	pud = pud_offset(pgd, address);
pgd                46 arch/sh/mm/tlbex_64.c 	pgd_t *pgd;
pgd                53 arch/sh/mm/tlbex_64.c 		pgd = pgd_offset_k(address);
pgd                58 arch/sh/mm/tlbex_64.c 		pgd = pgd_offset(current->mm, address);
pgd                61 arch/sh/mm/tlbex_64.c 	pud = pud_offset(pgd, address);
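
The sh fault code above picks which table to dump with a fallback chain: the faulting mm's pgd if there is one, else whatever the TTB register holds (get_TTB()), else swapper_pg_dir as a last resort. The chain on its own; all three sources are faked here:

    #include <stdio.h>

    static unsigned long  swapper[4];   /* stand-in for swapper_pg_dir */
    static unsigned long *hw_ttb;       /* stand-in for get_TTB()      */

    static unsigned long *pick_pgd(unsigned long *mm_pgd)
    {
        unsigned long *pgd = mm_pgd ? mm_pgd : hw_ttb;

        if (!pgd)               /* like the unlikely(!pgd) fallback above */
            pgd = swapper;
        return pgd;
    }

    int main(void)
    {
        printf("pgd = %p\n", (void *)pick_pgd(NULL));
        return 0;
    }
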
pgd                41 arch/sparc/include/asm/mmu_context_64.h 	__tsb_context_switch(__pa(mm->pgd),
pgd                58 arch/sparc/include/asm/page_32.h typedef struct { unsigned long pgd; } pgd_t;
pgd                66 arch/sparc/include/asm/page_32.h #define pgd_val(x)	((x).pgd)
pgd                73 arch/sparc/include/asm/page_64.h typedef struct { unsigned long pgd; } pgd_t;
pgd                80 arch/sparc/include/asm/page_64.h #define pgd_val(x)	((x).pgd)
pgd                21 arch/sparc/include/asm/pgalloc_32.h static inline void free_pgd_fast(pgd_t *pgd)
pgd                23 arch/sparc/include/asm/pgalloc_32.h 	srmmu_free_nocache(pgd, SRMMU_PGD_TABLE_SIZE);
pgd                26 arch/sparc/include/asm/pgalloc_32.h #define pgd_free(mm, pgd)	free_pgd_fast(pgd)
pgd                19 arch/sparc/include/asm/pgalloc_64.h static inline void __pgd_populate(pgd_t *pgd, pud_t *pud)
pgd                21 arch/sparc/include/asm/pgalloc_64.h 	pgd_set(pgd, pud);
pgd                31 arch/sparc/include/asm/pgalloc_64.h static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
pgd                33 arch/sparc/include/asm/pgalloc_64.h 	kmem_cache_free(pgtable_cache, pgd);
pgd               135 arch/sparc/include/asm/pgtable_32.h static inline unsigned long pgd_page_vaddr(pgd_t pgd)
pgd               137 arch/sparc/include/asm/pgtable_32.h 	if (srmmu_device_memory(pgd_val(pgd))) {
pgd               140 arch/sparc/include/asm/pgtable_32.h 		unsigned long v = pgd_val(pgd) & SRMMU_PTD_PMASK;
pgd               187 arch/sparc/include/asm/pgtable_32.h static inline int pgd_none(pgd_t pgd)
pgd               189 arch/sparc/include/asm/pgtable_32.h 	return !(pgd_val(pgd) & 0xFFFFFFF);
pgd               192 arch/sparc/include/asm/pgtable_32.h static inline int pgd_bad(pgd_t pgd)
pgd               194 arch/sparc/include/asm/pgtable_32.h 	return (pgd_val(pgd) & SRMMU_ET_MASK) != SRMMU_ET_PTD;
pgd               197 arch/sparc/include/asm/pgtable_32.h static inline int pgd_present(pgd_t pgd)
pgd               199 arch/sparc/include/asm/pgtable_32.h 	return ((pgd_val(pgd) & SRMMU_ET_MASK) == SRMMU_ET_PTD);
pgd               316 arch/sparc/include/asm/pgtable_32.h #define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
pgd               813 arch/sparc/include/asm/pgtable_64.h #define pgd_none(pgd)			(!pgd_val(pgd))
pgd               815 arch/sparc/include/asm/pgtable_64.h #define pgd_bad(pgd)			(pgd_val(pgd) & ~PAGE_MASK)
pgd               862 arch/sparc/include/asm/pgtable_64.h #define pgd_page_vaddr(pgd)		\
pgd               863 arch/sparc/include/asm/pgtable_64.h 	((unsigned long) __va(pgd_val(pgd)))
pgd               864 arch/sparc/include/asm/pgtable_64.h #define pgd_present(pgd)		(pgd_val(pgd) != 0U)
pgd               868 arch/sparc/include/asm/pgtable_64.h #define pgd_page(pgd)			NULL
pgd               892 arch/sparc/include/asm/pgtable_64.h #define pgd_offset(mm, address)	((mm)->pgd + pgd_index(address))
pgd               901 arch/sparc/kernel/smp_64.c 	if (tp->pgd_paddr == __pa(mm->pgd))
pgd              1623 arch/sparc/kernel/smp_64.c 	pgd_t *pgd = pgd_offset_k(addr);
pgd              1627 arch/sparc/kernel/smp_64.c 	if (pgd_none(*pgd)) {
pgd              1633 arch/sparc/kernel/smp_64.c 		pgd_populate(&init_mm, pgd, new);
pgd              1636 arch/sparc/kernel/smp_64.c 	pud = pud_offset(pgd, addr);
pgd               230 arch/sparc/kernel/unaligned_32.c 			(current->mm ? (unsigned long) current->mm->pgd :
pgd               231 arch/sparc/kernel/unaligned_32.c 			(unsigned long) current->active_mm->pgd));
pgd               282 arch/sparc/kernel/unaligned_64.c 			(current->mm ? (unsigned long) current->mm->pgd :
pgd               283 arch/sparc/kernel/unaligned_64.c 			(unsigned long) current->active_mm->pgd));
pgd                53 arch/sparc/mm/fault_32.c 		(tsk->mm ? (unsigned long) tsk->mm->pgd :
pgd                54 arch/sparc/mm/fault_32.c 			(unsigned long) tsk->active_mm->pgd));
pgd               353 arch/sparc/mm/fault_32.c 		pgd_t *pgd, *pgd_k;
pgd               356 arch/sparc/mm/fault_32.c 		pgd = tsk->active_mm->pgd + offset;
pgd               357 arch/sparc/mm/fault_32.c 		pgd_k = init_mm.pgd + offset;
pgd               359 arch/sparc/mm/fault_32.c 		if (!pgd_present(*pgd)) {
pgd               362 arch/sparc/mm/fault_32.c 			pgd_val(*pgd) = pgd_val(*pgd_k);
pgd               366 arch/sparc/mm/fault_32.c 		pmd = pmd_offset(pgd, address);
pgd                57 arch/sparc/mm/fault_64.c 	       (tsk->mm ? (unsigned long) tsk->mm->pgd :
pgd                58 arch/sparc/mm/fault_64.c 		          (unsigned long) tsk->active_mm->pgd));
pgd               279 arch/sparc/mm/hugetlbpage.c 	pgd_t *pgd;
pgd               283 arch/sparc/mm/hugetlbpage.c 	pgd = pgd_offset(mm, addr);
pgd               284 arch/sparc/mm/hugetlbpage.c 	pud = pud_alloc(mm, pgd, addr);
pgd               300 arch/sparc/mm/hugetlbpage.c 	pgd_t *pgd;
pgd               304 arch/sparc/mm/hugetlbpage.c 	pgd = pgd_offset(mm, addr);
pgd               305 arch/sparc/mm/hugetlbpage.c 	if (pgd_none(*pgd))
pgd               307 arch/sparc/mm/hugetlbpage.c 	pud = pud_offset(pgd, addr);
pgd               452 arch/sparc/mm/hugetlbpage.c static void hugetlb_free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
pgd               461 arch/sparc/mm/hugetlbpage.c 	pud = pud_offset(pgd, addr);
pgd               484 arch/sparc/mm/hugetlbpage.c 	pud = pud_offset(pgd, start);
pgd               485 arch/sparc/mm/hugetlbpage.c 	pgd_clear(pgd);
pgd               494 arch/sparc/mm/hugetlbpage.c 	pgd_t *pgd;
pgd               513 arch/sparc/mm/hugetlbpage.c 	pgd = pgd_offset(tlb->mm, addr);
pgd               516 arch/sparc/mm/hugetlbpage.c 		if (pgd_none_or_clear_bad(pgd))
pgd               518 arch/sparc/mm/hugetlbpage.c 		hugetlb_free_pud_range(tlb, pgd, addr, next, floor, ceiling);
pgd               519 arch/sparc/mm/hugetlbpage.c 	} while (pgd++, addr = next, addr != end);
pgd              1655 arch/sparc/mm/init_64.c 	pgd_t *pgd;
pgd              1673 arch/sparc/mm/init_64.c 	pgd = pgd_offset_k(addr);
pgd              1674 arch/sparc/mm/init_64.c 	if (pgd_none(*pgd))
pgd              1677 arch/sparc/mm/init_64.c 	pud = pud_offset(pgd, addr);
pgd              1802 arch/sparc/mm/init_64.c 		pgd_t *pgd = pgd_offset_k(vstart);
pgd              1807 arch/sparc/mm/init_64.c 		if (pgd_none(*pgd)) {
pgd              1815 arch/sparc/mm/init_64.c 			pgd_populate(&init_mm, pgd, new);
pgd              1817 arch/sparc/mm/init_64.c 		pud = pud_offset(pgd, vstart);
pgd              2406 arch/sparc/mm/init_64.c 	init_mm.pgd += ((shift) / (sizeof(pgd_t)));
pgd              2613 arch/sparc/mm/init_64.c 		pgd_t *pgd = vmemmap_pgd_populate(vstart, node);
pgd              2618 arch/sparc/mm/init_64.c 		if (!pgd)
pgd              2621 arch/sparc/mm/init_64.c 		pud = vmemmap_pud_populate(pgd, vstart, node);
pgd                40 arch/sparc/mm/leon_mm.c 	unsigned int pgd, pmd, ped;
pgd                69 arch/sparc/mm/leon_mm.c 	pgd = LEON_BYPASS_LOAD_PA(ctxtbl + (ctx * 4));
pgd                71 arch/sparc/mm/leon_mm.c 	if (((pgd & SRMMU_ET_MASK) == SRMMU_ET_PTE)) {
pgd                75 arch/sparc/mm/leon_mm.c 		pte = pgd;
pgd                76 arch/sparc/mm/leon_mm.c 		paddrbase = pgd & _SRMMU_PTE_PMASK_LEON;
pgd                79 arch/sparc/mm/leon_mm.c 	if (((pgd & SRMMU_ET_MASK) != SRMMU_ET_PTD)) {
pgd                86 arch/sparc/mm/leon_mm.c 		printk(KERN_INFO "swprobe:  --- pgd (%x) ---\n", pgd);
pgd                88 arch/sparc/mm/leon_mm.c 	ptr = (pgd & SRMMU_PTD_PMASK) << 4;
pgd               298 arch/sparc/mm/srmmu.c 	pgd_t *pgd;
pgd               323 arch/sparc/mm/srmmu.c 	init_mm.pgd = srmmu_swapper_pg_dir;
pgd               331 arch/sparc/mm/srmmu.c 		pgd = pgd_offset_k(vaddr);
pgd               332 arch/sparc/mm/srmmu.c 		pmd = pmd_offset(__nocache_fix(pgd), vaddr);
pgd               352 arch/sparc/mm/srmmu.c 	pgd_t *pgd = NULL;
pgd               354 arch/sparc/mm/srmmu.c 	pgd = __srmmu_get_nocache(SRMMU_PGD_TABLE_SIZE, SRMMU_PGD_TABLE_SIZE);
pgd               355 arch/sparc/mm/srmmu.c 	if (pgd) {
pgd               357 arch/sparc/mm/srmmu.c 		memset(pgd, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
pgd               358 arch/sparc/mm/srmmu.c 		memcpy(pgd + USER_PTRS_PER_PGD, init + USER_PTRS_PER_PGD,
pgd               362 arch/sparc/mm/srmmu.c 	return pgd;
pgd               502 arch/sparc/mm/srmmu.c 		srmmu_ctxd_set(&srmmu_context_table[mm->context], mm->pgd);
pgd               908 arch/sparc/mm/srmmu.c 	pgd_t *pgd;
pgd               969 arch/sparc/mm/srmmu.c 	pgd = pgd_offset_k(PKMAP_BASE);
pgd               970 arch/sparc/mm/srmmu.c 	pmd = pmd_offset(pgd, PKMAP_BASE);
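
On sparc32 the per-address-space root lives in srmmu_context_table: srmmu_ctxd_set(&srmmu_context_table[mm->context], mm->pgd) installs the incoming mm's table under its context number, and the MMU walks from whichever slot the current context selects. A minimal model of that per-context root array; the table size is an assumption:

    #include <stdio.h>

    #define NCTX 256

    static unsigned long *context_table[NCTX];

    static void ctxd_set_model(int ctx, unsigned long *pgd)
    {
        context_table[ctx] = pgd;   /* hardware walks from this root */
    }

    int main(void)
    {
        static unsigned long root[256];

        ctxd_set_model(13, root);   /* like srmmu_ctxd_set(..., mm->pgd) */
        printf("ctx 13 -> %p\n", (void *)context_table[13]);
        return 0;
    }
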
pgd                39 arch/um/include/asm/page.h typedef struct { unsigned long pgd; } pgd_t;
pgd                58 arch/um/include/asm/page.h typedef struct { unsigned long pgd; } pgd_t;
pgd                84 arch/um/include/asm/page.h #define pgd_val(x)	((x).pgd)
pgd                28 arch/um/include/asm/pgalloc.h extern void pgd_free(struct mm_struct *mm, pgd_t *pgd);
pgd                36 arch/um/include/asm/pgtable-2level.h static inline int pgd_newpage(pgd_t pgd)	{ return 0; }
pgd                37 arch/um/include/asm/pgtable-2level.h static inline void pgd_mkuptodate(pgd_t pgd)	{ }
pgd                69 arch/um/include/asm/pgtable-3level.h static inline int pgd_newpage(pgd_t pgd)
pgd                71 arch/um/include/asm/pgtable-3level.h 	return(pgd_val(pgd) & _PAGE_NEWPAGE);
pgd                74 arch/um/include/asm/pgtable-3level.h static inline void pgd_mkuptodate(pgd_t pgd) { pgd_val(pgd) &= ~_PAGE_NEWPAGE; }
pgd               313 arch/um/include/asm/pgtable.h #define pgd_offset(mm, address) ((mm)->pgd+pgd_index(address))
pgd                98 arch/um/kernel/mem.c 	pgd_t *pgd;
pgd               107 arch/um/kernel/mem.c 	pgd = pgd_base + i;
pgd               109 arch/um/kernel/mem.c 	for ( ; (i < PTRS_PER_PGD) && (vaddr < end); pgd++, i++) {
pgd               110 arch/um/kernel/mem.c 		pud = pud_offset(pgd, vaddr);
pgd               126 arch/um/kernel/mem.c 	pgd_t *pgd;
pgd               146 arch/um/kernel/mem.c 		pgd = swapper_pg_dir + pgd_index(vaddr);
pgd               147 arch/um/kernel/mem.c 		pud = pud_offset(pgd, vaddr);
pgd               196 arch/um/kernel/mem.c 	pgd_t *pgd = (pgd_t *)__get_free_page(GFP_KERNEL);
pgd               198 arch/um/kernel/mem.c 	if (pgd) {
pgd               199 arch/um/kernel/mem.c 		memset(pgd, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
pgd               200 arch/um/kernel/mem.c 		memcpy(pgd + USER_PTRS_PER_PGD,
pgd               204 arch/um/kernel/mem.c 	return pgd;
pgd               207 arch/um/kernel/mem.c void pgd_free(struct mm_struct *mm, pgd_t *pgd)
pgd               209 arch/um/kernel/mem.c 	free_page((unsigned long) pgd);
pgd                21 arch/um/kernel/skas/mmu.c 	pgd_t *pgd;
pgd                26 arch/um/kernel/skas/mmu.c 	pgd = pgd_offset(mm, proc);
pgd                27 arch/um/kernel/skas/mmu.c 	pud = pud_alloc(mm, pgd, proc);
pgd                19 arch/um/kernel/skas/uaccess.c 	pgd_t *pgd;
pgd                26 arch/um/kernel/skas/uaccess.c 	pgd = pgd_offset(mm, addr);
pgd                27 arch/um/kernel/skas/uaccess.c 	if (!pgd_present(*pgd))
pgd                30 arch/um/kernel/skas/uaccess.c 	pud = pud_offset(pgd, addr);
pgd               280 arch/um/kernel/tlb.c static inline int update_pud_range(pgd_t *pgd, unsigned long addr,
pgd               288 arch/um/kernel/tlb.c 	pud = pud_offset(pgd, addr);
pgd               305 arch/um/kernel/tlb.c 	pgd_t *pgd;
pgd               311 arch/um/kernel/tlb.c 	pgd = pgd_offset(mm, addr);
pgd               314 arch/um/kernel/tlb.c 		if (!pgd_present(*pgd)) {
pgd               315 arch/um/kernel/tlb.c 			if (force || pgd_newpage(*pgd)) {
pgd               317 arch/um/kernel/tlb.c 				pgd_mkuptodate(*pgd);
pgd               320 arch/um/kernel/tlb.c 		else ret = update_pud_range(pgd, addr, next, &hvc);
pgd               321 arch/um/kernel/tlb.c 	} while (pgd++, addr = next, ((addr < end_addr) && !ret));
pgd               340 arch/um/kernel/tlb.c 	pgd_t *pgd;
pgd               351 arch/um/kernel/tlb.c 		pgd = pgd_offset(mm, addr);
pgd               352 arch/um/kernel/tlb.c 		if (!pgd_present(*pgd)) {
pgd               356 arch/um/kernel/tlb.c 			if (pgd_newpage(*pgd)) {
pgd               367 arch/um/kernel/tlb.c 		pud = pud_offset(pgd, addr);
pgd               426 arch/um/kernel/tlb.c 	pgd_t *pgd;
pgd               436 arch/um/kernel/tlb.c 	pgd = pgd_offset(mm, address);
pgd               437 arch/um/kernel/tlb.c 	if (!pgd_present(*pgd))
pgd               440 arch/um/kernel/tlb.c 	pud = pud_offset(pgd, address);
pgd               498 arch/um/kernel/tlb.c pud_t *pud_offset_proc(pgd_t *pgd, unsigned long address)
pgd               500 arch/um/kernel/tlb.c 	return pud_offset(pgd, address);
pgd               515 arch/um/kernel/tlb.c 	pgd_t *pgd = pgd_offset(task->mm, addr);
pgd               516 arch/um/kernel/tlb.c 	pud_t *pud = pud_offset(pgd, addr);
pgd                30 arch/um/kernel/trap.c 	pgd_t *pgd;
pgd               106 arch/um/kernel/trap.c 		pgd = pgd_offset(mm, address);
pgd               107 arch/um/kernel/trap.c 		pud = pud_offset(pgd, address);
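
um's tlb.c walks ranges with the "pgd++, addr = next" loop visible above: each pass handles one top-level span, clamped to the end of the requested range. Just the cursor arithmetic, with an invented span size standing in for PGDIR_SIZE:

    #include <stdio.h>

    #define SPAN 0x1000UL       /* stand-in for PGDIR_SIZE */

    int main(void)
    {
        unsigned long addr = 0x1234, end = 0x4000, next;

        do {
            next = (addr + SPAN) & ~(SPAN - 1);  /* next span boundary */
            if (next > end)
                next = end;
            printf("handle [%#lx, %#lx)\n", addr, next);
        } while ((addr = next) < end);  /* like pgd++, addr = next */
        return 0;
    }
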
pgd                18 arch/unicore32/include/asm/cpu-single.h #define cpu_switch_mm(pgd, mm) cpu_do_switch_mm(virt_to_phys(pgd), mm)
pgd                52 arch/unicore32/include/asm/mmu_context.h 		cpu_switch_mm(next->pgd, next);
pgd                35 arch/unicore32/include/asm/page.h typedef struct { unsigned long pgd; } pgd_t;
pgd                39 arch/unicore32/include/asm/page.h #define pgd_val(x)	((x).pgd)
pgd                25 arch/unicore32/include/asm/pgalloc.h extern void free_pgd_slow(struct mm_struct *mm, pgd_t *pgd);
pgd                28 arch/unicore32/include/asm/pgalloc.h #define pgd_free(mm, pgd)		free_pgd_slow(mm, pgd)
pgd                51 arch/unicore32/include/asm/pgtable.h #define pgd_ERROR(pgd)		__pgd_error(__FILE__, __LINE__, pgd_val(pgd))
pgd               231 arch/unicore32/include/asm/pgtable.h #define pgd_offset(mm, addr)	((mm)->pgd+pgd_index(addr))
pgd                33 arch/unicore32/kernel/hibernate.c static pmd_t *resume_one_md_table_init(pgd_t *pgd)
pgd                38 arch/unicore32/kernel/hibernate.c 	pud = pud_offset(pgd, 0);
pgd                73 arch/unicore32/kernel/hibernate.c 	pgd_t *pgd;
pgd                79 arch/unicore32/kernel/hibernate.c 	pgd = pgd_base + pgd_idx;
pgd                82 arch/unicore32/kernel/hibernate.c 	for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
pgd                83 arch/unicore32/kernel/hibernate.c 		pmd = resume_one_md_table_init(pgd);
pgd                40 arch/unicore32/mm/fault.c 	pgd_t *pgd;
pgd                45 arch/unicore32/mm/fault.c 	printk(KERN_ALERT "pgd = %p\n", mm->pgd);
pgd                46 arch/unicore32/mm/fault.c 	pgd = pgd_offset(mm, addr);
pgd                47 arch/unicore32/mm/fault.c 	printk(KERN_ALERT "[%08lx] *pgd=%08lx", addr, pgd_val(*pgd));
pgd                53 arch/unicore32/mm/fault.c 		if (pgd_none(*pgd))
pgd                56 arch/unicore32/mm/fault.c 		if (pgd_bad(*pgd)) {
pgd                61 arch/unicore32/mm/fault.c 		pmd = pmd_offset((pud_t *) pgd, addr);
pgd               339 arch/unicore32/mm/fault.c 	pgd_t *pgd, *pgd_k;
pgd               350 arch/unicore32/mm/fault.c 	pgd = cpu_get_pgd() + index;
pgd               351 arch/unicore32/mm/fault.c 	pgd_k = init_mm.pgd + index;
pgd               357 arch/unicore32/mm/fault.c 	pmd = pmd_offset((pud_t *) pgd, addr);
pgd                65 arch/unicore32/mm/ioremap.c 	pgd_t *pgd;
pgd                68 arch/unicore32/mm/ioremap.c 	pgd = pgd_offset_k(addr);
pgd                70 arch/unicore32/mm/ioremap.c 		pmd_t pmd, *pmdp = pmd_offset((pud_t *)pgd, addr);
pgd                91 arch/unicore32/mm/ioremap.c 		pgd++;
pgd               102 arch/unicore32/mm/ioremap.c 	pgd_t *pgd;
pgd               110 arch/unicore32/mm/ioremap.c 	pgd = pgd_offset_k(addr);
pgd               112 arch/unicore32/mm/ioremap.c 		pmd_t *pmd = pmd_offset((pud_t *)pgd, addr);
pgd               119 arch/unicore32/mm/ioremap.c 		pgd++;
pgd                17 arch/unicore32/mm/mm.h static inline pmd_t *pmd_off(pgd_t *pgd, unsigned long virt)
pgd                19 arch/unicore32/mm/mm.h 	return pmd_offset((pud_t *)pgd, virt);
pgd               169 arch/unicore32/mm/mmu.c static void __init alloc_init_section(pgd_t *pgd, unsigned long addr,
pgd               173 arch/unicore32/mm/mmu.c 	pmd_t *pmd = pmd_offset((pud_t *)pgd, addr);
pgd               207 arch/unicore32/mm/mmu.c 	pgd_t *pgd;
pgd               236 arch/unicore32/mm/mmu.c 	pgd = pgd_offset_k(addr);
pgd               241 arch/unicore32/mm/mmu.c 		alloc_init_section(pgd, addr, next, phys, type);
pgd               245 arch/unicore32/mm/mmu.c 	} while (pgd++, addr != end);
pgd               452 arch/unicore32/mm/mmu.c 	pgd_t *pgd;
pgd               460 arch/unicore32/mm/mmu.c 	pgd = current->active_mm->pgd;
pgd               464 arch/unicore32/mm/mmu.c 	for (i = 0; i < FIRST_USER_PGD_NR + USER_PTRS_PER_PGD; i++, pgd++) {
pgd               468 arch/unicore32/mm/mmu.c 		pmd = pmd_off(pgd, i << PGDIR_SHIFT);
pgd                76 arch/unicore32/mm/pgd.c void free_pgd_slow(struct mm_struct *mm, pgd_t *pgd)
pgd                81 arch/unicore32/mm/pgd.c 	if (!pgd)
pgd                85 arch/unicore32/mm/pgd.c 	pmd = pmd_off(pgd, 0);
pgd               101 arch/unicore32/mm/pgd.c 	free_pages((unsigned long) pgd, 0);
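
unicore32 folds the middle levels away, which is why nearly every line above contains pmd_offset((pud_t *)pgd, ...): with only two real levels, "descending" from pgd to pud is an identity cast of the same slot. The fold expressed directly; the types are simplified:

    #include <stdio.h>

    typedef struct { unsigned long pgd; } pgd_t;
    typedef struct { unsigned long pud; } pud_t;

    static pud_t *pud_offset_model(pgd_t *pgd)
    {
        return (pud_t *)pgd;    /* folded level: same slot, new name */
    }

    int main(void)
    {
        pgd_t slot = { 0x42 };

        printf("%#lx\n", pud_offset_model(&slot)->pud);
        return 0;
    }
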
pgd               359 arch/x86/entry/vsyscall/vsyscall_64.c 	pgd_t *pgd;
pgd               364 arch/x86/entry/vsyscall/vsyscall_64.c 	pgd = pgd_offset_pgd(root, VSYSCALL_ADDR);
pgd               365 arch/x86/entry/vsyscall/vsyscall_64.c 	set_pgd(pgd, __pgd(pgd_val(*pgd) | _PAGE_USER));
pgd               366 arch/x86/entry/vsyscall/vsyscall_64.c 	p4d = p4d_offset(pgd, VSYSCALL_ADDR);
pgd                89 arch/x86/hyperv/mmu.c 		flush->address_space = virt_to_phys(info->mm->pgd);
pgd               185 arch/x86/hyperv/mmu.c 		flush->address_space = virt_to_phys(info->mm->pgd);
pgd               143 arch/x86/include/asm/kexec.h 	pgd_t *pgd;
pgd               350 arch/x86/include/asm/mmu_context.h 	unsigned long cr3 = build_cr3(this_cpu_read(cpu_tlbstate.loaded_mm)->pgd,
pgd               319 arch/x86/include/asm/paravirt.h static inline void paravirt_pgd_free(struct mm_struct *mm, pgd_t *pgd)
pgd               321 arch/x86/include/asm/paravirt.h 	PVOP_VCALL2(mmu.pgd_free, mm, pgd);
pgd               399 arch/x86/include/asm/paravirt.h static inline pgdval_t pgd_val(pgd_t pgd)
pgd               405 arch/x86/include/asm/paravirt.h 				    pgd.pgd, (u64)pgd.pgd >> 32);
pgd               407 arch/x86/include/asm/paravirt.h 		ret =  PVOP_CALLEE1(pgdval_t, mmu.pgd_val, pgd.pgd);
pgd               539 arch/x86/include/asm/paravirt.h static inline void __set_pgd(pgd_t *pgdp, pgd_t pgd)
pgd               541 arch/x86/include/asm/paravirt.h 	PVOP_VCALL2(mmu.set_pgd, pgdp, native_pgd_val(pgd));
pgd               548 arch/x86/include/asm/paravirt.h 		set_p4d((p4d_t *)(pgdp), (p4d_t) { (pgdval).pgd });	\
pgd               232 arch/x86/include/asm/paravirt_types.h 	void (*pgd_free)(struct mm_struct *mm, pgd_t *pgd);
pgd                18 arch/x86/include/asm/pgalloc.h static inline void paravirt_pgd_free(struct mm_struct *mm, pgd_t *pgd) {}
pgd                51 arch/x86/include/asm/pgalloc.h extern void pgd_free(struct mm_struct *mm, pgd_t *pgd);
pgd               174 arch/x86/include/asm/pgalloc.h static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, p4d_t *p4d)
pgd               179 arch/x86/include/asm/pgalloc.h 	set_pgd(pgd, __pgd(_PAGE_TABLE | __pa(p4d)));
pgd               182 arch/x86/include/asm/pgalloc.h static inline void pgd_populate_safe(struct mm_struct *mm, pgd_t *pgd, p4d_t *p4d)
pgd               187 arch/x86/include/asm/pgalloc.h 	set_pgd_safe(pgd, __pgd(_PAGE_TABLE | __pa(p4d)));
pgd               104 arch/x86/include/asm/pgtable-3level.h 	pud.p4d.pgd = pti_set_user_pgtbl(&pudp->p4d.pgd, pud.p4d.pgd);
pgd               235 arch/x86/include/asm/pgtable-3level.h 	pti_set_user_pgtbl(&pudp->p4d.pgd, __pgd(0));
pgd                32 arch/x86/include/asm/pgtable.h void ptdump_walk_pgd_level(struct seq_file *m, pgd_t *pgd);
pgd                33 arch/x86/include/asm/pgtable.h void ptdump_walk_pgd_level_debugfs(struct seq_file *m, pgd_t *pgd, bool user);
pgd                72 arch/x86/include/asm/pgtable.h #define set_pgd(pgdp, pgd)		native_set_pgd(pgdp, pgd)
pgd                73 arch/x86/include/asm/pgtable.h #define pgd_clear(pgd)			(pgtable_l5_enabled() ? native_pgd_clear(pgd) : 0)
pgd               237 arch/x86/include/asm/pgtable.h static inline unsigned long pgd_pfn(pgd_t pgd)
pgd               239 arch/x86/include/asm/pgtable.h 	return (pgd_val(pgd) & PTE_PFN_MASK) >> PAGE_SHIFT;
pgd               293 arch/x86/include/asm/pgtable.h static inline int pgd_devmap(pgd_t pgd)
pgd               688 arch/x86/include/asm/pgtable.h pgd_t __pti_set_user_pgtbl(pgd_t *pgdp, pgd_t pgd);
pgd               695 arch/x86/include/asm/pgtable.h static inline pgd_t pti_set_user_pgtbl(pgd_t *pgdp, pgd_t pgd)
pgd               698 arch/x86/include/asm/pgtable.h 		return pgd;
pgd               699 arch/x86/include/asm/pgtable.h 	return __pti_set_user_pgtbl(pgdp, pgd);
pgd               702 arch/x86/include/asm/pgtable.h static inline pgd_t pti_set_user_pgtbl(pgd_t *pgdp, pgd_t pgd)
pgd               704 arch/x86/include/asm/pgtable.h 	return pgd;
pgd               948 arch/x86/include/asm/pgtable.h static inline int pgd_present(pgd_t pgd)
pgd               952 arch/x86/include/asm/pgtable.h 	return pgd_flags(pgd) & _PAGE_PRESENT;
pgd               955 arch/x86/include/asm/pgtable.h static inline unsigned long pgd_page_vaddr(pgd_t pgd)
pgd               957 arch/x86/include/asm/pgtable.h 	return (unsigned long)__va((unsigned long)pgd_val(pgd) & PTE_PFN_MASK);
pgd               964 arch/x86/include/asm/pgtable.h #define pgd_page(pgd)	pfn_to_page(pgd_pfn(pgd))
pgd               967 arch/x86/include/asm/pgtable.h static inline p4d_t *p4d_offset(pgd_t *pgd, unsigned long address)
pgd               970 arch/x86/include/asm/pgtable.h 		return (p4d_t *)pgd;
pgd               971 arch/x86/include/asm/pgtable.h 	return (p4d_t *)pgd_page_vaddr(*pgd) + p4d_index(address);
pgd               974 arch/x86/include/asm/pgtable.h static inline int pgd_bad(pgd_t pgd)
pgd               984 arch/x86/include/asm/pgtable.h 	return (pgd_flags(pgd) & ~ignore_flags) != _KERNPG_TABLE;
pgd               987 arch/x86/include/asm/pgtable.h static inline int pgd_none(pgd_t pgd)
pgd               997 arch/x86/include/asm/pgtable.h 	return !native_pgd_val(pgd);
pgd              1015 arch/x86/include/asm/pgtable.h #define pgd_offset_pgd(pgd, address) (pgd + pgd_index((address)))
pgd              1019 arch/x86/include/asm/pgtable.h #define pgd_offset(mm, address) pgd_offset_pgd((mm)->pgd, (address))
pgd              1240 arch/x86/include/asm/pgtable.h static inline int pgd_large(pgd_t pgd) { return 0; }
pgd               138 arch/x86/include/asm/pgtable_64.h 	pgd_t pgd;
pgd               145 arch/x86/include/asm/pgtable_64.h 	pgd = native_make_pgd(native_p4d_val(p4d));
pgd               146 arch/x86/include/asm/pgtable_64.h 	pgd = pti_set_user_pgtbl((pgd_t *)p4dp, pgd);
pgd               147 arch/x86/include/asm/pgtable_64.h 	WRITE_ONCE(*p4dp, native_make_p4d(native_pgd_val(pgd)));
pgd               155 arch/x86/include/asm/pgtable_64.h static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
pgd               157 arch/x86/include/asm/pgtable_64.h 	WRITE_ONCE(*pgdp, pti_set_user_pgtbl(pgdp, pgd));
pgd               160 arch/x86/include/asm/pgtable_64.h static inline void native_pgd_clear(pgd_t *pgd)
pgd               162 arch/x86/include/asm/pgtable_64.h 	native_set_pgd(pgd, native_make_pgd(0));
pgd               267 arch/x86/include/asm/pgtable_types.h typedef struct { pgdval_t pgd; } pgd_t;
pgd               297 arch/x86/include/asm/pgtable_types.h static inline pgdval_t native_pgd_val(pgd_t pgd)
pgd               299 arch/x86/include/asm/pgtable_types.h 	return pgd.pgd & PGD_ALLOWED_BITS;
pgd               302 arch/x86/include/asm/pgtable_types.h static inline pgdval_t pgd_flags(pgd_t pgd)
pgd               304 arch/x86/include/asm/pgtable_types.h 	return native_pgd_val(pgd) & PTE_FLAGS_MASK;
pgd               324 arch/x86/include/asm/pgtable_types.h 	return (p4d_t) { .pgd = native_make_pgd((pgdval_t)val) };
pgd               329 arch/x86/include/asm/pgtable_types.h 	return native_pgd_val(p4d.pgd);
pgd               350 arch/x86/include/asm/pgtable_types.h 	return (pud_t) { .p4d.pgd = native_make_pgd(val) };
pgd               355 arch/x86/include/asm/pgtable_types.h 	return native_pgd_val(pud.p4d.pgd);
pgd               376 arch/x86/include/asm/pgtable_types.h 	return (pmd_t) { .pud.p4d.pgd = native_make_pgd(val) };
pgd               381 arch/x86/include/asm/pgtable_types.h 	return native_pgd_val(pmd.pud.p4d.pgd);
pgd               562 arch/x86/include/asm/pgtable_types.h extern pte_t *lookup_address_in_pgd(pgd_t *pgd, unsigned long address,
pgd               566 arch/x86/include/asm/pgtable_types.h extern int __init kernel_map_pages_in_pgd(pgd_t *pgd, u64 pfn,
pgd               570 arch/x86/include/asm/pgtable_types.h extern int __init kernel_unmap_pages_in_pgd(pgd_t *pgd, unsigned long address,
pgd               121 arch/x86/include/asm/tlbflush.h static inline unsigned long build_cr3(pgd_t *pgd, u16 asid)
pgd               124 arch/x86/include/asm/tlbflush.h 		return __sme_pa(pgd) | kern_pcid(asid);
pgd               127 arch/x86/include/asm/tlbflush.h 		return __sme_pa(pgd);
pgd               131 arch/x86/include/asm/tlbflush.h static inline unsigned long build_cr3_noflush(pgd_t *pgd, u16 asid)
pgd               140 arch/x86/include/asm/tlbflush.h 	return __sme_pa(pgd) | kern_pcid(asid) | CR3_NOFLUSH;
pgd               272 arch/x86/include/asm/tlbflush.h 	VM_WARN_ON_ONCE(current_mm->pgd != __va(read_cr3_pa()));
pgd               342 arch/x86/include/asm/xen/page.h #define pud_val_ma(v) ((v).p4d.pgd.pgd)
pgd               349 arch/x86/include/asm/xen/page.h #define p4d_val_ma(x)	((x).pgd.pgd)
pgd               116 arch/x86/kernel/espfix_64.c 	pgd_t *pgd;
pgd               120 arch/x86/kernel/espfix_64.c 	pgd = &init_top_pgt[pgd_index(ESPFIX_BASE_ADDR)];
pgd               121 arch/x86/kernel/espfix_64.c 	p4d = p4d_alloc(&init_mm, pgd, ESPFIX_BASE_ADDR);
pgd                89 arch/x86/kernel/head32.c #define SET_PL2(pl2, val)   { (pl2).pgd = (val); }
pgd               119 arch/x86/kernel/head64.c 	pgdval_t *pgd;
pgd               152 arch/x86/kernel/head64.c 	pgd = fixup_pointer(&early_top_pgt, physaddr);
pgd               153 arch/x86/kernel/head64.c 	p = pgd + pgd_index(__START_KERNEL_map);
pgd               191 arch/x86/kernel/head64.c 		pgd[i + 0] = (pgdval_t)p4d + pgtable_flags;
pgd               192 arch/x86/kernel/head64.c 		pgd[i + 1] = (pgdval_t)p4d + pgtable_flags;
pgd               199 arch/x86/kernel/head64.c 		pgd[i + 0] = (pgdval_t)pud + pgtable_flags;
pgd               200 arch/x86/kernel/head64.c 		pgd[i + 1] = (pgdval_t)pud + pgtable_flags;
pgd               303 arch/x86/kernel/head64.c 	pgdval_t pgd, *pgd_p;
pgd               313 arch/x86/kernel/head64.c 	pgd_p = &early_top_pgt[pgd_index(address)].pgd;
pgd               314 arch/x86/kernel/head64.c 	pgd = *pgd_p;
pgd               323 arch/x86/kernel/head64.c 	else if (pgd)
pgd               324 arch/x86/kernel/head64.c 		p4d_p = (p4dval_t *)((pgd & PTE_PFN_MASK) + __START_KERNEL_map - phys_base);
pgd               131 arch/x86/kernel/ldt.c static pmd_t *pgd_to_pmd_walk(pgd_t *pgd, unsigned long va)
pgd               136 arch/x86/kernel/ldt.c 	if (pgd->pgd == 0)
pgd               139 arch/x86/kernel/ldt.c 	p4d = p4d_offset(pgd, va);
pgd               182 arch/x86/kernel/ldt.c 	pgd_t *pgd = pgd_offset(mm, LDT_BASE_ADDR);
pgd               185 arch/x86/kernel/ldt.c 		set_pgd(kernel_to_user_pgdp(pgd), *pgd);
pgd               190 arch/x86/kernel/ldt.c 	pgd_t *pgd = pgd_offset(mm, LDT_BASE_ADDR);
pgd               191 arch/x86/kernel/ldt.c 	bool had_kernel = (pgd->pgd != 0);
pgd               192 arch/x86/kernel/ldt.c 	bool had_user   = (kernel_to_user_pgdp(pgd)->pgd != 0);
pgd                57 arch/x86/kernel/machine_kexec_32.c 	free_pages((unsigned long)image->arch.pgd, PGD_ALLOCATION_ORDER);
pgd                58 arch/x86/kernel/machine_kexec_32.c 	image->arch.pgd = NULL;
pgd                73 arch/x86/kernel/machine_kexec_32.c 	image->arch.pgd = (pgd_t *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
pgd                81 arch/x86/kernel/machine_kexec_32.c 	if (!image->arch.pgd ||
pgd                92 arch/x86/kernel/machine_kexec_32.c 	pgd_t *pgd, pmd_t *pmd, pte_t *pte,
pgd                98 arch/x86/kernel/machine_kexec_32.c 	pgd += pgd_index(vaddr);
pgd               100 arch/x86/kernel/machine_kexec_32.c 	if (!(pgd_val(*pgd) & _PAGE_PRESENT))
pgd               101 arch/x86/kernel/machine_kexec_32.c 		set_pgd(pgd, __pgd(__pa(pmd) | _PAGE_PRESENT));
pgd               103 arch/x86/kernel/machine_kexec_32.c 	p4d = p4d_offset(pgd, vaddr);
pgd               122 arch/x86/kernel/machine_kexec_32.c 		image->arch.pgd, pmd, image->arch.pte0,
pgd               128 arch/x86/kernel/machine_kexec_32.c 		image->arch.pgd, pmd, image->arch.pte1,
pgd               215 arch/x86/kernel/machine_kexec_32.c 	page_list[PA_PGD] = __pa(image->arch.pgd);
pgd               124 arch/x86/kernel/machine_kexec_64.c static int init_transition_pgtable(struct kimage *image, pgd_t *pgd)
pgd               136 arch/x86/kernel/machine_kexec_64.c 	pgd += pgd_index(vaddr);
pgd               137 arch/x86/kernel/machine_kexec_64.c 	if (!pgd_present(*pgd)) {
pgd               142 arch/x86/kernel/machine_kexec_64.c 		set_pgd(pgd, __pgd(__pa(p4d) | _KERNPG_TABLE));
pgd               144 arch/x86/kernel/machine_kexec_64.c 	p4d = p4d_offset(pgd, vaddr);
pgd                91 arch/x86/kernel/tboot.c 	.pgd            = swapper_pg_dir,
pgd               107 arch/x86/kernel/tboot.c 	pgd_t *pgd;
pgd               113 arch/x86/kernel/tboot.c 	pgd = pgd_offset(&tboot_mm, vaddr);
pgd               114 arch/x86/kernel/tboot.c 	p4d = p4d_alloc(&tboot_mm, pgd, vaddr);
pgd               137 arch/x86/kernel/tboot.c 	pgd->pgd &= ~_PAGE_NX;
pgd               168 arch/x86/kernel/vm86_32.c 	pgd_t *pgd;
pgd               176 arch/x86/kernel/vm86_32.c 	pgd = pgd_offset(mm, 0xA0000);
pgd               177 arch/x86/kernel/vm86_32.c 	if (pgd_none_or_clear_bad(pgd))
pgd               179 arch/x86/kernel/vm86_32.c 	p4d = p4d_offset(pgd, 0xA0000);
pgd                18 arch/x86/mm/debug_pagetables.c 	if (current->mm->pgd) {
pgd                20 arch/x86/mm/debug_pagetables.c 		ptdump_walk_pgd_level_debugfs(m, current->mm->pgd, false);
pgd                31 arch/x86/mm/debug_pagetables.c 	if (current->mm->pgd) {
pgd                33 arch/x86/mm/debug_pagetables.c 		ptdump_walk_pgd_level_debugfs(m, current->mm->pgd, true);
pgd                45 arch/x86/mm/debug_pagetables.c 	if (efi_mm.pgd)
pgd                46 arch/x86/mm/debug_pagetables.c 		ptdump_walk_pgd_level_debugfs(m, efi_mm.pgd, false);
pgd               518 arch/x86/mm/dump_pagetables.c static void ptdump_walk_pgd_level_core(struct seq_file *m, pgd_t *pgd,
pgd               526 arch/x86/mm/dump_pagetables.c 	if (pgd) {
pgd               527 arch/x86/mm/dump_pagetables.c 		start = pgd;
pgd               569 arch/x86/mm/dump_pagetables.c void ptdump_walk_pgd_level(struct seq_file *m, pgd_t *pgd)
pgd               571 arch/x86/mm/dump_pagetables.c 	ptdump_walk_pgd_level_core(m, pgd, false, true);
pgd               574 arch/x86/mm/dump_pagetables.c void ptdump_walk_pgd_level_debugfs(struct seq_file *m, pgd_t *pgd, bool user)
pgd               578 arch/x86/mm/dump_pagetables.c 		pgd = kernel_to_user_pgdp(pgd);
pgd               580 arch/x86/mm/dump_pagetables.c 	ptdump_walk_pgd_level_core(m, pgd, false, false);
pgd               587 arch/x86/mm/dump_pagetables.c 	pgd_t *pgd = INIT_PGD;
pgd               594 arch/x86/mm/dump_pagetables.c 	pgd = kernel_to_user_pgdp(pgd);
pgd               595 arch/x86/mm/dump_pagetables.c 	ptdump_walk_pgd_level_core(NULL, pgd, true, false);
pgd               149 arch/x86/mm/fault.c static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address)
pgd               157 arch/x86/mm/fault.c 	pgd += index;
pgd               158 arch/x86/mm/fault.c 	pgd_k = init_mm.pgd + index;
pgd               168 arch/x86/mm/fault.c 	p4d = p4d_offset(pgd, address);
pgd               294 arch/x86/mm/fault.c 	pgd_t *pgd = &base[pgd_index(address)];
pgd               301 arch/x86/mm/fault.c 	pr_info("*pdpt = %016Lx ", pgd_val(*pgd));
pgd               302 arch/x86/mm/fault.c 	if (!low_pfn(pgd_val(*pgd) >> PAGE_SHIFT) || !pgd_present(*pgd))
pgd               308 arch/x86/mm/fault.c 	p4d = p4d_offset(pgd, address);
pgd               355 arch/x86/mm/fault.c 	pgd_t *pgd, *pgd_k;
pgd               370 arch/x86/mm/fault.c 	pgd = (pgd_t *)__va(read_cr3_pa()) + pgd_index(address);
pgd               376 arch/x86/mm/fault.c 		if (pgd_none(*pgd)) {
pgd               377 arch/x86/mm/fault.c 			set_pgd(pgd, *pgd_k);
pgd               380 arch/x86/mm/fault.c 			BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_k));
pgd               385 arch/x86/mm/fault.c 	p4d = p4d_offset(pgd, address);
pgd               449 arch/x86/mm/fault.c 	pgd_t *pgd = base + pgd_index(address);
pgd               455 arch/x86/mm/fault.c 	if (bad_address(pgd))
pgd               458 arch/x86/mm/fault.c 	pr_info("PGD %lx ", pgd_val(*pgd));
pgd               460 arch/x86/mm/fault.c 	if (!pgd_present(*pgd))
pgd               463 arch/x86/mm/fault.c 	p4d = p4d_offset(pgd, address);
pgd               615 arch/x86/mm/fault.c 		pgd_t *pgd;
pgd               618 arch/x86/mm/fault.c 		pgd = __va(read_cr3_pa());
pgd               619 arch/x86/mm/fault.c 		pgd += pgd_index(address);
pgd               621 arch/x86/mm/fault.c 		pte = lookup_address_in_pgd(pgd, address, &level);
pgd               627 arch/x86/mm/fault.c 				(pgd_flags(*pgd) & _PAGE_USER) &&
pgd              1123 arch/x86/mm/fault.c 	pgd_t *pgd;
pgd              1143 arch/x86/mm/fault.c 	pgd = init_mm.pgd + pgd_index(address);
pgd              1144 arch/x86/mm/fault.c 	if (!pgd_present(*pgd))
pgd              1147 arch/x86/mm/fault.c 	p4d = p4d_offset(pgd, address);
pgd               105 arch/x86/mm/ident_map.c 		pgd_t *pgd = pgd_page + pgd_index(addr);
pgd               112 arch/x86/mm/ident_map.c 		if (pgd_present(*pgd)) {
pgd               113 arch/x86/mm/ident_map.c 			p4d = p4d_offset(pgd, 0);
pgd               127 arch/x86/mm/ident_map.c 			set_pgd(pgd, __pgd(__pa(p4d) | info->kernpg_flag));
pgd               134 arch/x86/mm/ident_map.c 			set_pgd(pgd, __pgd(__pa(pud) | info->kernpg_flag));
pgd                67 arch/x86/mm/init_32.c static pmd_t * __init one_md_table_init(pgd_t *pgd)
pgd                74 arch/x86/mm/init_32.c 	if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
pgd                77 arch/x86/mm/init_32.c 		set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
pgd                78 arch/x86/mm/init_32.c 		p4d = p4d_offset(pgd, 0);
pgd                85 arch/x86/mm/init_32.c 	p4d = p4d_offset(pgd, 0);
pgd               212 arch/x86/mm/init_32.c 	pgd_t *pgd;
pgd               224 arch/x86/mm/init_32.c 	pgd = pgd_base + pgd_idx;
pgd               226 arch/x86/mm/init_32.c 	for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
pgd               227 arch/x86/mm/init_32.c 		pmd = one_md_table_init(pgd);
pgd               263 arch/x86/mm/init_32.c 	pgd_t *pgd;
pgd               295 arch/x86/mm/init_32.c 	pgd = pgd_base + pgd_idx;
pgd               296 arch/x86/mm/init_32.c 	for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
pgd               297 arch/x86/mm/init_32.c 		pmd = one_md_table_init(pgd);
pgd               395 arch/x86/mm/init_32.c 	pgd_t *pgd = pgd_offset_k(vaddr);
pgd               396 arch/x86/mm/init_32.c 	p4d_t *p4d = p4d_offset(pgd, vaddr);
pgd               417 arch/x86/mm/init_32.c 	pgd_t *pgd;
pgd               426 arch/x86/mm/init_32.c 	pgd = swapper_pg_dir + pgd_index(vaddr);
pgd               427 arch/x86/mm/init_32.c 	p4d = p4d_offset(pgd, vaddr);
pgd               474 arch/x86/mm/init_32.c 	pgd_t *pgd, *base = swapper_pg_dir;
pgd               491 arch/x86/mm/init_32.c 		pgd = base + pgd_index(va);
pgd               492 arch/x86/mm/init_32.c 		if (!pgd_present(*pgd))
pgd               495 arch/x86/mm/init_32.c 		p4d = p4d_offset(pgd, va);
pgd                73 arch/x86/mm/init_64.c DEFINE_POPULATE(pgd_populate, pgd, p4d, init)
pgd               144 arch/x86/mm/init_64.c 			pgd_t *pgd;
pgd               147 arch/x86/mm/init_64.c 			pgd = (pgd_t *)page_address(page) + pgd_index(addr);
pgd               152 arch/x86/mm/init_64.c 			if (!pgd_none(*pgd_ref) && !pgd_none(*pgd))
pgd               153 arch/x86/mm/init_64.c 				BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));
pgd               155 arch/x86/mm/init_64.c 			if (pgd_none(*pgd))
pgd               156 arch/x86/mm/init_64.c 				set_pgd(pgd, *pgd_ref);
pgd               185 arch/x86/mm/init_64.c 			pgd_t *pgd;
pgd               189 arch/x86/mm/init_64.c 			pgd = (pgd_t *)page_address(page) + pgd_index(addr);
pgd               190 arch/x86/mm/init_64.c 			p4d = p4d_offset(pgd, addr);
pgd               243 arch/x86/mm/init_64.c static p4d_t *fill_p4d(pgd_t *pgd, unsigned long vaddr)
pgd               245 arch/x86/mm/init_64.c 	if (pgd_none(*pgd)) {
pgd               247 arch/x86/mm/init_64.c 		pgd_populate(&init_mm, pgd, p4d);
pgd               248 arch/x86/mm/init_64.c 		if (p4d != p4d_offset(pgd, 0))
pgd               250 arch/x86/mm/init_64.c 			       p4d, p4d_offset(pgd, 0));
pgd               252 arch/x86/mm/init_64.c 	return p4d_offset(pgd, vaddr);
pgd               321 arch/x86/mm/init_64.c 	pgd_t *pgd;
pgd               326 arch/x86/mm/init_64.c 	pgd = pgd_offset_k(vaddr);
pgd               327 arch/x86/mm/init_64.c 	if (pgd_none(*pgd)) {
pgd               333 arch/x86/mm/init_64.c 	p4d_page = p4d_offset(pgd, 0);
pgd               339 arch/x86/mm/init_64.c 	pgd_t *pgd;
pgd               343 arch/x86/mm/init_64.c 	pgd = pgd_offset_k(vaddr);
pgd               344 arch/x86/mm/init_64.c 	p4d = fill_p4d(pgd, vaddr);
pgd               363 arch/x86/mm/init_64.c 	pgd_t *pgd;
pgd               373 arch/x86/mm/init_64.c 		pgd = pgd_offset_k((unsigned long)__va(phys));
pgd               374 arch/x86/mm/init_64.c 		if (pgd_none(*pgd)) {
pgd               376 arch/x86/mm/init_64.c 			set_pgd(pgd, __pgd(__pa(p4d) | _KERNPG_TABLE |
pgd               379 arch/x86/mm/init_64.c 		p4d = p4d_offset(pgd, (unsigned long)__va(phys));
pgd               736 arch/x86/mm/init_64.c 		pgd_t *pgd = pgd_offset_k(vaddr);
pgd               741 arch/x86/mm/init_64.c 		if (pgd_val(*pgd)) {
pgd               742 arch/x86/mm/init_64.c 			p4d = (p4d_t *)pgd_page_vaddr(*pgd);
pgd               756 arch/x86/mm/init_64.c 			pgd_populate_init(&init_mm, pgd, p4d, init);
pgd               758 arch/x86/mm/init_64.c 			p4d_populate_init(&init_mm, p4d_offset(pgd, vaddr),
pgd              1178 arch/x86/mm/init_64.c 	pgd_t *pgd;
pgd              1184 arch/x86/mm/init_64.c 		pgd = pgd_offset_k(addr);
pgd              1185 arch/x86/mm/init_64.c 		if (!pgd_present(*pgd))
pgd              1188 arch/x86/mm/init_64.c 		p4d = p4d_offset(pgd, 0);
pgd              1344 arch/x86/mm/init_64.c 	pgd_t *pgd;
pgd              1353 arch/x86/mm/init_64.c 	pgd = pgd_offset_k(addr);
pgd              1354 arch/x86/mm/init_64.c 	if (pgd_none(*pgd))
pgd              1357 arch/x86/mm/init_64.c 	p4d = p4d_offset(pgd, addr);
pgd              1454 arch/x86/mm/init_64.c 	pgd_t *pgd;
pgd              1462 arch/x86/mm/init_64.c 		pgd = vmemmap_pgd_populate(addr, node);
pgd              1463 arch/x86/mm/init_64.c 		if (!pgd)
pgd              1466 arch/x86/mm/init_64.c 		p4d = vmemmap_p4d_populate(pgd, addr, node);
pgd              1541 arch/x86/mm/init_64.c 	pgd_t *pgd;
pgd              1551 arch/x86/mm/init_64.c 		pgd = pgd_offset_k(addr);
pgd              1552 arch/x86/mm/init_64.c 		if (pgd_none(*pgd)) {
pgd              1556 arch/x86/mm/init_64.c 		get_page_bootmem(section_nr, pgd_page(*pgd), MIX_SECTION_INFO);
pgd              1558 arch/x86/mm/init_64.c 		p4d = p4d_offset(pgd, addr);
pgd               805 arch/x86/mm/ioremap.c 	pgd_t *pgd = &base[pgd_index(addr)];
pgd               806 arch/x86/mm/ioremap.c 	p4d_t *p4d = p4d_offset(pgd, addr);
pgd               126 arch/x86/mm/kasan_init_64.c static void __init kasan_populate_pgd(pgd_t *pgd, unsigned long addr,
pgd               133 arch/x86/mm/kasan_init_64.c 	if (pgd_none(*pgd)) {
pgd               135 arch/x86/mm/kasan_init_64.c 		pgd_populate(&init_mm, pgd, p);
pgd               138 arch/x86/mm/kasan_init_64.c 	p4d = p4d_offset(pgd, addr);
pgd               148 arch/x86/mm/kasan_init_64.c 	pgd_t *pgd;
pgd               153 arch/x86/mm/kasan_init_64.c 	pgd = pgd_offset_k(addr);
pgd               156 arch/x86/mm/kasan_init_64.c 		kasan_populate_pgd(pgd, addr, next, nid);
pgd               157 arch/x86/mm/kasan_init_64.c 	} while (pgd++, addr = next, addr != end);
pgd               174 arch/x86/mm/kasan_init_64.c 	pgd_t *pgd;
pgd               179 arch/x86/mm/kasan_init_64.c 		pgd = pgd_offset_k(start);
pgd               185 arch/x86/mm/kasan_init_64.c 			pgd_clear(pgd);
pgd               187 arch/x86/mm/kasan_init_64.c 			p4d_clear(p4d_offset(pgd, start));
pgd               190 arch/x86/mm/kasan_init_64.c 	pgd = pgd_offset_k(start);
pgd               192 arch/x86/mm/kasan_init_64.c 		p4d_clear(p4d_offset(pgd, start));
pgd               195 arch/x86/mm/kasan_init_64.c static inline p4d_t *early_p4d_offset(pgd_t *pgd, unsigned long addr)
pgd               200 arch/x86/mm/kasan_init_64.c 		return (p4d_t *)pgd;
pgd               202 arch/x86/mm/kasan_init_64.c 	p4d = pgd_val(*pgd) & PTE_PFN_MASK;
pgd               207 arch/x86/mm/kasan_init_64.c static void __init kasan_early_p4d_populate(pgd_t *pgd,
pgd               215 arch/x86/mm/kasan_init_64.c 	if (pgd_none(*pgd)) {
pgd               218 arch/x86/mm/kasan_init_64.c 		set_pgd(pgd, pgd_entry);
pgd               221 arch/x86/mm/kasan_init_64.c 	p4d = early_p4d_offset(pgd, addr);
pgd               234 arch/x86/mm/kasan_init_64.c static void __init kasan_map_early_shadow(pgd_t *pgd)
pgd               241 arch/x86/mm/kasan_init_64.c 	pgd += pgd_index(addr);
pgd               244 arch/x86/mm/kasan_init_64.c 		kasan_early_p4d_populate(pgd, addr, next);
pgd               245 arch/x86/mm/kasan_init_64.c 	} while (pgd++, addr = next, addr != end);
pgd               156 arch/x86/mm/kaslr.c 	pgd_t *pgd;
pgd               169 arch/x86/mm/kaslr.c 	pgd = pgd_offset_k(vaddr);
pgd               171 arch/x86/mm/kaslr.c 	p4d = p4d_offset(pgd, vaddr);
pgd                63 arch/x86/mm/mem_encrypt_identity.c 	pgd_t   *pgd;
pgd               100 arch/x86/mm/mem_encrypt_identity.c 	pgd_p = ppd->pgd + pgd_index(ppd->vaddr);
pgd               107 arch/x86/mm/mem_encrypt_identity.c 	pgd_t *pgd;
pgd               112 arch/x86/mm/mem_encrypt_identity.c 	pgd = ppd->pgd + pgd_index(ppd->vaddr);
pgd               113 arch/x86/mm/mem_encrypt_identity.c 	if (pgd_none(*pgd)) {
pgd               117 arch/x86/mm/mem_encrypt_identity.c 		set_pgd(pgd, __pgd(PGD_FLAGS | __pa(p4d)));
pgd               120 arch/x86/mm/mem_encrypt_identity.c 	p4d = p4d_offset(pgd, ppd->vaddr);
pgd               383 arch/x86/mm/mem_encrypt_identity.c 	ppd.pgd = (pgd_t *)native_read_cr3_pa();
pgd               398 arch/x86/mm/mem_encrypt_identity.c 	ppd.pgd = ppd.pgtable_area;
pgd               399 arch/x86/mm/mem_encrypt_identity.c 	memset(ppd.pgd, 0, sizeof(pgd_t) * PTRS_PER_PGD);
pgd               457 arch/x86/mm/mem_encrypt_identity.c 			    kernel_len, workarea_start, (unsigned long)ppd.pgd);
pgd               462 arch/x86/mm/mem_encrypt_identity.c 				    (unsigned long)ppd.pgd);
pgd                37 arch/x86/mm/pageattr.c 	pgd_t		*pgd;
pgd               567 arch/x86/mm/pageattr.c pte_t *lookup_address_in_pgd(pgd_t *pgd, unsigned long address,
pgd               576 arch/x86/mm/pageattr.c 	if (pgd_none(*pgd))
pgd               579 arch/x86/mm/pageattr.c 	p4d = p4d_offset(pgd, address);
pgd               625 arch/x86/mm/pageattr.c 	if (cpa->pgd)
pgd               626 arch/x86/mm/pageattr.c 		return lookup_address_in_pgd(cpa->pgd + pgd_index(address),
pgd               638 arch/x86/mm/pageattr.c 	pgd_t *pgd;
pgd               642 arch/x86/mm/pageattr.c 	pgd = pgd_offset_k(address);
pgd               643 arch/x86/mm/pageattr.c 	if (pgd_none(*pgd))
pgd               646 arch/x86/mm/pageattr.c 	p4d = p4d_offset(pgd, address);
pgd               714 arch/x86/mm/pageattr.c 			pgd_t *pgd;
pgd               719 arch/x86/mm/pageattr.c 			pgd = (pgd_t *)page_address(page) + pgd_index(address);
pgd               720 arch/x86/mm/pageattr.c 			p4d = p4d_offset(pgd, address);
pgd              1404 arch/x86/mm/pageattr.c 	pgd_entry = cpa->pgd + pgd_index(addr);
pgd              1448 arch/x86/mm/pageattr.c 	if (cpa->pgd) {
pgd              1961 arch/x86/mm/pageattr.c 	cpa.pgd = init_mm.pgd;
pgd              2121 arch/x86/mm/pageattr.c 				.pgd = NULL,
pgd              2140 arch/x86/mm/pageattr.c 				.pgd = NULL,
pgd              2211 arch/x86/mm/pageattr.c int __init kernel_map_pages_in_pgd(pgd_t *pgd, u64 pfn, unsigned long address,
pgd              2219 arch/x86/mm/pageattr.c 		.pgd = pgd,
pgd              2248 arch/x86/mm/pageattr.c int __init kernel_unmap_pages_in_pgd(pgd_t *pgd, unsigned long address,
pgd              2262 arch/x86/mm/pageattr.c 		.pgd		= pgd,
pgd                86 arch/x86/mm/pgtable.c static inline void pgd_list_add(pgd_t *pgd)
pgd                88 arch/x86/mm/pgtable.c 	struct page *page = virt_to_page(pgd);
pgd                93 arch/x86/mm/pgtable.c static inline void pgd_list_del(pgd_t *pgd)
pgd                95 arch/x86/mm/pgtable.c 	struct page *page = virt_to_page(pgd);
pgd               106 arch/x86/mm/pgtable.c static void pgd_set_mm(pgd_t *pgd, struct mm_struct *mm)
pgd               108 arch/x86/mm/pgtable.c 	virt_to_page(pgd)->pt_mm = mm;
pgd               116 arch/x86/mm/pgtable.c static void pgd_ctor(struct mm_struct *mm, pgd_t *pgd)
pgd               124 arch/x86/mm/pgtable.c 		clone_pgd_range(pgd + KERNEL_PGD_BOUNDARY,
pgd               131 arch/x86/mm/pgtable.c 		pgd_set_mm(pgd, mm);
pgd               132 arch/x86/mm/pgtable.c 		pgd_list_add(pgd);
pgd               136 arch/x86/mm/pgtable.c static void pgd_dtor(pgd_t *pgd)
pgd               142 arch/x86/mm/pgtable.c 	pgd_list_del(pgd);
pgd               257 arch/x86/mm/pgtable.c 	pgd_t pgd = *pgdp;
pgd               259 arch/x86/mm/pgtable.c 	if (pgd_val(pgd) != 0) {
pgd               260 arch/x86/mm/pgtable.c 		pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);
pgd               264 arch/x86/mm/pgtable.c 		paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
pgd               289 arch/x86/mm/pgtable.c static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
pgd               298 arch/x86/mm/pgtable.c 	p4d = p4d_offset(pgd, 0);
pgd               396 arch/x86/mm/pgtable.c static inline void _pgd_free(pgd_t *pgd)
pgd               399 arch/x86/mm/pgtable.c 		free_pages((unsigned long)pgd, PGD_ALLOCATION_ORDER);
pgd               401 arch/x86/mm/pgtable.c 		kmem_cache_free(pgd_cache, pgd);
pgd               411 arch/x86/mm/pgtable.c static inline void _pgd_free(pgd_t *pgd)
pgd               413 arch/x86/mm/pgtable.c 	free_pages((unsigned long)pgd, PGD_ALLOCATION_ORDER);
pgd               419 arch/x86/mm/pgtable.c 	pgd_t *pgd;
pgd               423 arch/x86/mm/pgtable.c 	pgd = _pgd_alloc();
pgd               425 arch/x86/mm/pgtable.c 	if (pgd == NULL)
pgd               428 arch/x86/mm/pgtable.c 	mm->pgd = pgd;
pgd               446 arch/x86/mm/pgtable.c 	pgd_ctor(mm, pgd);
pgd               447 arch/x86/mm/pgtable.c 	pgd_prepopulate_pmd(mm, pgd, pmds);
pgd               448 arch/x86/mm/pgtable.c 	pgd_prepopulate_user_pmd(mm, pgd, u_pmds);
pgd               452 arch/x86/mm/pgtable.c 	return pgd;
pgd               459 arch/x86/mm/pgtable.c 	_pgd_free(pgd);
pgd               464 arch/x86/mm/pgtable.c void pgd_free(struct mm_struct *mm, pgd_t *pgd)
pgd               466 arch/x86/mm/pgtable.c 	pgd_mop_up_pmds(mm, pgd);
pgd               467 arch/x86/mm/pgtable.c 	pgd_dtor(pgd);
pgd               468 arch/x86/mm/pgtable.c 	paravirt_pgd_free(mm, pgd);
pgd               469 arch/x86/mm/pgtable.c 	_pgd_free(pgd);
pgd                30 arch/x86/mm/pgtable_32.c 	pgd_t *pgd;
pgd                36 arch/x86/mm/pgtable_32.c 	pgd = swapper_pg_dir + pgd_index(vaddr);
pgd                37 arch/x86/mm/pgtable_32.c 	if (pgd_none(*pgd)) {
pgd                41 arch/x86/mm/pgtable_32.c 	p4d = p4d_offset(pgd, vaddr);
pgd               125 arch/x86/mm/pti.c pgd_t __pti_set_user_pgtbl(pgd_t *pgdp, pgd_t pgd)
pgd               137 arch/x86/mm/pti.c 		return pgd;
pgd               143 arch/x86/mm/pti.c 	kernel_to_user_pgdp(pgdp)->pgd = pgd.pgd;
pgd               158 arch/x86/mm/pti.c 	if ((pgd.pgd & (_PAGE_USER|_PAGE_PRESENT)) == (_PAGE_USER|_PAGE_PRESENT) &&
pgd               160 arch/x86/mm/pti.c 		pgd.pgd |= _PAGE_NX;
pgd               163 arch/x86/mm/pti.c 	return pgd;
pgd               174 arch/x86/mm/pti.c 	pgd_t *pgd = kernel_to_user_pgdp(pgd_offset_k(address));
pgd               182 arch/x86/mm/pti.c 	if (pgd_none(*pgd)) {
pgd               187 arch/x86/mm/pti.c 		set_pgd(pgd, __pgd(_KERNPG_TABLE | __pa(new_p4d_page)));
pgd               189 arch/x86/mm/pti.c 	BUILD_BUG_ON(pgd_large(*pgd) != 0);
pgd               191 arch/x86/mm/pti.c 	return p4d_offset(pgd, address);
pgd               316 arch/x86/mm/pti.c 		pgd_t *pgd;
pgd               324 arch/x86/mm/pti.c 		pgd = pgd_offset_k(addr);
pgd               325 arch/x86/mm/pti.c 		if (WARN_ON(pgd_none(*pgd)))
pgd               327 arch/x86/mm/pti.c 		p4d = p4d_offset(pgd, addr);
pgd               167 arch/x86/mm/tlb.c 	pgd_t *pgd = pgd_offset(mm, sp);
pgd               170 arch/x86/mm/tlb.c 		if (unlikely(pgd_none(*pgd))) {
pgd               173 arch/x86/mm/tlb.c 			set_pgd(pgd, *pgd_ref);
pgd               181 arch/x86/mm/tlb.c 		p4d_t *p4d = p4d_offset(pgd, sp);
pgd               309 arch/x86/mm/tlb.c 	if (WARN_ON_ONCE(__read_cr3() != build_cr3(real_prev->pgd, prev_asid))) {
pgd               417 arch/x86/mm/tlb.c 		load_new_mm_cr3(next->pgd, new_asid, true);
pgd               430 arch/x86/mm/tlb.c 		load_new_mm_cr3(next->pgd, new_asid, false);
pgd               490 arch/x86/mm/tlb.c 	WARN_ON((cr3 & CR3_ADDR_MASK) != __pa(mm->pgd));
pgd               501 arch/x86/mm/tlb.c 	write_cr3(build_cr3(mm->pgd, 0));
pgd                82 arch/x86/platform/efi/efi_64.c 	int pgd;
pgd                87 arch/x86/platform/efi/efi_64.c 		return efi_mm.pgd;
pgd               105 arch/x86/platform/efi/efi_64.c 	for (pgd = 0; pgd < n_pgds; pgd++) {
pgd               106 arch/x86/platform/efi/efi_64.c 		addr_pgd = (unsigned long)(pgd * PGDIR_SIZE);
pgd               107 arch/x86/platform/efi/efi_64.c 		vaddr = (unsigned long)__va(pgd * PGDIR_SIZE);
pgd               109 arch/x86/platform/efi/efi_64.c 		save_pgd[pgd] = *pgd_efi;
pgd               140 arch/x86/platform/efi/efi_64.c 		pgd_offset_k(pgd * PGDIR_SIZE)->pgd &= ~_PAGE_NX;
pgd               157 arch/x86/platform/efi/efi_64.c 	pgd_t *pgd;
pgd               169 arch/x86/platform/efi/efi_64.c 		pgd = pgd_offset_k(pgd_idx * PGDIR_SIZE);
pgd               172 arch/x86/platform/efi/efi_64.c 		if (!pgd_present(*pgd))
pgd               176 arch/x86/platform/efi/efi_64.c 			p4d = p4d_offset(pgd,
pgd               186 arch/x86/platform/efi/efi_64.c 		p4d = (p4d_t *)pgd_page_vaddr(*pgd);
pgd               209 arch/x86/platform/efi/efi_64.c 	pgd_t *pgd, *efi_pgd;
pgd               222 arch/x86/platform/efi/efi_64.c 	pgd = efi_pgd + pgd_index(EFI_VA_END);
pgd               223 arch/x86/platform/efi/efi_64.c 	p4d = p4d_alloc(&init_mm, pgd, EFI_VA_END);
pgd               232 arch/x86/platform/efi/efi_64.c 			free_page((unsigned long) pgd_page_vaddr(*pgd));
pgd               237 arch/x86/platform/efi/efi_64.c 	efi_mm.pgd = efi_pgd;
pgd               253 arch/x86/platform/efi/efi_64.c 	pgd_t *efi_pgd = efi_mm.pgd;
pgd               344 arch/x86/platform/efi/efi_64.c 	pgd_t *pgd = efi_mm.pgd;
pgd               357 arch/x86/platform/efi/efi_64.c 	if (kernel_map_pages_in_pgd(pgd, pfn, pa_memmap, num_pages, pf)) {
pgd               377 arch/x86/platform/efi/efi_64.c 	if (kernel_map_pages_in_pgd(pgd, 0x0, 0x0, 1, pf)) {
pgd               404 arch/x86/platform/efi/efi_64.c 	if (kernel_map_pages_in_pgd(pgd, pfn, text, npages, pf)) {
pgd               416 arch/x86/platform/efi/efi_64.c 	pgd_t *pgd = efi_mm.pgd;
pgd               425 arch/x86/platform/efi/efi_64.c 	if (kernel_map_pages_in_pgd(pgd, pfn, va, md->num_pages, flags))
pgd               520 arch/x86/platform/efi/efi_64.c 	pgd_t *pgd = efi_mm.pgd;
pgd               525 arch/x86/platform/efi/efi_64.c 	err1 = kernel_map_pages_in_pgd(pgd, pfn, md->phys_addr, md->num_pages, pf);
pgd               531 arch/x86/platform/efi/efi_64.c 	err2 = kernel_map_pages_in_pgd(pgd, pfn, md->virt_addr, md->num_pages, pf);
pgd               617 arch/x86/platform/efi/efi_64.c 		ptdump_walk_pgd_level(NULL, efi_mm.pgd);
pgd               378 arch/x86/platform/efi/quirks.c 	pgd_t *pgd = efi_mm.pgd;
pgd               398 arch/x86/platform/efi/quirks.c 	if (kernel_unmap_pages_in_pgd(pgd, pa, md->num_pages))
pgd               401 arch/x86/platform/efi/quirks.c 	if (kernel_unmap_pages_in_pgd(pgd, va, md->num_pages))
pgd               213 arch/x86/power/hibernate.c 	pgd_t *pgd;
pgd               226 arch/x86/power/hibernate.c 	pgd = (pgd_t *)__va(read_cr3_pa()) +
pgd               228 arch/x86/power/hibernate.c 	p4d = p4d_offset(pgd, relocated_restore_code);
pgd                30 arch/x86/power/hibernate_32.c static pmd_t *resume_one_md_table_init(pgd_t *pgd)
pgd                41 arch/x86/power/hibernate_32.c 	set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
pgd                42 arch/x86/power/hibernate_32.c 	p4d = p4d_offset(pgd, 0);
pgd                47 arch/x86/power/hibernate_32.c 	p4d = p4d_offset(pgd, 0);
pgd                84 arch/x86/power/hibernate_32.c 	pgd_t *pgd;
pgd                90 arch/x86/power/hibernate_32.c 	pgd = pgd_base + pgd_idx;
pgd                93 arch/x86/power/hibernate_32.c 	for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
pgd                94 arch/x86/power/hibernate_32.c 		pmd = resume_one_md_table_init(pgd);
pgd               147 arch/x86/power/hibernate_32.c 	pgd_t *pgd;
pgd               151 arch/x86/power/hibernate_32.c 	pgd = pgd_base + pgd_index(restore_jump_address);
pgd               153 arch/x86/power/hibernate_32.c 	pmd = resume_one_md_table_init(pgd);
pgd                28 arch/x86/power/hibernate_64.c static int set_up_temporary_text_mapping(pgd_t *pgd)
pgd                77 arch/x86/power/hibernate_64.c 		set_pgd(pgd + pgd_index(restore_jump_address), new_pgd);
pgd                81 arch/x86/power/hibernate_64.c 		set_pgd(pgd + pgd_index(restore_jump_address), new_pgd);
pgd               100 arch/x86/power/hibernate_64.c 	pgd_t *pgd;
pgd               104 arch/x86/power/hibernate_64.c 	pgd = (pgd_t *)get_safe_page(GFP_ATOMIC);
pgd               105 arch/x86/power/hibernate_64.c 	if (!pgd)
pgd               109 arch/x86/power/hibernate_64.c 	result = set_up_temporary_text_mapping(pgd);
pgd               118 arch/x86/power/hibernate_64.c 		result = kernel_ident_mapping_init(&info, pgd, mstart, mend);
pgd               123 arch/x86/power/hibernate_64.c 	temp_pgt = __pa(pgd);
pgd               109 arch/x86/realmode/init.c 	trampoline_pgd[0] = trampoline_pgd_entry.pgd;
pgd               110 arch/x86/realmode/init.c 	trampoline_pgd[511] = init_top_pgt[511].pgd;
pgd                53 arch/x86/xen/mmu_hvm.c 	a.gpa = __pa(mm->pgd);
pgd               383 arch/x86/xen/mmu_pv.c __visible pgdval_t xen_pgd_val(pgd_t pgd)
pgd               385 arch/x86/xen/mmu_pv.c 	return pte_mfn_to_pfn(pgd.pgd);
pgd               397 arch/x86/xen/mmu_pv.c __visible pgd_t xen_make_pgd(pgdval_t pgd)
pgd               399 arch/x86/xen/mmu_pv.c 	pgd = pte_pfn_to_mfn(pgd);
pgd               400 arch/x86/xen/mmu_pv.c 	return native_make_pgd(pgd);
pgd               484 arch/x86/xen/mmu_pv.c static pgd_t *xen_get_user_pgd(pgd_t *pgd)
pgd               486 arch/x86/xen/mmu_pv.c 	pgd_t *pgd_page = (pgd_t *)(((unsigned long)pgd) & PAGE_MASK);
pgd               487 arch/x86/xen/mmu_pv.c 	unsigned offset = pgd - pgd_page;
pgd               542 arch/x86/xen/mmu_pv.c 			pgd_val.pgd = p4d_val_ma(val);
pgd               645 arch/x86/xen/mmu_pv.c static int __xen_pgd_walk(struct mm_struct *mm, pgd_t *pgd,
pgd               673 arch/x86/xen/mmu_pv.c 		if (pgd_none(pgd[i]))
pgd               676 arch/x86/xen/mmu_pv.c 		p4d = p4d_offset(&pgd[i], 0);
pgd               682 arch/x86/xen/mmu_pv.c 	flush |= (*func)(mm, virt_to_page(pgd), PT_PGD);
pgd               692 arch/x86/xen/mmu_pv.c 	return __xen_pgd_walk(mm, mm->pgd, func, limit);
pgd               788 arch/x86/xen/mmu_pv.c static void __xen_pgd_pin(struct mm_struct *mm, pgd_t *pgd)
pgd               790 arch/x86/xen/mmu_pv.c 	trace_xen_mmu_pgd_pin(mm, pgd);
pgd               794 arch/x86/xen/mmu_pv.c 	if (__xen_pgd_walk(mm, pgd, xen_pin_page, USER_LIMIT)) {
pgd               805 arch/x86/xen/mmu_pv.c 		pgd_t *user_pgd = xen_get_user_pgd(pgd);
pgd               807 arch/x86/xen/mmu_pv.c 		xen_do_pin(MMUEXT_PIN_L4_TABLE, PFN_DOWN(__pa(pgd)));
pgd               818 arch/x86/xen/mmu_pv.c 	xen_pin_page(mm, pgd_page(pgd[pgd_index(TASK_SIZE)]),
pgd               821 arch/x86/xen/mmu_pv.c 	xen_do_pin(MMUEXT_PIN_L3_TABLE, PFN_DOWN(__pa(pgd)));
pgd               828 arch/x86/xen/mmu_pv.c 	__xen_pgd_pin(mm, mm->pgd);
pgd               920 arch/x86/xen/mmu_pv.c static void __xen_pgd_unpin(struct mm_struct *mm, pgd_t *pgd)
pgd               922 arch/x86/xen/mmu_pv.c 	trace_xen_mmu_pgd_unpin(mm, pgd);
pgd               926 arch/x86/xen/mmu_pv.c 	xen_do_pin(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));
pgd               930 arch/x86/xen/mmu_pv.c 		pgd_t *user_pgd = xen_get_user_pgd(pgd);
pgd               942 arch/x86/xen/mmu_pv.c 	xen_unpin_page(mm, pgd_page(pgd[pgd_index(TASK_SIZE)]),
pgd               946 arch/x86/xen/mmu_pv.c 	__xen_pgd_walk(mm, pgd, xen_unpin_page, USER_LIMIT);
pgd               953 arch/x86/xen/mmu_pv.c 	__xen_pgd_unpin(mm, mm->pgd);
pgd              1002 arch/x86/xen/mmu_pv.c 	if (this_cpu_read(xen_current_cr3) == __pa(mm->pgd))
pgd              1021 arch/x86/xen/mmu_pv.c 			if (per_cpu(xen_current_cr3, cpu) != __pa(mm->pgd))
pgd              1037 arch/x86/xen/mmu_pv.c 		if (per_cpu(xen_current_cr3, cpu) == __pa(mm->pgd))
pgd              1074 arch/x86/xen/mmu_pv.c 	if (xen_page_pinned(mm->pgd))
pgd              1210 arch/x86/xen/mmu_pv.c 	pgd_t *pgd;
pgd              1216 arch/x86/xen/mmu_pv.c 	pgd = pgd_offset_k(vaddr);
pgd              1217 arch/x86/xen/mmu_pv.c 	p4d = p4d_offset(pgd, 0);
pgd              1485 arch/x86/xen/mmu_pv.c 	pgd_t *pgd = mm->pgd;
pgd              1488 arch/x86/xen/mmu_pv.c 	BUG_ON(PagePinned(virt_to_page(pgd)));
pgd              1492 arch/x86/xen/mmu_pv.c 		struct page *page = virt_to_page(pgd);
pgd              1510 arch/x86/xen/mmu_pv.c 		BUG_ON(PagePinned(virt_to_page(xen_get_user_pgd(pgd))));
pgd              1516 arch/x86/xen/mmu_pv.c static void xen_pgd_free(struct mm_struct *mm, pgd_t *pgd)
pgd              1519 arch/x86/xen/mmu_pv.c 	pgd_t *user_pgd = xen_get_user_pgd(pgd);
pgd              1635 arch/x86/xen/mmu_pv.c 	bool pinned = xen_page_pinned(mm->pgd);
pgd              1875 arch/x86/xen/mmu_pv.c void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
pgd              1913 arch/x86/xen/mmu_pv.c 	l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
pgd              1916 arch/x86/xen/mmu_pv.c 	addr[0] = (unsigned long)pgd;
pgd              1965 arch/x86/xen/mmu_pv.c 	pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));
pgd              2015 arch/x86/xen/mmu_pv.c 	pgd_t pgd;
pgd              2021 arch/x86/xen/mmu_pv.c 	pgd = native_make_pgd(xen_read_phys_ulong(pa + pgd_index(vaddr) *
pgd              2022 arch/x86/xen/mmu_pv.c 						       sizeof(pgd)));
pgd              2023 arch/x86/xen/mmu_pv.c 	if (!pgd_present(pgd))
pgd              2026 arch/x86/xen/mmu_pv.c 	pa = pgd_val(pgd) & PTE_PFN_MASK;
pgd              2064 arch/x86/xen/mmu_pv.c 	pgd_t *pgd;
pgd              2093 arch/x86/xen/mmu_pv.c 	pgd = __va(read_cr3_pa());
pgd              2133 arch/x86/xen/mmu_pv.c 		set_pgd(pgd + 2 + idx_pud, __pgd(_PAGE_TABLE | pud_phys));
pgd              2150 arch/x86/xen/mmu_pv.c 		set_pgd(pgd + 1, __pgd(0));
pgd              2234 arch/x86/xen/mmu_pv.c void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
pgd              2238 arch/x86/xen/mmu_pv.c 	kernel_pmd = m2v(pgd[KERNEL_PGD_BOUNDARY].pgd);
pgd              2252 arch/x86/xen/mmu_pv.c 	copy_page(initial_page_table, pgd);
pgd              2260 arch/x86/xen/mmu_pv.c 	pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));
pgd                36 arch/x86/xen/xen-ops.h void xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn);
pgd                98 arch/xtensa/include/asm/page.h typedef struct { unsigned long pgd; } pgd_t;		/* PGD table entry */
pgd               103 arch/xtensa/include/asm/page.h #define pgd_val(x)	((x).pgd)
pgd                31 arch/xtensa/include/asm/pgalloc.h static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
pgd                33 arch/xtensa/include/asm/pgalloc.h 	free_page((unsigned long)pgd);
pgd               370 arch/xtensa/include/asm/pgtable.h #define pgd_offset(mm,address)	((mm)->pgd + pgd_index(address))
pgd               113 arch/xtensa/kernel/asm-offsets.c 	DEFINE(MM_PGD, offsetof (struct mm_struct, pgd));
pgd               199 arch/xtensa/mm/fault.c 		pgd_t *pgd, *pgd_k;
pgd               206 arch/xtensa/mm/fault.c 		pgd = act_mm->pgd + index;
pgd               207 arch/xtensa/mm/fault.c 		pgd_k = init_mm.pgd + index;
pgd               212 arch/xtensa/mm/fault.c 		pgd_val(*pgd) = pgd_val(*pgd_k);
pgd               214 arch/xtensa/mm/fault.c 		pmd = pmd_offset(pgd, address);
pgd                22 arch/xtensa/mm/kasan_init.c 	pgd_t *pgd = pgd_offset_k(vaddr);
pgd                23 arch/xtensa/mm/kasan_init.c 	pmd_t *pmd = pmd_offset(pgd, vaddr);
pgd                44 arch/xtensa/mm/kasan_init.c 	pgd_t *pgd = pgd_offset_k(vaddr);
pgd                45 arch/xtensa/mm/kasan_init.c 	pmd_t *pmd = pmd_offset(pgd, vaddr);
pgd                24 arch/xtensa/mm/mmu.c 	pgd_t *pgd = pgd_offset_k(vaddr);
pgd                25 arch/xtensa/mm/mmu.c 	pmd_t *pmd = pmd_offset(pgd, vaddr);
pgd               171 arch/xtensa/mm/tlb.c 	pgd_t *pgd;
pgd               177 arch/xtensa/mm/tlb.c 	pgd = pgd_offset(mm, vaddr);
pgd               178 arch/xtensa/mm/tlb.c 	if (pgd_none_or_clear_bad(pgd))
pgd               180 arch/xtensa/mm/tlb.c 	pmd = pmd_offset(pgd, vaddr);
pgd                59 drivers/firmware/efi/arm-runtime.c 	efi_mm.pgd = pgd_alloc(&efi_mm);
pgd               269 drivers/gpu/drm/nouveau/nvkm/engine/fifo/chang84.c 			      &chan->pgd);
pgd               210 drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv50.c 	nvkm_gpuobj_del(&chan->pgd);
pgd               263 drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv50.c 			      &chan->pgd);
pgd                15 drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv50.h 	struct nvkm_gpuobj *pgd;
pgd               124 drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.c 	ret = nvkm_gpuobj_new(device, 0x4000, 0, false, bar->mem, &bar->pgd);
pgd               214 drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.c 		nvkm_gpuobj_del(&bar->pgd);
pgd                12 drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.h 	struct nvkm_gpuobj *pgd;
pgd               142 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	struct nvkm_vmm_pt *pgd = it->pt[it->lvl + 1];
pgd               150 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	if (--pgd->refs[0]) {
pgd               157 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 			if (pgd->pt[0]) {
pgd               159 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 					func->sparse(vmm, pgd->pt[0], pdei, 1);
pgd               160 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 					pgd->pde[pdei] = NVKM_VMM_PDE_SPARSE;
pgd               162 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 					func->unmap(vmm, pgd->pt[0], pdei, 1);
pgd               163 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 					pgd->pde[pdei] = NULL;
pgd               170 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 				func->pde(vmm, pgd, pdei);
pgd               171 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 				pgd->pde[pdei] = NULL;
pgd               177 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 			func->pde(vmm, pgd, pdei);
pgd               413 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c nvkm_vmm_ref_hwpt(struct nvkm_vmm_iter *it, struct nvkm_vmm_pt *pgd, u32 pdei)
pgd               417 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	struct nvkm_vmm_pt *pgt = pgd->pde[pdei];
pgd               426 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	pgd->refs[0]++;
pgd               478 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	it->desc[it->lvl].func->pde(it->vmm, pgd, pdei);
pgd               484 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c nvkm_vmm_ref_swpt(struct nvkm_vmm_iter *it, struct nvkm_vmm_pt *pgd, u32 pdei)
pgd               487 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	struct nvkm_vmm_pt *pgt = pgd->pde[pdei];
pgd               491 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 		if (!pgd->refs[0])
pgd               496 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	pgd->pde[pdei] = pgt;
pgd               541 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 			struct nvkm_vmm_pt *pgd = pgt;
pgd               544 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 			if (ref && NVKM_VMM_PDE_INVALID(pgd->pde[pdei])) {
pgd               545 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 				if (!nvkm_vmm_ref_swpt(&it, pgd, pdei))
pgd               548 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 			it.pt[it.lvl - 1] = pgt = pgd->pde[pdei];
pgd               557 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 				if (!nvkm_vmm_ref_hwpt(&it, pgd, pdei))
pgd               106 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgf100.c gf100_vmm_pgd_pde(struct nvkm_vmm *vmm, struct nvkm_vmm_pt *pgd, u32 pdei)
pgd               108 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgf100.c 	struct nvkm_vmm_pt *pgt = pgd->pde[pdei];
pgd               109 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgf100.c 	struct nvkm_mmu_pt *pd = pgd->pt[0];
pgd               230 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c gp100_vmm_pd0_pde(struct nvkm_vmm *vmm, struct nvkm_vmm_pt *pgd, u32 pdei)
pgd               232 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c 	struct nvkm_vmm_pt *pgt = pgd->pde[pdei];
pgd               233 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c 	struct nvkm_mmu_pt *pd = pgd->pt[0];
pgd               270 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c gp100_vmm_pd1_pde(struct nvkm_vmm *vmm, struct nvkm_vmm_pt *pgd, u32 pdei)
pgd               272 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c 	struct nvkm_vmm_pt *pgt = pgd->pde[pdei];
pgd               273 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c 	struct nvkm_mmu_pt *pd = pgd->pt[0];
pgd               145 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv50.c nv50_vmm_pgd_pde(struct nvkm_vmm *vmm, struct nvkm_vmm_pt *pgd, u32 pdei)
pgd               151 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv50.c 	if (!nv50_vmm_pde(vmm, pgd->pde[pdei], &data))
pgd               653 drivers/iommu/amd_iommu_v2.c 					__pa(pasid_state->mm->pgd));
pgd               340 drivers/iommu/exynos-iommu.c static void __sysmmu_set_ptbase(struct sysmmu_drvdata *data, phys_addr_t pgd)
pgd               343 drivers/iommu/exynos-iommu.c 		writel(pgd, data->sfrbase + REG_PT_BASE_ADDR);
pgd               345 drivers/iommu/exynos-iommu.c 		writel(pgd >> PAGE_SHIFT,
pgd               888 drivers/iommu/intel-iommu.c 	BUG_ON(!domain->pgd);
pgd               894 drivers/iommu/intel-iommu.c 	parent = domain->pgd;
pgd               944 drivers/iommu/intel-iommu.c 	parent = domain->pgd;
pgd              1055 drivers/iommu/intel-iommu.c 			   domain->pgd, 0, start_pfn, last_pfn);
pgd              1059 drivers/iommu/intel-iommu.c 		free_pgtable_page(domain->pgd);
pgd              1060 drivers/iommu/intel-iommu.c 		domain->pgd = NULL;
pgd              1158 drivers/iommu/intel-iommu.c 				       domain->pgd, 0, start_pfn, last_pfn, NULL);
pgd              1162 drivers/iommu/intel-iommu.c 		struct page *pgd_page = virt_to_page(domain->pgd);
pgd              1166 drivers/iommu/intel-iommu.c 		domain->pgd = NULL;
pgd              1897 drivers/iommu/intel-iommu.c 	domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
pgd              1898 drivers/iommu/intel-iommu.c 	if (!domain->pgd)
pgd              1900 drivers/iommu/intel-iommu.c 	__iommu_flush_cache(iommu, domain->pgd, PAGE_SIZE);
pgd              1913 drivers/iommu/intel-iommu.c 	if (domain->pgd) {
pgd              1993 drivers/iommu/intel-iommu.c 	BUG_ON(!domain->pgd);
pgd              2054 drivers/iommu/intel-iommu.c 		struct dma_pte *pgd = domain->pgd;
pgd              2066 drivers/iommu/intel-iommu.c 				pgd = phys_to_virt(dma_pte_addr(pgd));
pgd              2067 drivers/iommu/intel-iommu.c 				if (!dma_pte_present(pgd))
pgd              2077 drivers/iommu/intel-iommu.c 			context_set_address_root(context, virt_to_phys(pgd));
pgd              5156 drivers/iommu/intel-iommu.c 	domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
pgd              5157 drivers/iommu/intel-iommu.c 	if (!domain->pgd)
pgd              5159 drivers/iommu/intel-iommu.c 	domain_flush_cache(domain, domain->pgd, PAGE_SIZE);
pgd              5370 drivers/iommu/intel-iommu.c 		pte = dmar_domain->pgd;
pgd              5372 drivers/iommu/intel-iommu.c 			dmar_domain->pgd = (struct dma_pte *)
pgd               473 drivers/iommu/intel-pasid.c 				  struct device *dev, pgd_t *pgd,
pgd               491 drivers/iommu/intel-pasid.c 	pasid_set_flptr(pte, (u64)__pa(pgd));
pgd               543 drivers/iommu/intel-pasid.c 	struct dma_pte *pgd;
pgd               562 drivers/iommu/intel-pasid.c 	pgd = domain->pgd;
pgd               564 drivers/iommu/intel-pasid.c 		pgd = phys_to_virt(dma_pte_addr(pgd));
pgd               565 drivers/iommu/intel-pasid.c 		if (!dma_pte_present(pgd)) {
pgd               571 drivers/iommu/intel-pasid.c 	pgd_val = virt_to_phys(pgd);
pgd                88 drivers/iommu/intel-pasid.h 				  struct device *dev, pgd_t *pgd,
pgd               345 drivers/iommu/intel-svm.c 				mm ? mm->pgd : init_mm.pgd,
pgd               366 drivers/iommu/intel-svm.c 						mm ? mm->pgd : init_mm.pgd,
pgd               169 drivers/iommu/io-pgtable-arm-v7s.c 	arm_v7s_iopte		*pgd;
pgd               535 drivers/iommu/io-pgtable-arm-v7s.c 	ret = __arm_v7s_map(data, iova, paddr, size, prot, 1, data->pgd);
pgd               556 drivers/iommu/io-pgtable-arm-v7s.c 		arm_v7s_iopte pte = data->pgd[i];
pgd               562 drivers/iommu/io-pgtable-arm-v7s.c 	__arm_v7s_free_table(data->pgd, 1, data);
pgd               725 drivers/iommu/io-pgtable-arm-v7s.c 	return __arm_v7s_unmap(data, gather, iova, size, 1, data->pgd);
pgd               732 drivers/iommu/io-pgtable-arm-v7s.c 	arm_v7s_iopte *ptep = data->pgd, pte;
pgd               818 drivers/iommu/io-pgtable-arm-v7s.c 	data->pgd = __arm_v7s_alloc_table(1, GFP_KERNEL, data);
pgd               819 drivers/iommu/io-pgtable-arm-v7s.c 	if (!data->pgd)
pgd               826 drivers/iommu/io-pgtable-arm-v7s.c 	cfg->arm_v7s_cfg.ttbr[0] = virt_to_phys(data->pgd) |
pgd               188 drivers/iommu/io-pgtable-arm.c 	void			*pgd;
pgd               482 drivers/iommu/io-pgtable-arm.c 	arm_lpae_iopte *ptep = data->pgd;
pgd               540 drivers/iommu/io-pgtable-arm.c 	__arm_lpae_free_pgtable(data, ARM_LPAE_START_LVL(data), data->pgd);
pgd               655 drivers/iommu/io-pgtable-arm.c 	arm_lpae_iopte *ptep = data->pgd;
pgd               668 drivers/iommu/io-pgtable-arm.c 	arm_lpae_iopte pte, *ptep = data->pgd;
pgd               871 drivers/iommu/io-pgtable-arm.c 	data->pgd = __arm_lpae_alloc_pages(data->pgd_size, GFP_KERNEL, cfg);
pgd               872 drivers/iommu/io-pgtable-arm.c 	if (!data->pgd)
pgd               879 drivers/iommu/io-pgtable-arm.c 	cfg->arm_lpae_s1_cfg.ttbr[0] = virt_to_phys(data->pgd);
pgd               968 drivers/iommu/io-pgtable-arm.c 	data->pgd = __arm_lpae_alloc_pages(data->pgd_size, GFP_KERNEL, cfg);
pgd               969 drivers/iommu/io-pgtable-arm.c 	if (!data->pgd)
pgd               976 drivers/iommu/io-pgtable-arm.c 	cfg->arm_lpae_s2_cfg.vttbr = virt_to_phys(data->pgd);
pgd              1056 drivers/iommu/io-pgtable-arm.c 	data->pgd = __arm_lpae_alloc_pages(data->pgd_size, GFP_KERNEL, cfg);
pgd              1057 drivers/iommu/io-pgtable-arm.c 	if (!data->pgd)
pgd              1063 drivers/iommu/io-pgtable-arm.c 	cfg->arm_mali_lpae_cfg.transtab = virt_to_phys(data->pgd) |
pgd              1136 drivers/iommu/io-pgtable-arm.c 		data->bits_per_level, data->pgd);
pgd              1630 drivers/iommu/omap-iommu.c 	u32 *pgd, *pte;
pgd              1637 drivers/iommu/omap-iommu.c 	iopgtable_lookup_entry(oiommu, da, &pgd, &pte);
pgd              1648 drivers/iommu/omap-iommu.c 		if (iopgd_is_section(*pgd))
pgd              1649 drivers/iommu/omap-iommu.c 			ret = omap_iommu_translate(*pgd, da, IOSECTION_MASK);
pgd              1650 drivers/iommu/omap-iommu.c 		else if (iopgd_is_super(*pgd))
pgd              1651 drivers/iommu/omap-iommu.c 			ret = omap_iommu_translate(*pgd, da, IOSUPER_MASK);
pgd              1653 drivers/iommu/omap-iommu.c 			dev_err(dev, "bogus pgd 0x%x, da 0x%llx", *pgd,
pgd               282 fs/userfaultfd.c 	pgd_t *pgd;
pgd               291 fs/userfaultfd.c 	pgd = pgd_offset(mm, address);
pgd               292 fs/userfaultfd.c 	if (!pgd_present(*pgd))
pgd               294 fs/userfaultfd.c 	p4d = p4d_offset(pgd, address);
pgd                19 include/asm-generic/4level-fixup.h #define pud_offset(pgd, start)		(pgd)
pgd                20 include/asm-generic/5level-fixup.h #define p4d_alloc(mm, pgd, address)	(pgd)
pgd                21 include/asm-generic/5level-fixup.h #define p4d_offset(pgd, start)		(pgd)
pgd                44 include/asm-generic/page.h 	unsigned long pgd;
pgd                53 include/asm-generic/page.h #define pgd_val(x)	((x).pgd)
pgd                15 include/asm-generic/pgtable-nop4d-hack.h typedef struct { pgd_t pgd; } pud_t;
pgd                27 include/asm-generic/pgtable-nop4d-hack.h static inline int pgd_none(pgd_t pgd)		{ return 0; }
pgd                28 include/asm-generic/pgtable-nop4d-hack.h static inline int pgd_bad(pgd_t pgd)		{ return 0; }
pgd                29 include/asm-generic/pgtable-nop4d-hack.h static inline int pgd_present(pgd_t pgd)	{ return 1; }
pgd                30 include/asm-generic/pgtable-nop4d-hack.h static inline void pgd_clear(pgd_t *pgd)	{ }
pgd                31 include/asm-generic/pgtable-nop4d-hack.h #define pud_ERROR(pud)				(pgd_ERROR((pud).pgd))
pgd                33 include/asm-generic/pgtable-nop4d-hack.h #define pgd_populate(mm, pgd, pud)		do { } while (0)
pgd                34 include/asm-generic/pgtable-nop4d-hack.h #define pgd_populate_safe(mm, pgd, pud)		do { } while (0)
pgd                41 include/asm-generic/pgtable-nop4d-hack.h static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
pgd                43 include/asm-generic/pgtable-nop4d-hack.h 	return (pud_t *)pgd;
pgd                46 include/asm-generic/pgtable-nop4d-hack.h #define pud_val(x)				(pgd_val((x).pgd))
pgd                49 include/asm-generic/pgtable-nop4d-hack.h #define pgd_page(pgd)				(pud_page((pud_t){ pgd }))
pgd                50 include/asm-generic/pgtable-nop4d-hack.h #define pgd_page_vaddr(pgd)			(pud_page_vaddr((pud_t){ pgd }))
pgd                 9 include/asm-generic/pgtable-nop4d.h typedef struct { pgd_t pgd; } p4d_t;
pgd                22 include/asm-generic/pgtable-nop4d.h static inline int pgd_none(pgd_t pgd)		{ return 0; }
pgd                23 include/asm-generic/pgtable-nop4d.h static inline int pgd_bad(pgd_t pgd)		{ return 0; }
pgd                24 include/asm-generic/pgtable-nop4d.h static inline int pgd_present(pgd_t pgd)	{ return 1; }
pgd                25 include/asm-generic/pgtable-nop4d.h static inline void pgd_clear(pgd_t *pgd)	{ }
pgd                26 include/asm-generic/pgtable-nop4d.h #define p4d_ERROR(p4d)				(pgd_ERROR((p4d).pgd))
pgd                28 include/asm-generic/pgtable-nop4d.h #define pgd_populate(mm, pgd, p4d)		do { } while (0)
pgd                29 include/asm-generic/pgtable-nop4d.h #define pgd_populate_safe(mm, pgd, p4d)		do { } while (0)
pgd                36 include/asm-generic/pgtable-nop4d.h static inline p4d_t *p4d_offset(pgd_t *pgd, unsigned long address)
pgd                38 include/asm-generic/pgtable-nop4d.h 	return (p4d_t *)pgd;
pgd                41 include/asm-generic/pgtable-nop4d.h #define p4d_val(x)				(pgd_val((x).pgd))
pgd                44 include/asm-generic/pgtable-nop4d.h #define pgd_page(pgd)				(p4d_page((p4d_t){ pgd }))
pgd                45 include/asm-generic/pgtable-nop4d.h #define pgd_page_vaddr(pgd)			(p4d_page_vaddr((p4d_t){ pgd }))
pgd               373 include/asm-generic/pgtable.h #define pgd_access_permitted(pgd, write) \
pgd               374 include/asm-generic/pgtable.h 	(pgd_present(pgd) && (!(write) || pgd_write(pgd)))
pgd               435 include/asm-generic/pgtable.h #define set_pgd_safe(pgdp, pgd) \
pgd               437 include/asm-generic/pgtable.h 	WARN_ON_ONCE(pgd_present(*pgdp) && !pgd_same(*pgdp, pgd)); \
pgd               438 include/asm-generic/pgtable.h 	set_pgd(pgdp, pgd); \
pgd               565 include/asm-generic/pgtable.h static inline int pgd_none_or_clear_bad(pgd_t *pgd)
pgd               567 include/asm-generic/pgtable.h 	if (pgd_none(*pgd))
pgd               569 include/asm-generic/pgtable.h 	if (unlikely(pgd_bad(*pgd))) {
pgd               570 include/asm-generic/pgtable.h 		pgd_clear_bad(pgd);
pgd               135 include/linux/hugetlb.h 			     pgd_t *pgd, int flags);
pgd               180 include/linux/hugetlb.h #define follow_huge_pgd(mm, addr, pgd, flags)	NULL
pgd               237 include/linux/hugetlb.h static inline int pgd_write(pgd_t pgd)
pgd               496 include/linux/intel-iommu.h 	struct dma_pte	*pgd;		/* virtual address */
pgd               576 include/linux/mm.h static inline int pgd_devmap(pgd_t pgd)
pgd              1761 include/linux/mm.h static inline int __p4d_alloc(struct mm_struct *mm, pgd_t *pgd,
pgd              1767 include/linux/mm.h int __p4d_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
pgd              1867 include/linux/mm.h static inline p4d_t *p4d_alloc(struct mm_struct *mm, pgd_t *pgd,
pgd              1870 include/linux/mm.h 	return (unlikely(pgd_none(*pgd)) && __p4d_alloc(mm, pgd, address)) ?
pgd              1871 include/linux/mm.h 		NULL : p4d_offset(pgd, address);
pgd              2776 include/linux/mm.h p4d_t *vmemmap_p4d_populate(pgd_t *pgd, unsigned long addr, int node);
pgd               389 include/linux/mm_types.h 		pgd_t * pgd;
pgd               341 include/trace/events/xen.h 	    TP_PROTO(struct mm_struct *mm, pgd_t *pgd),
pgd               342 include/trace/events/xen.h 	    TP_ARGS(mm, pgd),
pgd               345 include/trace/events/xen.h 		    __field(pgd_t *, pgd)
pgd               348 include/trace/events/xen.h 			   __entry->pgd = pgd),
pgd               349 include/trace/events/xen.h 	    TP_printk("mm %p pgd %p", __entry->mm, __entry->pgd)
pgd               353 include/trace/events/xen.h 		TP_PROTO(struct mm_struct *mm, pgd_t *pgd),	\
pgd               354 include/trace/events/xen.h 		     TP_ARGS(mm, pgd))
pgd               631 kernel/fork.c  	mm->pgd = pgd_alloc(mm);
pgd               632 kernel/fork.c  	if (unlikely(!mm->pgd))
pgd               639 kernel/fork.c  	pgd_free(mm, mm->pgd);
pgd               189 lib/ioremap.c  static inline int ioremap_p4d_range(pgd_t *pgd, unsigned long addr,
pgd               195 lib/ioremap.c  	p4d = p4d_alloc(&init_mm, pgd, addr);
pgd               213 lib/ioremap.c  	pgd_t *pgd;
pgd               222 lib/ioremap.c  	pgd = pgd_offset_k(addr);
pgd               225 lib/ioremap.c  		err = ioremap_p4d_range(pgd, addr, next, phys_addr, prot);
pgd               228 lib/ioremap.c  	} while (pgd++, phys_addr += (next - addr), addr = next, addr != end);
pgd               171 mm/debug.c     		mm->pgd, atomic_read(&mm->mm_users),
pgd               521 mm/gup.c       	pgd_t *pgd;
pgd               534 mm/gup.c       	pgd = pgd_offset(mm, address);
pgd               536 mm/gup.c       	if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
pgd               539 mm/gup.c       	if (pgd_huge(*pgd)) {
pgd               540 mm/gup.c       		page = follow_huge_pgd(mm, address, pgd, flags);
pgd               545 mm/gup.c       	if (is_hugepd(__hugepd(pgd_val(*pgd)))) {
pgd               547 mm/gup.c       				      __hugepd(pgd_val(*pgd)), flags,
pgd               554 mm/gup.c       	return follow_p4d_mask(vma, address, pgd, flags, ctx);
pgd               573 mm/gup.c       	pgd_t *pgd;
pgd               584 mm/gup.c       		pgd = pgd_offset_k(address);
pgd               586 mm/gup.c       		pgd = pgd_offset_gate(mm, address);
pgd               587 mm/gup.c       	if (pgd_none(*pgd))
pgd               589 mm/gup.c       	p4d = p4d_offset(pgd, address);
pgd              2258 mm/gup.c       static int gup_p4d_range(pgd_t pgd, unsigned long addr, unsigned long end,
pgd              2264 mm/gup.c       	p4dp = p4d_offset(&pgd, addr);
pgd              2291 mm/gup.c       		pgd_t pgd = READ_ONCE(*pgdp);
pgd              2294 mm/gup.c       		if (pgd_none(pgd))
pgd              2296 mm/gup.c       		if (unlikely(pgd_huge(pgd))) {
pgd              2297 mm/gup.c       			if (!gup_huge_pgd(pgd, pgdp, addr, next, flags,
pgd              2300 mm/gup.c       		} else if (unlikely(is_hugepd(__hugepd(pgd_val(pgd))))) {
pgd              2301 mm/gup.c       			if (!gup_huge_pd(__hugepd(pgd_val(pgd)), addr,
pgd              2304 mm/gup.c       		} else if (!gup_p4d_range(pgd, addr, next, flags, pages, nr))
pgd              2347 mm/huge_memory.c 	pgd_t *pgd;
pgd              2352 mm/huge_memory.c 	pgd = pgd_offset(vma->vm_mm, address);
pgd              2353 mm/huge_memory.c 	if (!pgd_present(*pgd))
pgd              2356 mm/huge_memory.c 	p4d = p4d_offset(pgd, address);
pgd              4942 mm/hugetlb.c   	pgd_t *pgd = pgd_offset(mm, *addr);
pgd              4943 mm/hugetlb.c   	p4d_t *p4d = p4d_offset(pgd, *addr);
pgd              4979 mm/hugetlb.c   	pgd_t *pgd;
pgd              4984 mm/hugetlb.c   	pgd = pgd_offset(mm, addr);
pgd              4985 mm/hugetlb.c   	p4d = p4d_alloc(mm, pgd, addr);
pgd              5017 mm/hugetlb.c   	pgd_t *pgd;
pgd              5022 mm/hugetlb.c   	pgd = pgd_offset(mm, addr);
pgd              5023 mm/hugetlb.c   	if (!pgd_present(*pgd))
pgd              5025 mm/hugetlb.c   	p4d = p4d_offset(pgd, addr);
pgd              5117 mm/hugetlb.c   follow_huge_pgd(struct mm_struct *mm, unsigned long address, pgd_t *pgd, int flags)
pgd              5122 mm/hugetlb.c   	return pte_page(*(pte_t *)pgd) + ((address & ~PGDIR_MASK) >> PAGE_SHIFT);
pgd                31 mm/init-mm.c   	.pgd		= swapper_pg_dir,
pgd               641 mm/kasan/common.c 	pgd_t *pgd = pgd_offset_k(addr);
pgd               647 mm/kasan/common.c 	if (pgd_none(*pgd))
pgd               649 mm/kasan/common.c 	p4d = p4d_offset(pgd, addr);
pgd                38 mm/kasan/init.c static inline bool kasan_p4d_table(pgd_t pgd)
pgd                40 mm/kasan/init.c 	return pgd_page(pgd) == virt_to_page(lm_alias(kasan_early_shadow_p4d));
pgd                43 mm/kasan/init.c static inline bool kasan_p4d_table(pgd_t pgd)
pgd               183 mm/kasan/init.c static int __ref zero_p4d_populate(pgd_t *pgd, unsigned long addr,
pgd               186 mm/kasan/init.c 	p4d_t *p4d = p4d_offset(pgd, addr);
pgd               235 mm/kasan/init.c 	pgd_t *pgd = pgd_offset_k(addr);
pgd               264 mm/kasan/init.c 			pgd_populate(&init_mm, pgd,
pgd               267 mm/kasan/init.c 			p4d = p4d_offset(pgd, addr);
pgd               279 mm/kasan/init.c 		if (pgd_none(*pgd)) {
pgd               283 mm/kasan/init.c 				p = p4d_alloc(&init_mm, pgd, addr);
pgd               287 mm/kasan/init.c 				pgd_populate(&init_mm, pgd,
pgd               291 mm/kasan/init.c 		zero_p4d_populate(pgd, addr, next);
pgd               292 mm/kasan/init.c 	} while (pgd++, addr = next, addr != end);
pgd               342 mm/kasan/init.c static void kasan_free_p4d(p4d_t *p4d_start, pgd_t *pgd)
pgd               353 mm/kasan/init.c 	p4d_free(&init_mm, (p4d_t *)page_to_virt(pgd_page(*pgd)));
pgd               354 mm/kasan/init.c 	pgd_clear(pgd);
pgd               455 mm/kasan/init.c 	pgd_t *pgd;
pgd               470 mm/kasan/init.c 		pgd = pgd_offset_k(addr);
pgd               471 mm/kasan/init.c 		if (!pgd_present(*pgd))
pgd               474 mm/kasan/init.c 		if (kasan_p4d_table(*pgd)) {
pgd               477 mm/kasan/init.c 				pgd_clear(pgd);
pgd               481 mm/kasan/init.c 		p4d = p4d_offset(pgd, addr);
pgd               483 mm/kasan/init.c 		kasan_free_p4d(p4d_offset(pgd, 0), pgd);
pgd               268 mm/memory-failure.c 	pgd_t *pgd;
pgd               274 mm/memory-failure.c 	pgd = pgd_offset(vma->vm_mm, address);
pgd               275 mm/memory-failure.c 	if (!pgd_present(*pgd))
pgd               277 mm/memory-failure.c 	p4d = p4d_offset(pgd, address);
pgd               272 mm/memory.c    static inline void free_p4d_range(struct mmu_gather *tlb, pgd_t *pgd,
pgd               281 mm/memory.c    	p4d = p4d_offset(pgd, addr);
pgd               300 mm/memory.c    	p4d = p4d_offset(pgd, start);
pgd               301 mm/memory.c    	pgd_clear(pgd);
pgd               312 mm/memory.c    	pgd_t *pgd;
pgd               361 mm/memory.c    	pgd = pgd_offset(tlb->mm, addr);
pgd               364 mm/memory.c    		if (pgd_none_or_clear_bad(pgd))
pgd               366 mm/memory.c    		free_p4d_range(tlb, pgd, addr, next, floor, ceiling);
pgd               367 mm/memory.c    	} while (pgd++, addr = next, addr != end);
pgd               484 mm/memory.c    	pgd_t *pgd = pgd_offset(vma->vm_mm, addr);
pgd               485 mm/memory.c    	p4d_t *p4d = p4d_offset(pgd, addr);
pgd              1204 mm/memory.c    				struct vm_area_struct *vma, pgd_t *pgd,
pgd              1211 mm/memory.c    	p4d = p4d_offset(pgd, addr);
pgd              1227 mm/memory.c    	pgd_t *pgd;
pgd              1232 mm/memory.c    	pgd = pgd_offset(vma->vm_mm, addr);
pgd              1235 mm/memory.c    		if (pgd_none_or_clear_bad(pgd))
pgd              1237 mm/memory.c    		next = zap_p4d_range(tlb, vma, pgd, addr, next, details);
pgd              1238 mm/memory.c    	} while (pgd++, addr = next, addr != end);
pgd              1395 mm/memory.c    	pgd_t *pgd;
pgd              1400 mm/memory.c    	pgd = pgd_offset(mm, addr);
pgd              1401 mm/memory.c    	p4d = p4d_alloc(mm, pgd, addr);
pgd              1862 mm/memory.c    static inline int remap_p4d_range(struct mm_struct *mm, pgd_t *pgd,
pgd              1871 mm/memory.c    	p4d = p4d_alloc(mm, pgd, addr);
pgd              1899 mm/memory.c    	pgd_t *pgd;
pgd              1938 mm/memory.c    	pgd = pgd_offset(mm, addr);
pgd              1942 mm/memory.c    		err = remap_p4d_range(mm, pgd, addr, next,
pgd              1946 mm/memory.c    	} while (pgd++, addr = next, addr != end);
pgd              2077 mm/memory.c    static int apply_to_p4d_range(struct mm_struct *mm, pgd_t *pgd,
pgd              2085 mm/memory.c    	p4d = p4d_alloc(mm, pgd, addr);
pgd              2104 mm/memory.c    	pgd_t *pgd;
pgd              2112 mm/memory.c    	pgd = pgd_offset(mm, addr);
pgd              2115 mm/memory.c    		err = apply_to_p4d_range(mm, pgd, addr, next, fn, data);
pgd              2118 mm/memory.c    	} while (pgd++, addr = next, addr != end);
pgd              3921 mm/memory.c    	pgd_t *pgd;
pgd              3925 mm/memory.c    	pgd = pgd_offset(mm, address);
pgd              3926 mm/memory.c    	p4d = p4d_alloc(mm, pgd, address);
pgd              4049 mm/memory.c    int __p4d_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
pgd              4058 mm/memory.c    	if (pgd_present(*pgd))		/* Another has populated it */
pgd              4061 mm/memory.c    		pgd_populate(mm, pgd, new);
pgd              4136 mm/memory.c    	pgd_t *pgd;
pgd              4142 mm/memory.c    	pgd = pgd_offset(mm, address);
pgd              4143 mm/memory.c    	if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
pgd              4146 mm/memory.c    	p4d = p4d_offset(pgd, address);
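
Two idioms recur throughout the mm/memory.c hits. First, the range walker: pgd_offset() finds the starting entry, pgd_addr_end() clamps each step to a PGDIR boundary, and pgd_none_or_clear_bad() skips holes. Second, the lock-and-recheck allocator seen in __p4d_alloc(): allocate a table without the lock, then re-test pgd_present() under the lock because another thread may have populated the entry first. A sketch of both, with hypothetical names, assuming kernel context and a config with a real p4d level:

    #include <linux/mm.h>
    #include <asm/pgalloc.h>

    /* Walker idiom from zap_pgd_range()/free_pgd_range() and friends. */
    static void walk_sketch(struct mm_struct *mm,
                            unsigned long addr, unsigned long end)
    {
        pgd_t *pgd = pgd_offset(mm, addr);
        unsigned long next;

        do {
            next = pgd_addr_end(addr, end); /* never crosses a PGDIR boundary */
            if (pgd_none_or_clear_bad(pgd))
                continue;                   /* hole: nothing mapped here */
            /* descend via p4d_offset(pgd, addr) toward the ptes ... */
        } while (pgd++, addr = next, addr != end);
    }

    /* Allocation idiom from __p4d_alloc(): tolerate a racing populater
     * (the real code also issues smp_wmb() before publishing the table). */
    static int p4d_alloc_sketch(struct mm_struct *mm, pgd_t *pgd,
                                unsigned long address)
    {
        p4d_t *new = p4d_alloc_one(mm, address); /* may sleep, no lock held */

        if (!new)
            return -ENOMEM;

        spin_lock(&mm->page_table_lock);
        if (pgd_present(*pgd))        /* someone else populated it first */
            p4d_free(mm, new);
        else
            pgd_populate(mm, pgd, new);
        spin_unlock(&mm->page_table_lock);
        return 0;
    }
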
pgd               282 mm/mprotect.c  		pgd_t *pgd, unsigned long addr, unsigned long end,
pgd               289 mm/mprotect.c  	p4d = p4d_offset(pgd, addr);
pgd               306 mm/mprotect.c  	pgd_t *pgd;
pgd               312 mm/mprotect.c  	pgd = pgd_offset(mm, addr);
pgd               317 mm/mprotect.c  		if (pgd_none_or_clear_bad(pgd))
pgd               319 mm/mprotect.c  		pages += change_p4d_range(vma, pgd, addr, next, newprot,
pgd               321 mm/mprotect.c  	} while (pgd++, addr = next, addr != end);
pgd                35 mm/mremap.c    	pgd_t *pgd;
pgd                40 mm/mremap.c    	pgd = pgd_offset(mm, addr);
pgd                41 mm/mremap.c    	if (pgd_none_or_clear_bad(pgd))
pgd                44 mm/mremap.c    	p4d = p4d_offset(pgd, addr);
pgd                62 mm/mremap.c    	pgd_t *pgd;
pgd                67 mm/mremap.c    	pgd = pgd_offset(mm, addr);
pgd                68 mm/mremap.c    	p4d = p4d_alloc(mm, pgd, addr);
pgd               142 mm/page_vma_mapped.c 	pgd_t *pgd;
pgd               167 mm/page_vma_mapped.c 	pgd = pgd_offset(mm, pvmw->address);
pgd               168 mm/page_vma_mapped.c 	if (!pgd_present(*pgd))
pgd               170 mm/page_vma_mapped.c 	p4d = p4d_offset(pgd, pvmw->address);
pgd               120 mm/pagewalk.c  static int walk_p4d_range(pgd_t *pgd, unsigned long addr, unsigned long end,
pgd               128 mm/pagewalk.c  	p4d = p4d_offset(pgd, addr);
pgd               150 mm/pagewalk.c  	pgd_t *pgd;
pgd               155 mm/pagewalk.c  	pgd = pgd_offset(walk->mm, addr);
pgd               158 mm/pagewalk.c  		if (pgd_none_or_clear_bad(pgd)) {
pgd               166 mm/pagewalk.c  			err = walk_p4d_range(pgd, addr, next, walk);
pgd               169 mm/pagewalk.c  	} while (pgd++, addr = next, addr != end);
pgd                21 mm/pgtable-generic.c void pgd_clear_bad(pgd_t *pgd)
pgd                23 mm/pgtable-generic.c 	pgd_ERROR(*pgd);
pgd                24 mm/pgtable-generic.c 	pgd_clear(pgd);
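
pgd_clear_bad() above is the slow path behind the pgd_none_or_clear_bad() calls that the walkers use: a corrupt entry is reported via pgd_ERROR() and then cleared so the walk can continue. The fast path is a generic inline along these lines (this mirrors the asm-generic implementation):

    /* Generic composition used by the walkers: treat an empty entry and a
     * (reported, then cleared) corrupt entry the same way -- skip it. */
    static inline int pgd_none_or_clear_bad(pgd_t *pgd)
    {
        if (pgd_none(*pgd))
            return 1;
        if (unlikely(pgd_bad(*pgd))) {
            pgd_clear_bad(pgd);     /* pgd_ERROR() + pgd_clear() */
            return 1;
        }
        return 0;
    }
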
pgd               713 mm/rmap.c      	pgd_t *pgd;
pgd               719 mm/rmap.c      	pgd = pgd_offset(mm, address);
pgd               720 mm/rmap.c      	if (!pgd_present(*pgd))
pgd               723 mm/rmap.c      	p4d = p4d_offset(pgd, address);
pgd               192 mm/sparse-vmemmap.c p4d_t * __meminit vmemmap_p4d_populate(pgd_t *pgd, unsigned long addr, int node)
pgd               194 mm/sparse-vmemmap.c 	p4d_t *p4d = p4d_offset(pgd, addr);
pgd               206 mm/sparse-vmemmap.c 	pgd_t *pgd = pgd_offset_k(addr);
pgd               207 mm/sparse-vmemmap.c 	if (pgd_none(*pgd)) {
pgd               211 mm/sparse-vmemmap.c 		pgd_populate(&init_mm, pgd, p);
pgd               213 mm/sparse-vmemmap.c 	return pgd;
pgd               220 mm/sparse-vmemmap.c 	pgd_t *pgd;
pgd               227 mm/sparse-vmemmap.c 		pgd = vmemmap_pgd_populate(addr, node);
pgd               228 mm/sparse-vmemmap.c 		if (!pgd)
pgd               230 mm/sparse-vmemmap.c 		p4d = vmemmap_p4d_populate(pgd, addr, node);
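
vmemmap_pgd_populate() above is the node-aware variant of table population: when the PGD entry is empty, a zeroed page is allocated on the memory node being mapped so the vmemmap's own page tables stay node-local. Roughly, assuming kernel context (the real code uses a local zeroing helper; the memset here stands in for it):

    #include <linux/mm.h>
    #include <linux/string.h>
    #include <asm/pgalloc.h>

    /* Sketch of vmemmap_pgd_populate(): allocate the next-level table on
     * @node if missing, then hand back the entry for further descent. */
    static pgd_t *vmemmap_pgd_sketch(unsigned long addr, int node)
    {
        pgd_t *pgd = pgd_offset_k(addr);

        if (pgd_none(*pgd)) {
            void *p = vmemmap_alloc_block(PAGE_SIZE, node); /* node-local */

            if (!p)
                return NULL;
            memset(p, 0, PAGE_SIZE);
            pgd_populate(&init_mm, pgd, p);
        }
        return pgd;
    }
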
pgd              2022 mm/swapfile.c  static inline int unuse_p4d_range(struct vm_area_struct *vma, pgd_t *pgd,
pgd              2031 mm/swapfile.c  	p4d = p4d_offset(pgd, addr);
pgd              2047 mm/swapfile.c  	pgd_t *pgd;
pgd              2054 mm/swapfile.c  	pgd = pgd_offset(vma->vm_mm, addr);
pgd              2057 mm/swapfile.c  		if (pgd_none_or_clear_bad(pgd))
pgd              2059 mm/swapfile.c  		ret = unuse_p4d_range(vma, pgd, addr, next, type,
pgd              2063 mm/swapfile.c  	} while (pgd++, addr = next, addr != end);
pgd               149 mm/userfaultfd.c 	pgd_t *pgd;
pgd               153 mm/userfaultfd.c 	pgd = pgd_offset(mm, address);
pgd               154 mm/userfaultfd.c 	p4d = p4d_alloc(mm, pgd, address);
pgd               107 mm/vmalloc.c   static void vunmap_p4d_range(pgd_t *pgd, unsigned long addr, unsigned long end)
pgd               112 mm/vmalloc.c   	p4d = p4d_offset(pgd, addr);
pgd               125 mm/vmalloc.c   	pgd_t *pgd;
pgd               129 mm/vmalloc.c   	pgd = pgd_offset_k(addr);
pgd               132 mm/vmalloc.c   		if (pgd_none_or_clear_bad(pgd))
pgd               134 mm/vmalloc.c   		vunmap_p4d_range(pgd, addr, next);
pgd               135 mm/vmalloc.c   	} while (pgd++, addr = next, addr != end);
pgd               198 mm/vmalloc.c   static int vmap_p4d_range(pgd_t *pgd, unsigned long addr,
pgd               204 mm/vmalloc.c   	p4d = p4d_alloc(&init_mm, pgd, addr);
pgd               224 mm/vmalloc.c   	pgd_t *pgd;
pgd               231 mm/vmalloc.c   	pgd = pgd_offset_k(addr);
pgd               234 mm/vmalloc.c   		err = vmap_p4d_range(pgd, addr, next, prot, pages, &nr);
pgd               237 mm/vmalloc.c   	} while (pgd++, addr = next, addr != end);
pgd               274 mm/vmalloc.c   	pgd_t *pgd = pgd_offset_k(addr);
pgd               286 mm/vmalloc.c   	if (pgd_none(*pgd))
pgd               288 mm/vmalloc.c   	p4d = p4d_offset(pgd, addr);
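
The final mm/vmalloc.c hits come from vmalloc_to_page(), the read-only counterpart of the loops above: a straight five-level descent with a presence check at each step. A simplified sketch, assuming kernel context and ignoring the huge-mapping checks the real function performs:

    #include <linux/mm.h>

    /* Sketch of a vmalloc_to_page()-style lookup for a kernel address. */
    static struct page *kva_to_page_sketch(unsigned long addr)
    {
        pgd_t *pgd = pgd_offset_k(addr);
        p4d_t *p4d;
        pud_t *pud;
        pmd_t *pmd;
        pte_t pte;

        if (pgd_none(*pgd))
            return NULL;
        p4d = p4d_offset(pgd, addr);
        if (p4d_none(*p4d))
            return NULL;
        pud = pud_offset(p4d, addr);
        if (pud_none(*pud))
            return NULL;
        pmd = pmd_offset(pud, addr);
        if (pmd_none(*pmd))
            return NULL;
        pte = *pte_offset_kernel(pmd, addr);
        return pte_present(pte) ? pte_page(pte) : NULL;
    }
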
pgd                83 tools/testing/selftests/kvm/lib/aarch64/processor.c 		vm->pgd = paddr;
pgd               108 tools/testing/selftests/kvm/lib/aarch64/processor.c 	ptep = addr_gpa2hva(vm, vm->pgd) + pgd_index(vm, vaddr) * 8;
pgd               155 tools/testing/selftests/kvm/lib/aarch64/processor.c 	ptep = addr_gpa2hva(vm, vm->pgd) + pgd_index(vm, gva) * 8;
pgd               209 tools/testing/selftests/kvm/lib/aarch64/processor.c 	uint64_t pgd, *ptep;
pgd               214 tools/testing/selftests/kvm/lib/aarch64/processor.c 	for (pgd = vm->pgd; pgd < vm->pgd + ptrs_per_pgd(vm) * 8; pgd += 8) {
pgd               215 tools/testing/selftests/kvm/lib/aarch64/processor.c 		ptep = addr_gpa2hva(vm, pgd);
pgd               218 tools/testing/selftests/kvm/lib/aarch64/processor.c 		printf("%*spgd: %lx: %lx at %p\n", indent, "", pgd, *ptep, ptep);
pgd               302 tools/testing/selftests/kvm/lib/aarch64/processor.c 	set_reg(vm, vcpuid, ARM64_SYS_REG(TTBR0_EL1), vm->pgd);
pgd                61 tools/testing/selftests/kvm/lib/kvm_util_internal.h 	vm_paddr_t pgd;
pgd                32 tools/testing/selftests/kvm/lib/s390x/processor.c 	vm->pgd = paddr;
pgd                94 tools/testing/selftests/kvm/lib/s390x/processor.c 	entry = addr_gpa2hva(vm, vm->pgd);
pgd               138 tools/testing/selftests/kvm/lib/s390x/processor.c 	entry = addr_gpa2hva(vm, vm->pgd);
pgd               196 tools/testing/selftests/kvm/lib/s390x/processor.c 	virt_dump_region(stream, vm, indent, vm->pgd);
pgd               264 tools/testing/selftests/kvm/lib/s390x/processor.c 	sregs.crs[1] = vm->pgd | 0xf;		/* Primary region table */
pgd               238 tools/testing/selftests/kvm/lib/x86_64/processor.c 		vm->pgd = paddr;
pgd               290 tools/testing/selftests/kvm/lib/x86_64/processor.c 	pml4e = addr_gpa2hva(vm, vm->pgd);
pgd               359 tools/testing/selftests/kvm/lib/x86_64/processor.c 		vm->pgd);
pgd               560 tools/testing/selftests/kvm/lib/x86_64/processor.c 	pml4e = addr_gpa2hva(vm, vm->pgd);
pgd               640 tools/testing/selftests/kvm/lib/x86_64/processor.c 	sregs.cr3 = vm->pgd;
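
Across the KVM selftest libraries the pattern is the same even though the hardware differs: vm->pgd holds the guest-physical address of the top-level table, addr_gpa2hva() maps any table page into the host for editing, and the arch root register (TTBR0_EL1, s390 control register 1, x86 CR3) is pointed at vm->pgd. As a hypothetical host-side peek at an x86 guest's top level (addr_gpa2hva() comes from kvm_util.h; vm->pgd is visible via kvm_util_internal.h):

    #include <inttypes.h>
    #include <stdio.h>

    #include "kvm_util.h"
    #include "kvm_util_internal.h"  /* struct kvm_vm layout, incl. pgd */

    /* Hypothetical debug helper: list the present PML4 entries of a
     * test VM by mapping the guest PGD page into the host. */
    static void dump_guest_pml4(struct kvm_vm *vm)
    {
        uint64_t *pml4 = addr_gpa2hva(vm, vm->pgd);
        int i;

        for (i = 0; i < 512; i++)
            if (pml4[i] & 1)    /* x86 present bit */
                printf("pml4[%d] = 0x%" PRIx64 "\n", i, pml4[i]);
    }
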
pgd               160 virt/kvm/arm/mmu.c static void clear_stage2_pgd_entry(struct kvm *kvm, pgd_t *pgd, phys_addr_t addr)
pgd               162 virt/kvm/arm/mmu.c 	pud_t *pud_table __maybe_unused = stage2_pud_offset(kvm, pgd, 0UL);
pgd               163 virt/kvm/arm/mmu.c 	stage2_pgd_clear(kvm, pgd);
pgd               166 virt/kvm/arm/mmu.c 	put_page(virt_to_page(pgd));
pgd               297 virt/kvm/arm/mmu.c static void unmap_stage2_puds(struct kvm *kvm, pgd_t *pgd,
pgd               303 virt/kvm/arm/mmu.c 	start_pud = pud = stage2_pud_offset(kvm, pgd, addr);
pgd               321 virt/kvm/arm/mmu.c 		clear_stage2_pgd_entry(kvm, pgd, start_addr);
pgd               337 virt/kvm/arm/mmu.c 	pgd_t *pgd;
pgd               344 virt/kvm/arm/mmu.c 	pgd = kvm->arch.pgd + stage2_pgd_index(kvm, addr);
pgd               351 virt/kvm/arm/mmu.c 		if (!READ_ONCE(kvm->arch.pgd))
pgd               354 virt/kvm/arm/mmu.c 		if (!stage2_pgd_none(kvm, *pgd))
pgd               355 virt/kvm/arm/mmu.c 			unmap_stage2_puds(kvm, pgd, addr, next);
pgd               362 virt/kvm/arm/mmu.c 	} while (pgd++, addr = next, addr != end);
pgd               395 virt/kvm/arm/mmu.c static void stage2_flush_puds(struct kvm *kvm, pgd_t *pgd,
pgd               401 virt/kvm/arm/mmu.c 	pud = stage2_pud_offset(kvm, pgd, addr);
pgd               419 virt/kvm/arm/mmu.c 	pgd_t *pgd;
pgd               421 virt/kvm/arm/mmu.c 	pgd = kvm->arch.pgd + stage2_pgd_index(kvm, addr);
pgd               424 virt/kvm/arm/mmu.c 		if (!stage2_pgd_none(kvm, *pgd))
pgd               425 virt/kvm/arm/mmu.c 			stage2_flush_puds(kvm, pgd, addr, next);
pgd               426 virt/kvm/arm/mmu.c 	} while (pgd++, addr = next, addr != end);
pgd               453 virt/kvm/arm/mmu.c static void clear_hyp_pgd_entry(pgd_t *pgd)
pgd               455 virt/kvm/arm/mmu.c 	pud_t *pud_table __maybe_unused = pud_offset(pgd, 0UL);
pgd               456 virt/kvm/arm/mmu.c 	pgd_clear(pgd);
pgd               458 virt/kvm/arm/mmu.c 	put_page(virt_to_page(pgd));
pgd               512 virt/kvm/arm/mmu.c static void unmap_hyp_puds(pgd_t *pgd, phys_addr_t addr, phys_addr_t end)
pgd               517 virt/kvm/arm/mmu.c 	start_pud = pud = pud_offset(pgd, addr);
pgd               526 virt/kvm/arm/mmu.c 		clear_hyp_pgd_entry(pgd);
pgd               537 virt/kvm/arm/mmu.c 	pgd_t *pgd;
pgd               545 virt/kvm/arm/mmu.c 	pgd = pgdp + kvm_pgd_index(addr, ptrs_per_pgd);
pgd               548 virt/kvm/arm/mmu.c 		if (!pgd_none(*pgd))
pgd               549 virt/kvm/arm/mmu.c 			unmap_hyp_puds(pgd, addr, next);
pgd               550 virt/kvm/arm/mmu.c 	} while (pgd++, addr = next, addr != end);
pgd               659 virt/kvm/arm/mmu.c static int create_hyp_pud_mappings(pgd_t *pgd, unsigned long start,
pgd               670 virt/kvm/arm/mmu.c 		pud = pud_offset(pgd, addr);
pgd               696 virt/kvm/arm/mmu.c 	pgd_t *pgd;
pgd               705 virt/kvm/arm/mmu.c 		pgd = pgdp + kvm_pgd_index(addr, ptrs_per_pgd);
pgd               707 virt/kvm/arm/mmu.c 		if (pgd_none(*pgd)) {
pgd               714 virt/kvm/arm/mmu.c 			kvm_pgd_populate(pgd, pud);
pgd               715 virt/kvm/arm/mmu.c 			get_page(virt_to_page(pgd));
pgd               719 virt/kvm/arm/mmu.c 		err = create_hyp_pud_mappings(pgd, addr, next, pfn, prot);
pgd               781 virt/kvm/arm/mmu.c 	pgd_t *pgd = hyp_pgd;
pgd               814 virt/kvm/arm/mmu.c 		pgd = boot_hyp_pgd;
pgd               816 virt/kvm/arm/mmu.c 	ret = __create_hyp_mappings(pgd, __kvm_idmap_ptrs_per_pgd(),
pgd               902 virt/kvm/arm/mmu.c 	pgd_t *pgd;
pgd               904 virt/kvm/arm/mmu.c 	if (kvm->arch.pgd != NULL) {
pgd               910 virt/kvm/arm/mmu.c 	pgd = alloc_pages_exact(stage2_pgd_size(kvm), GFP_KERNEL | __GFP_ZERO);
pgd               911 virt/kvm/arm/mmu.c 	if (!pgd)
pgd               914 virt/kvm/arm/mmu.c 	pgd_phys = virt_to_phys(pgd);
pgd               918 virt/kvm/arm/mmu.c 	kvm->arch.pgd = pgd;
pgd              1000 virt/kvm/arm/mmu.c 	void *pgd = NULL;
pgd              1003 virt/kvm/arm/mmu.c 	if (kvm->arch.pgd) {
pgd              1005 virt/kvm/arm/mmu.c 		pgd = READ_ONCE(kvm->arch.pgd);
pgd              1006 virt/kvm/arm/mmu.c 		kvm->arch.pgd = NULL;
pgd              1012 virt/kvm/arm/mmu.c 	if (pgd)
pgd              1013 virt/kvm/arm/mmu.c 		free_pages_exact(pgd, stage2_pgd_size(kvm));
pgd              1019 virt/kvm/arm/mmu.c 	pgd_t *pgd;
pgd              1022 virt/kvm/arm/mmu.c 	pgd = kvm->arch.pgd + stage2_pgd_index(kvm, addr);
pgd              1023 virt/kvm/arm/mmu.c 	if (stage2_pgd_none(kvm, *pgd)) {
pgd              1027 virt/kvm/arm/mmu.c 		stage2_pgd_populate(kvm, pgd, pud);
pgd              1028 virt/kvm/arm/mmu.c 		get_page(virt_to_page(pgd));
pgd              1031 virt/kvm/arm/mmu.c 	return stage2_pud_offset(kvm, pgd, addr);
pgd              1476 virt/kvm/arm/mmu.c static void  stage2_wp_puds(struct kvm *kvm, pgd_t *pgd,
pgd              1482 virt/kvm/arm/mmu.c 	pud = stage2_pud_offset(kvm, pgd, addr);
pgd              1504 virt/kvm/arm/mmu.c 	pgd_t *pgd;
pgd              1507 virt/kvm/arm/mmu.c 	pgd = kvm->arch.pgd + stage2_pgd_index(kvm, addr);
pgd              1519 virt/kvm/arm/mmu.c 		if (!READ_ONCE(kvm->arch.pgd))
pgd              1522 virt/kvm/arm/mmu.c 		if (stage2_pgd_present(kvm, *pgd))
pgd              1523 virt/kvm/arm/mmu.c 			stage2_wp_puds(kvm, pgd, addr, next);
pgd              1524 virt/kvm/arm/mmu.c 	} while (pgd++, addr = next, addr != end);
pgd              2054 virt/kvm/arm/mmu.c 	if (!kvm->arch.pgd)
pgd              2085 virt/kvm/arm/mmu.c 	if (!kvm->arch.pgd)
pgd              2139 virt/kvm/arm/mmu.c 	if (!kvm->arch.pgd)
pgd              2147 virt/kvm/arm/mmu.c 	if (!kvm->arch.pgd)
pgd              2172 virt/kvm/arm/mmu.c static int kvm_map_idmap_text(pgd_t *pgd)
pgd              2177 virt/kvm/arm/mmu.c 	err = __create_hyp_mappings(pgd, __kvm_idmap_ptrs_per_pgd(),
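
The stage-2 hits in virt/kvm/arm/mmu.c follow one lifecycle: kvm_alloc_stage2_pgd() makes a single physically contiguous, zeroed allocation (the hardware consumes its physical address via virt_to_phys()); walkers index it with kvm->arch.pgd + stage2_pgd_index(); each table page's reference count tracks how many entries it holds (get_page() on populate, put_page() on clear); and teardown NULLs kvm->arch.pgd under the lock before freeing, which is why the walkers re-check it with READ_ONCE(). A compressed sketch of the allocation side, assuming the arm KVM context:

    #include <linux/kvm_host.h>
    #include <linux/mm.h>
    #include <asm/kvm_mmu.h>

    /* Sketch of kvm_alloc_stage2_pgd(): the stage-2 table is one exact-
     * size, zeroed allocation; its physical address feeds the VTTBR. */
    static int stage2_alloc_sketch(struct kvm *kvm)
    {
        pgd_t *pgd;

        if (kvm->arch.pgd)            /* already allocated */
            return -EINVAL;

        pgd = alloc_pages_exact(stage2_pgd_size(kvm),
                                GFP_KERNEL | __GFP_ZERO);
        if (!pgd)
            return -ENOMEM;

        kvm->arch.pgd = pgd;          /* walkers read this with READ_ONCE() */
        return 0;
    }
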