base_pfn          184 arch/powerpc/mm/init_64.c 	if (start_pfn < altmap->base_pfn)
base_pfn          277 arch/powerpc/mm/init_64.c 	unsigned long base_pfn;
base_pfn          281 arch/powerpc/mm/init_64.c 		alt_start = altmap->base_pfn;
base_pfn          282 arch/powerpc/mm/init_64.c 		alt_end = altmap->base_pfn + altmap->reserve +
base_pfn          306 arch/powerpc/mm/init_64.c 		base_pfn = PHYS_PFN(addr);
base_pfn          308 arch/powerpc/mm/init_64.c 		if (base_pfn >= alt_start && base_pfn < alt_end) {
base_pfn           98 arch/powerpc/platforms/powernv/memtrace.c 	u64 base_pfn;
base_pfn          112 arch/powerpc/platforms/powernv/memtrace.c 	for (base_pfn = end_pfn; base_pfn > start_pfn; base_pfn -= nr_pages) {
base_pfn          113 arch/powerpc/platforms/powernv/memtrace.c 		if (memtrace_offline_pages(nid, base_pfn, nr_pages) == true) {
base_pfn          120 arch/powerpc/platforms/powernv/memtrace.c 			end_pfn = base_pfn + nr_pages;
base_pfn          121 arch/powerpc/platforms/powernv/memtrace.c 	for (pfn = base_pfn; pfn < end_pfn; pfn += bytes >> PAGE_SHIFT) {
base_pfn          125 arch/powerpc/platforms/powernv/memtrace.c 			return base_pfn << PAGE_SHIFT;
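The memtrace hits walk candidate windows of nr_pages pfns downward from end_pfn, take the first one that can be offlined, and return its physical base address (pfn << PAGE_SHIFT). A stand-alone sketch of that scan, with a hypothetical try_offline() callback standing in for memtrace_offline_pages(), the caller assumed to have aligned end_pfn to the last candidate window, and the per-chunk inner pass omitted:

	#include <stdbool.h>
	#include <stdint.h>

	#define PAGE_SHIFT	12	/* illustrative; arch-dependent in the kernel */

	typedef bool (*try_offline_t)(unsigned long base_pfn, unsigned long nr_pages);

	static uint64_t find_trace_window(unsigned long start_pfn,
					  unsigned long end_pfn,
					  unsigned long nr_pages,
					  try_offline_t try_offline)
	{
		unsigned long base_pfn;

		/* Scan windows of nr_pages pfns from the top of the node down. */
		for (base_pfn = end_pfn; base_pfn > start_pfn; base_pfn -= nr_pages) {
			if (try_offline(base_pfn, nr_pages))
				return (uint64_t)base_pfn << PAGE_SHIFT;
		}
		return 0;	/* no window could be offlined */
	}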
base_pfn           37 arch/x86/kernel/cpu/mtrr/cleanup.c 	unsigned long	base_pfn;
base_pfn           77 arch/x86/kernel/cpu/mtrr/cleanup.c 		base = range_state[i].base_pfn;
base_pfn           98 arch/x86/kernel/cpu/mtrr/cleanup.c 		base = range_state[i].base_pfn;
base_pfn          207 arch/x86/kernel/cpu/mtrr/cleanup.c 	range_state[reg].base_pfn = basek >> (PAGE_SHIFT - 10);
base_pfn          219 arch/x86/kernel/cpu/mtrr/cleanup.c 		basek = range_state[reg].base_pfn << (PAGE_SHIFT - 10);
base_pfn          409 arch/x86/kernel/cpu/mtrr/cleanup.c set_var_mtrr_range(struct var_mtrr_state *state, unsigned long base_pfn,
base_pfn          418 arch/x86/kernel/cpu/mtrr/cleanup.c 	basek = base_pfn << (PAGE_SHIFT - 10);
base_pfn          541 arch/x86/kernel/cpu/mtrr/cleanup.c 		start_base = range_state[i].base_pfn << (PAGE_SHIFT - 10);
base_pfn          704 arch/x86/kernel/cpu/mtrr/cleanup.c 		range_state[i].base_pfn = base;
base_pfn          901 arch/x86/kernel/cpu/mtrr/cleanup.c 		range_state[i].base_pfn = base;
base_pfn          911 arch/x86/kernel/cpu/mtrr/cleanup.c 		base = range_state[i].base_pfn;
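The mtrr/cleanup.c hits convert back and forth between pfns and KiB: a pfn covers 2^PAGE_SHIFT bytes and 1 KiB is 2^10 bytes, so shifting by (PAGE_SHIFT - 10) moves between the two units. A small illustrative program demonstrating the round trip:

	#include <stdio.h>

	#define PAGE_SHIFT	12	/* 4 KiB pages, as on x86 */

	int main(void)
	{
		unsigned long base_pfn = 0x100;	/* pfn 256 */
		unsigned long basek = base_pfn << (PAGE_SHIFT - 10);

		/* 256 pages * 4 KiB = 1024 KiB; the reverse shift recovers the pfn. */
		printf("basek=%lu KiB, pfn=%lu\n", basek, basek >> (PAGE_SHIFT - 10));
		return 0;
	}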
base_pfn          203 arch/x86/mm/numa_emulation.c 	unsigned long base_pfn = PHYS_PFN(base);
base_pfn          206 arch/x86/mm/numa_emulation.c 	return PFN_PHYS((max_pfn - base_pfn - hole_pfns) / nr_nodes);
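The numa_emulation.c hit splits the pfns above base, minus holes, evenly among the emulated nodes and returns the per-node share in bytes. A sketch of that arithmetic, where PFN_PHYS(x) is x << PAGE_SHIFT and the helper name is illustrative:

	#define PAGE_SHIFT	12
	#define PFN_PHYS(x)	((unsigned long long)(x) << PAGE_SHIFT)

	static unsigned long long node_size_bytes(unsigned long max_pfn,
						  unsigned long base_pfn,
						  unsigned long hole_pfns,
						  int nr_nodes)
	{
		return PFN_PHYS((max_pfn - base_pfn - hole_pfns) / nr_nodes);
	}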
base_pfn          141 drivers/gpu/drm/i915/gvt/kvmgt.c 	unsigned long base_pfn = 0;
base_pfn          171 drivers/gpu/drm/i915/gvt/kvmgt.c 			base_pfn = pfn;
base_pfn          172 drivers/gpu/drm/i915/gvt/kvmgt.c 		else if (base_pfn + npage != pfn) {
base_pfn          180 drivers/gpu/drm/i915/gvt/kvmgt.c 	*page = pfn_to_page(base_pfn);
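The kvmgt hits record the first pinned pfn as base_pfn and then require every later pfn to equal base_pfn plus its index, so the pinned pages form one physically contiguous block whose first page is returned. A stand-alone sketch of that contiguity check, over a hypothetical pfn array:

	#include <stdbool.h>
	#include <stddef.h>

	static bool pfns_contiguous(const unsigned long *pfns, size_t npages,
				    unsigned long *base_pfn)
	{
		size_t npage;

		for (npage = 0; npage < npages; npage++) {
			if (npage == 0)
				*base_pfn = pfns[0];
			else if (*base_pfn + npage != pfns[npage])
				return false;	/* gap: not contiguous */
		}
		/* On success the caller uses the page at *base_pfn. */
		return npages > 0;
	}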
base_pfn          305 drivers/iommu/dma-iommu.c 	unsigned long order, base_pfn;
base_pfn          316 drivers/iommu/dma-iommu.c 	base_pfn = max_t(unsigned long, 1, base >> order);
base_pfn          326 drivers/iommu/dma-iommu.c 		base_pfn = max_t(unsigned long, base_pfn,
base_pfn          333 drivers/iommu/dma-iommu.c 		    base_pfn != iovad->start_pfn) {
base_pfn          341 drivers/iommu/dma-iommu.c 	init_iova_domain(iovad, 1UL << order, base_pfn);
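The dma-iommu hits derive the IOVA domain's base: the allocation granule is 2^order bytes, and base_pfn is the lowest allocatable pfn in granule units, clamped to at least 1 so that iova 0 is never handed out. A sketch of just that arithmetic (the later clamp against an existing domain's start_pfn is omitted, and the values are illustrative):

	#include <stdio.h>

	static unsigned long iova_base_pfn(unsigned long long base, unsigned int order)
	{
		unsigned long base_pfn = base >> order;

		/* Mirrors max_t(unsigned long, 1, base >> order) above. */
		return base_pfn > 1 ? base_pfn : 1;
	}

	int main(void)
	{
		/* e.g. a 4 KiB granule (order 12) and a 1 MiB aperture start */
		printf("base_pfn=%lu\n", iova_base_pfn(0x100000ULL, 12));
		return 0;
	}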
base_pfn          649 drivers/nvdimm/pfn_devs.c 	unsigned long base_pfn = PHYS_PFN(base);
base_pfn          651 drivers/nvdimm/pfn_devs.c 	return SUBSECTION_ALIGN_DOWN(base_pfn);
base_pfn          657 drivers/nvdimm/pfn_devs.c 	unsigned long base_pfn = PHYS_PFN(base);
base_pfn          659 drivers/nvdimm/pfn_devs.c 	reserve += base_pfn - SUBSECTION_ALIGN_DOWN(base_pfn);
base_pfn          677 drivers/nvdimm/pfn_devs.c 		.base_pfn = init_altmap_base(base),
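The pfn_devs.c hits round the altmap base pfn down to a subsection boundary and add the pfns skipped by that rounding back into the reserve, so the usable area is unchanged. A sketch of both steps, assuming 2 MiB subsections with 4 KiB pages (2^9 pfns per subsection); the helper names mirror but are not the kernel's:

	#define PFN_SUBSECTION_SHIFT	9	/* assumed: 2 MiB / 4 KiB */
	#define SUBSECTION_ALIGN_DOWN(pfn) \
		((pfn) & ~((1UL << PFN_SUBSECTION_SHIFT) - 1))

	static unsigned long init_altmap_base_sketch(unsigned long base_pfn)
	{
		return SUBSECTION_ALIGN_DOWN(base_pfn);
	}

	static unsigned long init_altmap_reserve_sketch(unsigned long base_pfn,
							unsigned long reserve)
	{
		/* Pfns skipped by the rounding are added back to the reserve. */
		return reserve + (base_pfn - SUBSECTION_ALIGN_DOWN(base_pfn));
	}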
base_pfn          247 drivers/staging/media/ipu3/ipu3-dmamap.c 	unsigned long order, base_pfn;
base_pfn          254 drivers/staging/media/ipu3/ipu3-dmamap.c 	base_pfn = max_t(unsigned long, 1, imgu->mmu->aperture_start >> order);
base_pfn          255 drivers/staging/media/ipu3/ipu3-dmamap.c 	init_iova_domain(&imgu->iova_domain, 1UL << order, base_pfn);
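The ipu3 hits repeat the granule arithmetic sketched after the dma-iommu lines above: base_pfn is the aperture start shifted down by the granule order, clamped to at least 1, and then seeds init_iova_domain().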
base_pfn           19 include/linux/memremap.h 	const unsigned long base_pfn;
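The memremap.h hit is the base_pfn member of struct vmem_altmap, the structure the altmap hits throughout this listing dereference. A hedged reconstruction limited to the members those hits exercise (the real kernel definition carries more fields than shown here):

	struct vmem_altmap_sketch {
		const unsigned long base_pfn;	/* first pfn of the device range */
		unsigned long reserve;		/* pfns set aside before the map */
		unsigned long free;		/* pfns available for allocation */
		unsigned long alloc;		/* pfns already handed out */
	};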
base_pfn           45 mm/cma.c       	return PFN_PHYS(cma->base_pfn);
base_pfn           73 mm/cma.c       	return (cma->base_pfn & ((1UL << align_order) - 1))
base_pfn           88 mm/cma.c       	bitmap_no = (pfn - cma->base_pfn) >> cma->order_per_bit;
base_pfn           99 mm/cma.c       	unsigned long base_pfn = cma->base_pfn, pfn = base_pfn;
base_pfn          116 mm/cma.c       		base_pfn = pfn;
base_pfn          128 mm/cma.c       		init_cma_reserved_pageblock(pfn_to_page(base_pfn));
base_pfn          214 mm/cma.c       	cma->base_pfn = PFN_DOWN(base);
base_pfn          462 mm/cma.c       		pfn = cma->base_pfn + (bitmap_no << cma->order_per_bit);
base_pfn          525 mm/cma.c       	if (pfn < cma->base_pfn || pfn >= cma->base_pfn + cma->count)
base_pfn          528 mm/cma.c       	VM_BUG_ON(pfn + count > cma->base_pfn + cma->count);
base_pfn            6 mm/cma.h       	unsigned long   base_pfn;
base_pfn          176 mm/cma_debug.c 			    &cma->base_pfn, &cma_debugfs_fops);
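The mm/cma.c hits all revolve around one piece of arithmetic: each bitmap bit covers 2^order_per_bit pages starting at base_pfn, so pfn-to-bit and bit-to-pfn conversions are exact inverses for aligned pfns, and a bounds check against base_pfn + count decides ownership. A stand-alone sketch; the struct is illustrative, with field names following mm/cma.h:

	#include <stdbool.h>

	struct cma_sketch {
		unsigned long base_pfn;		/* first pfn of the CMA area */
		unsigned long count;		/* size of the area in pages */
		unsigned int order_per_bit;	/* pages covered per bitmap bit */
	};

	static unsigned long cma_bitmap_no(const struct cma_sketch *cma,
					   unsigned long pfn)
	{
		return (pfn - cma->base_pfn) >> cma->order_per_bit;
	}

	static unsigned long cma_pfn(const struct cma_sketch *cma,
				     unsigned long bitmap_no)
	{
		return cma->base_pfn + (bitmap_no << cma->order_per_bit);
	}

	static bool cma_contains(const struct cma_sketch *cma, unsigned long pfn)
	{
		return pfn >= cma->base_pfn && pfn < cma->base_pfn + cma->count;
	}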
base_pfn          298 mm/memory_hotplug.c 		if (altmap->base_pfn != pfn
base_pfn         5899 mm/page_alloc.c 		if (start_pfn == altmap->base_pfn)
base_pfn         5901 mm/page_alloc.c 		end_pfn = altmap->base_pfn + vmem_altmap_offset(altmap);
base_pfn         5967 mm/page_alloc.c 		start_pfn = altmap->base_pfn + vmem_altmap_offset(altmap);
base_pfn           85 mm/sparse-vmemmap.c 	return altmap->base_pfn + altmap->reserve + altmap->alloc
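The sparse-vmemmap.c hit suggests the next pfn an altmap hands out sits past the base, the reserved pfns, and everything already allocated; the mm/page_alloc.c hits apply the same idea when clamping zone initialization past the altmap-backed region via vmem_altmap_offset(), whose body is not part of this listing. A sketch of the next-pfn sum, kept as an assumption because the kernel line above is truncated here:

	static unsigned long altmap_next_pfn(unsigned long base_pfn,
					     unsigned long reserve,
					     unsigned long alloc)
	{
		/* Assumed continuation of the truncated line above. */
		return base_pfn + reserve + alloc;
	}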