Searched refs: PAGE_SHIFT (Results 1 - 200 of 1198), sorted by relevance

/linux-4.1.27/include/linux/
H A Dpfn.h9 #define PFN_UP(x) (((x) + PAGE_SIZE-1) >> PAGE_SHIFT)
10 #define PFN_DOWN(x) ((x) >> PAGE_SHIFT)
11 #define PFN_PHYS(x) ((phys_addr_t)(x) << PAGE_SHIFT)
H A Dnfsacl.h19 >> PAGE_SHIFT)
H A Dvmacache.h11 #define VMACACHE_HASH(addr) ((addr >> PAGE_SHIFT) & VMACACHE_MASK)
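
The PFN_UP/PFN_DOWN/PFN_PHYS helpers listed above are plain shift arithmetic around PAGE_SHIFT. A minimal user-space sketch of the same macros (assuming PAGE_SHIFT = 12, i.e. 4 KiB pages, which the listing itself does not fix) shows the round-down/round-up behaviour:

    #include <stdio.h>

    /* Sketch only: PAGE_SHIFT = 12 is an illustrative assumption. */
    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)
    #define PFN_UP(x)   (((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)
    #define PFN_DOWN(x) ((x) >> PAGE_SHIFT)
    #define PFN_PHYS(x) ((unsigned long long)(x) << PAGE_SHIFT)

    int main(void)
    {
        unsigned long addr = 0x1801;                    /* lies inside page frame 1 */

        printf("PFN_DOWN = %lu\n", PFN_DOWN(addr));     /* 1: frame containing addr */
        printf("PFN_UP   = %lu\n", PFN_UP(addr));       /* 2: first frame boundary at or above addr */
        printf("PFN_PHYS = %#llx\n", PFN_PHYS(2UL));    /* 0x2000: frame number back to a byte address */
        return 0;
    }
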
/linux-4.1.27/arch/arc/include/uapi/asm/
H A Dpage.h12 /* PAGE_SHIFT determines the page size */
14 #define PAGE_SHIFT 14 macro
16 #define PAGE_SHIFT 12 macro
21 * user code (busybox appletlib.h) expects PAGE_SHIFT to be defined w/o
25 #define PAGE_SHIFT 13 macro
29 #define PAGE_SIZE (1 << PAGE_SHIFT)
32 #define PAGE_SIZE (1UL << PAGE_SHIFT) /* Default 8K */
/linux-4.1.27/arch/sparc/include/asm/
H A Dpage.h4 #define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT)
H A Dhighmem.h43 #define PKMAP_SIZE (LAST_PKMAP << PAGE_SHIFT)
44 #define PKMAP_BASE PMD_ALIGN(SRMMU_NOCACHE_VADDR + (SRMMU_MAX_NOCACHE_PAGES << PAGE_SHIFT))
47 #define PKMAP_NR(virt) ((virt - PKMAP_BASE) >> PAGE_SHIFT)
48 #define PKMAP_ADDR(nr) (PKMAP_BASE + ((nr) << PAGE_SHIFT))
H A Dpage_64.h6 #define PAGE_SHIFT 13 macro
8 #define PAGE_SIZE (_AC(1,UL) << PAGE_SHIFT)
14 #if PAGE_SHIFT < 14
26 #define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT)
138 #define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT)
140 #define virt_to_page(kaddr) pfn_to_page(__pa(kaddr)>>PAGE_SHIFT)
142 #define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
H A Dio-unit.h44 unsigned long bmap[(IOUNIT_DMA_SIZE >> (PAGE_SHIFT + 3)) / sizeof(unsigned long)];
52 #define IOUNIT_BMAP1_END (IOUNIT_DMA_SIZE >> (PAGE_SHIFT + 1))
54 #define IOUNIT_BMAP2_END IOUNIT_BMAP2_START + (IOUNIT_DMA_SIZE >> (PAGE_SHIFT + 2))
56 #define IOUNIT_BMAPM_END ((IOUNIT_DMA_SIZE - IOUNIT_DVMA_SIZE) >> PAGE_SHIFT)
H A Dpage_32.h13 #define PAGE_SHIFT 12 macro
14 #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
130 #define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
133 #define virt_addr_valid(kaddr) ((((unsigned long)(kaddr)-PAGE_OFFSET)>>PAGE_SHIFT) < max_mapnr)
H A Dthread_info_64.h102 #if PAGE_SHIFT == 13
104 #define THREAD_SHIFT (PAGE_SHIFT + 1)
105 #else /* PAGE_SHIFT == 13 */
107 #define THREAD_SHIFT PAGE_SHIFT
108 #endif /* PAGE_SHIFT == 13 */
130 #if PAGE_SHIFT == 13
132 #else /* PAGE_SHIFT == 13 */
134 #endif /* PAGE_SHIFT == 13 */
H A Dvaddrs.h50 #define FIXADDR_SIZE ((FIX_KMAP_END + 1) << PAGE_SHIFT)
53 #define __fix_to_virt(x) (FIXADDR_TOP - ((x) << PAGE_SHIFT))
H A Dtsb.h159 srlx REG2, 64 - PAGE_SHIFT, REG2; \
164 srlx REG2, 64 - PAGE_SHIFT, REG2; \
176 srlx REG2, 64 - PAGE_SHIFT, REG2; \
191 srlx REG2, 64 - PAGE_SHIFT, REG2; \
237 srlx REG2, 64 - PAGE_SHIFT, REG2; \
242 srlx REG2, 64 - PAGE_SHIFT, REG2; \
247 srlx REG2, 64 - PAGE_SHIFT, REG2; \
252 srlx REG2, 64 - PAGE_SHIFT, REG2; \
311 srlx VADDR, PAGE_SHIFT, REG2; \
/linux-4.1.27/arch/powerpc/kernel/
H A Dsuspend.c20 unsigned long nosave_begin_pfn = __pa(&__nosave_begin) >> PAGE_SHIFT; pfn_is_nosave()
21 unsigned long nosave_end_pfn = PAGE_ALIGN(__pa(&__nosave_end)) >> PAGE_SHIFT; pfn_is_nosave()
/linux-4.1.27/arch/blackfin/include/asm/
H A Dpage.h10 #define ARCH_PFN_OFFSET (CONFIG_PHY_RAM_BASE_ADDRESS >> PAGE_SHIFT)
11 #define MAP_NR(addr) ((unsigned long)(addr) >> PAGE_SHIFT)
/linux-4.1.27/arch/ia64/include/asm/
H A Dsparsemem.h13 #if ((CONFIG_FORCE_MAX_ZONEORDER - 1 + PAGE_SHIFT) > SECTION_SIZE_BITS)
15 #define SECTION_SIZE_BITS (CONFIG_FORCE_MAX_ZONEORDER - 1 + PAGE_SHIFT)
H A Dpage.h27 * PAGE_SHIFT determines the actual kernel page size.
30 # define PAGE_SHIFT 12 macro
32 # define PAGE_SHIFT 13 macro
34 # define PAGE_SHIFT 14 macro
36 # define PAGE_SHIFT 16 macro
41 #define PAGE_SIZE (__IA64_UL_CONST(1) << PAGE_SHIFT)
95 #define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
123 #define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT)
124 #define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
125 #define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT)
150 | (REGION_OFFSET(x) >> (HPAGE_SHIFT-PAGE_SHIFT)))
151 # define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT)
162 order = order - PAGE_SHIFT - 0xffff + 1; get_order()
H A Dmmzone.h23 int nid = paddr_to_nid(pfn << PAGE_SHIFT); pfn_to_nid()
/linux-4.1.27/arch/parisc/include/asm/
H A Dmmzone.h28 #define PFNNID_SHIFT (30 - PAGE_SHIFT)
33 #define pfn_is_io(pfn) ((pfn & (0xf0000000UL >> PAGE_SHIFT)) == (0xf0000000UL >> PAGE_SHIFT))
36 #define pfn_is_io(pfn) ((pfn & (0xf000000000000000UL >> PAGE_SHIFT)) == (0xf000000000000000UL >> PAGE_SHIFT))
H A Dpage.h7 # define PAGE_SHIFT 12 macro
9 # define PAGE_SHIFT 14 macro
11 # define PAGE_SHIFT 16 macro
15 #define PAGE_SIZE (_AC(1,UL) << PAGE_SHIFT)
151 #define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT)
154 #define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
156 #define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT)
157 #define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
/linux-4.1.27/arch/openrisc/include/asm/
H A Dpage.h23 /* PAGE_SHIFT determines the page size */
25 #define PAGE_SHIFT 13 macro
27 #define PAGE_SIZE (1 << PAGE_SHIFT)
29 #define PAGE_SIZE (1UL << PAGE_SHIFT)
82 #define virt_to_pfn(kaddr) (__pa(kaddr) >> PAGE_SHIFT)
83 #define pfn_to_virt(pfn) __va((pfn) << PAGE_SHIFT)
86 (mem_map + (((unsigned long)(addr)-PAGE_OFFSET) >> PAGE_SHIFT))
88 ((((page) - mem_map) << PAGE_SHIFT) + PAGE_OFFSET)
90 #define page_to_phys(page) ((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT)
H A Dfixmap.h52 #define FIXADDR_SIZE (__end_of_fixed_addresses << PAGE_SHIFT)
56 #define __fix_to_virt(x) (FIXADDR_TOP - ((x) << PAGE_SHIFT))
57 #define __virt_to_fix(x) ((FIXADDR_TOP - ((x)&PAGE_MASK)) >> PAGE_SHIFT)
H A Dpgtable.h60 #define PGDIR_SHIFT (PAGE_SHIFT + (PAGE_SHIFT-2))
70 #define PTRS_PER_PTE (1UL << (PAGE_SHIFT-2))
72 #define PTRS_PER_PGD (1UL << (PAGE_SHIFT-2))
218 ((unsigned long)(address)>>(PAGE_SHIFT-SIZEOF_PTR_LOG2)&PTR_MASK&~PAGE_MASK)
354 #define pte_pagenr(pte) ((__pte_page(pte) - PAGE_OFFSET) >> PAGE_SHIFT)
358 #define __page_address(page) (PAGE_OFFSET + (((page) - mem_map) << PAGE_SHIFT))
371 #define pmd_page(pmd) (pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT))
394 (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
404 #define pte_pfn(x) ((unsigned long)(((x).pte)) >> PAGE_SHIFT)
405 #define pfn_pte(pfn, prot) __pte((((pfn) << PAGE_SHIFT)) | pgprot_val(prot))
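
The pfn_pte()/pte_pfn() pair above shows a pattern that recurs throughout these results: the page frame number sits in the bits at and above PAGE_SHIFT, the protection bits sit below it, so the two conversions round-trip. A simplified sketch (plain unsigned long standing in for the kernel's pte_t/pgprot_t, and PAGE_SHIFT = 13 as in the OpenRISC hit above):

    #include <assert.h>

    #define PAGE_SHIFT 13

    typedef unsigned long pte_t;                  /* simplified stand-in type */

    static pte_t pfn_pte(unsigned long pfn, unsigned long prot)
    {
        return (pfn << PAGE_SHIFT) | prot;        /* frame number | low protection bits */
    }

    static unsigned long pte_pfn(pte_t pte)
    {
        return pte >> PAGE_SHIFT;                 /* protection bits shift away */
    }

    int main(void)
    {
        pte_t pte = pfn_pte(0x1234, 0x1f);        /* hypothetical PFN and prot bits */

        assert(pte_pfn(pte) == 0x1234);
        return 0;
    }
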
/linux-4.1.27/arch/sparc/mm/
H A Dinit_32.c63 get_nr_swap_pages() << (PAGE_SHIFT-10)); show_mem()
77 unsigned long start_pfn = sp_banks[i].base_addr >> PAGE_SHIFT; calc_highpages()
78 unsigned long end_pfn = (sp_banks[i].base_addr + sp_banks[i].num_bytes) >> PAGE_SHIFT; calc_highpages()
95 unsigned long tmp = pfn_base + (SRMMU_MAXMEM >> PAGE_SHIFT); calc_max_low_pfn()
98 last_pfn = (sp_banks[0].base_addr + sp_banks[0].num_bytes) >> PAGE_SHIFT; calc_max_low_pfn()
100 curr_pfn = sp_banks[i].base_addr >> PAGE_SHIFT; calc_max_low_pfn()
108 last_pfn = (sp_banks[i].base_addr + sp_banks[i].num_bytes) >> PAGE_SHIFT; calc_max_low_pfn()
151 start_pfn >>= PAGE_SHIFT; bootmem_init() local
155 max_pfn = end_of_phys_memory >> PAGE_SHIFT; bootmem_init()
160 if (max_low_pfn > pfn_base + (SRMMU_MAXMEM >> PAGE_SHIFT)) { bootmem_init()
161 highstart_pfn = pfn_base + (SRMMU_MAXMEM >> PAGE_SHIFT); bootmem_init()
164 calc_highpages() >> (20 - PAGE_SHIFT)); bootmem_init()
181 if (initrd_start >= (start_pfn << PAGE_SHIFT) && bootmem_init()
182 initrd_start < (start_pfn << PAGE_SHIFT) + 2 * PAGE_SIZE) bootmem_init()
183 bootmap_pfn = PAGE_ALIGN (initrd_end) >> PAGE_SHIFT; bootmem_init()
198 curr_pfn = sp_banks[i].base_addr >> PAGE_SHIFT; bootmem_init()
202 last_pfn = (sp_banks[i].base_addr + sp_banks[i].num_bytes) >> PAGE_SHIFT; bootmem_init()
213 size = (last_pfn - curr_pfn) << PAGE_SHIFT; bootmem_init()
224 *pages_avail -= PAGE_ALIGN(size) >> PAGE_SHIFT; bootmem_init()
231 size = (start_pfn << PAGE_SHIFT) - phys_base; bootmem_init()
233 *pages_avail -= PAGE_ALIGN(size) >> PAGE_SHIFT; bootmem_init()
240 reserve_bootmem((bootmap_pfn << PAGE_SHIFT), size, BOOTMEM_DEFAULT); bootmem_init()
241 *pages_avail -= PAGE_ALIGN(size) >> PAGE_SHIFT; bootmem_init()
306 i = last_valid_pfn >> ((20 - PAGE_SHIFT) + 5); mem_init()
320 high_memory = __va(max_low_pfn << PAGE_SHIFT); mem_init()
324 unsigned long start_pfn = sp_banks[i].base_addr >> PAGE_SHIFT; mem_init()
325 unsigned long end_pfn = (sp_banks[i].base_addr + sp_banks[i].num_bytes) >> PAGE_SHIFT; mem_init()
H A Diommu.c122 iommu->usemap.num_colors = vac_cache_size >> PAGE_SHIFT; sbus_iommu_init()
189 busa0 = iommu->start + (ioptex << PAGE_SHIFT); iommu_get_one()
215 npages = (off + len + PAGE_SIZE-1) >> PAGE_SHIFT; iommu_get_scsi_one()
245 n = (sg->length + sg->offset + PAGE_SIZE-1) >> PAGE_SHIFT; iommu_get_scsi_sgl_gflush()
260 n = (sg->length + sg->offset + PAGE_SIZE-1) >> PAGE_SHIFT; iommu_get_scsi_sgl_pflush()
290 ioptex = (busa - iommu->start) >> PAGE_SHIFT; iommu_release_one()
305 npages = (off + len + PAGE_SIZE-1) >> PAGE_SHIFT; iommu_release_scsi_one()
316 n = (sg->length + sg->offset + PAGE_SIZE-1) >> PAGE_SHIFT; iommu_release_scsi_sgl()
338 ioptex = bit_map_string_get(&iommu->usemap, len >> PAGE_SHIFT, iommu_map_dma_area()
339 addr >> PAGE_SHIFT); iommu_map_dma_area()
383 iommu_flush_iotlb(first, len >> PAGE_SHIFT); iommu_map_dma_area()
387 *pba = iommu->start + (ioptex << PAGE_SHIFT); iommu_map_dma_area()
396 int ioptex = (busa - iommu->start) >> PAGE_SHIFT; iommu_unmap_dma_area()
409 bit_map_clear(&iommu->usemap, ioptex, len >> PAGE_SHIFT); iommu_unmap_dma_area()
H A Dio-unit.c100 npages = ((vaddr & ~PAGE_MASK) + size + (PAGE_SIZE-1)) >> PAGE_SHIFT; iounit_get_area()
133 vaddr = IOUNIT_DMA_BASE + (scan << PAGE_SHIFT) + (vaddr & ~PAGE_MASK); iounit_get_area()
175 len = ((vaddr & ~PAGE_MASK) + len + (PAGE_SIZE-1)) >> PAGE_SHIFT; iounit_release_scsi_one()
176 vaddr = (vaddr - IOUNIT_DMA_BASE) >> PAGE_SHIFT; iounit_release_scsi_one()
192 len = ((sg->dma_address & ~PAGE_MASK) + sg->length + (PAGE_SIZE-1)) >> PAGE_SHIFT; iounit_release_scsi_sgl()
193 vaddr = (sg->dma_address - IOUNIT_DMA_BASE) >> PAGE_SHIFT; iounit_release_scsi_sgl()
228 i = ((addr - IOUNIT_DMA_BASE) >> PAGE_SHIFT); iounit_map_dma_area()
H A Dgup.c84 page = head + ((addr & ~PMD_MASK) >> PAGE_SHIFT); gup_huge_pmd()
174 len = (unsigned long) nr_pages << PAGE_SHIFT; __get_user_pages_fast()
204 len = (unsigned long) nr_pages << PAGE_SHIFT; get_user_pages_fast()
239 VM_BUG_ON(nr != (end - start) >> PAGE_SHIFT); get_user_pages_fast()
249 start += nr << PAGE_SHIFT; get_user_pages_fast()
253 (end - start) >> PAGE_SHIFT, write, 0, pages); get_user_pages_fast()
/linux-4.1.27/arch/mips/include/asm/
H A Dmmzone.h13 #define pfn_to_nid(pfn) pa_to_nid((pfn) << PAGE_SHIFT)
H A Dpage.h18 * PAGE_SHIFT determines the page size
21 #define PAGE_SHIFT 12 macro
24 #define PAGE_SHIFT 13 macro
27 #define PAGE_SHIFT 14 macro
30 #define PAGE_SHIFT 15 macro
33 #define PAGE_SHIFT 16 macro
35 #define PAGE_SIZE (_AC(1,UL) << PAGE_SHIFT)
36 #define PAGE_MASK (~((1 << PAGE_SHIFT) - 1))
54 return (PAGE_SHIFT - 10) / 2; page_size_ftlb()
62 #define HPAGE_SHIFT (PAGE_SHIFT + PAGE_SHIFT - 3)
65 #define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT)
195 #define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT)
H A Dtlb.h22 ((CKSEG0 + ((idx) << (PAGE_SHIFT + 1))) | \
H A Dhighmem.h41 #define PKMAP_NR(virt) ((virt-PKMAP_BASE) >> PAGE_SHIFT)
42 #define PKMAP_ADDR(nr) (PKMAP_BASE + ((nr) << PAGE_SHIFT))
H A Dpgtable-64.h48 #define PGDIR_SHIFT (PAGE_SHIFT + PAGE_SHIFT + PTE_ORDER - 3)
52 #define PMD_SHIFT (PAGE_SHIFT + (PAGE_SHIFT + PTE_ORDER - 3))
57 #define PGDIR_SHIFT (PMD_SHIFT + (PAGE_SHIFT + PMD_ORDER - 3))
228 #define pte_pfn(x) ((unsigned long)((x).pte >> (PAGE_SHIFT + 2)))
229 #define pfn_pte(pfn, prot) __pte(((pfn) << (PAGE_SHIFT + 2)) | pgprot_val(prot))
255 #define pud_page(pud) (pfn_to_page(pud_phys(pud) >> PAGE_SHIFT))
266 (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
H A Dpgtable-32.h42 #define PGDIR_SHIFT (2 * PAGE_SHIFT + PTE_ORDER - PTE_T_LOG2)
50 #define __PGD_ORDER (32 - 3 * PAGE_SHIFT + PGD_T_LOG2 + PTE_T_LOG2)
126 #define pte_pfn(x) ((unsigned long)((x).pte >> (PAGE_SHIFT + 2)))
127 #define pfn_pte(pfn, prot) __pte(((pfn) << (PAGE_SHIFT + 2)) | pgprot_val(prot))
148 (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
/linux-4.1.27/arch/m68k/include/asm/
H A Dpage.h8 /* PAGE_SHIFT determines the page size */
10 #define PAGE_SHIFT 13 macro
12 #define PAGE_SHIFT 12 macro
14 #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
H A Dpage_no.h25 #define virt_to_pfn(kaddr) (__pa(kaddr) >> PAGE_SHIFT)
26 #define pfn_to_virt(pfn) __va((pfn) << PAGE_SHIFT)
28 #define virt_to_page(addr) (mem_map + (((unsigned long)(addr)-PAGE_OFFSET) >> PAGE_SHIFT))
29 #define page_to_virt(page) __va(((((page) - mem_map) << PAGE_SHIFT) + PAGE_OFFSET))
H A Dvirtconvert.h31 __pa(PAGE_OFFSET + (((page) - pg_data_map[0].node_mem_map) << PAGE_SHIFT))
33 #define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT)
H A Da.out-core.h31 dump->u_tsize = ((unsigned long) current->mm->end_code) >> PAGE_SHIFT; aout_dump_thread()
33 (PAGE_SIZE-1))) >> PAGE_SHIFT; aout_dump_thread()
38 dump->u_ssize = ((unsigned long) (TASK_SIZE - dump->start_stack)) >> PAGE_SHIFT; aout_dump_thread()
H A Dmcf_pgtable.h190 #define pte_pagenr(pte) ((__pte_page(pte) - PAGE_OFFSET) >> PAGE_SHIFT)
352 #define __pte_offset(address) ((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
398 #define pmd_page(pmd) (pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT))
403 #define pfn_pte(pfn, prot) __pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
404 #define pte_pfn(pte) (pte_val(pte) >> PAGE_SHIFT)
/linux-4.1.27/include/asm-generic/
H A Dgetorder.h18 size >>= PAGE_SHIFT; __get_order() local
52 ((n) == 0UL) ? BITS_PER_LONG - PAGE_SHIFT : \
53 (((n) < (1UL << PAGE_SHIFT)) ? 0 : \
54 ilog2((n) - 1) - PAGE_SHIFT + 1) \
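
The getorder.h hits above compute an allocation order: the power-of-two number of pages needed to cover a byte count. A user-space sketch of the constant-expression branch quoted above (ilog2(n - 1) - PAGE_SHIFT + 1), assuming PAGE_SHIFT = 12 and ignoring the n == 0 special case:

    #include <stdio.h>

    #define PAGE_SHIFT 12

    static int ilog2_ul(unsigned long n)          /* floor(log2(n)), n > 0 */
    {
        int bit = -1;

        while (n) {
            n >>= 1;
            bit++;
        }
        return bit;
    }

    static int get_order(unsigned long size)
    {
        if (size < (1UL << PAGE_SHIFT))
            return 0;
        return ilog2_ul(size - 1) - PAGE_SHIFT + 1;
    }

    int main(void)
    {
        printf("%d %d %d\n", get_order(4096), get_order(4097), get_order(1UL << 20));
        /* prints "0 1 8": 1 page, 2 pages, 256 pages, each rounded up to a power of two */
        return 0;
    }
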
H A Dpage.h13 /* PAGE_SHIFT determines the page size */
15 #define PAGE_SHIFT 12 macro
17 #define PAGE_SIZE (1 << PAGE_SHIFT)
19 #define PAGE_SIZE (1UL << PAGE_SHIFT)
75 #define ARCH_PFN_OFFSET (PAGE_OFFSET >> PAGE_SHIFT)
83 #define virt_to_pfn(kaddr) (__pa(kaddr) >> PAGE_SHIFT)
84 #define pfn_to_virt(pfn) __va((pfn) << PAGE_SHIFT)
90 #define page_to_phys(page) ((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT)
H A Dfixmap.h20 #define __fix_to_virt(x) (FIXADDR_TOP - ((x) << PAGE_SHIFT))
21 #define __virt_to_fix(x) ((FIXADDR_TOP - ((x)&PAGE_MASK)) >> PAGE_SHIFT)
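
The generic fixmap helpers above index downward from FIXADDR_TOP: slot x lives x pages below the top of the fixmap area, and __virt_to_fix() inverts that for any address inside the slot's page. A small self-checking sketch with illustrative values (FIXADDR_TOP and PAGE_SHIFT here are arbitrary assumptions, not the kernel's):

    #include <assert.h>

    #define PAGE_SHIFT  12
    #define PAGE_MASK   (~((1UL << PAGE_SHIFT) - 1))
    #define FIXADDR_TOP 0xfffff000UL

    #define __fix_to_virt(x) (FIXADDR_TOP - ((x) << PAGE_SHIFT))
    #define __virt_to_fix(x) ((FIXADDR_TOP - ((x) & PAGE_MASK)) >> PAGE_SHIFT)

    int main(void)
    {
        assert(__fix_to_virt(3UL) == 0xffffc000UL);    /* slot 3 sits 3 pages below the top */
        assert(__virt_to_fix(0xffffc123UL) == 3UL);    /* any address in that page maps back to slot 3 */
        return 0;
    }
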
/linux-4.1.27/arch/mn10300/include/asm/
H A Dpage.h14 /* PAGE_SHIFT determines the page size */
15 #define PAGE_SHIFT 12 macro
18 #define PAGE_SIZE (1UL << PAGE_SHIFT)
21 #define PAGE_SIZE +(1 << PAGE_SHIFT) /* unary plus marks an
49 #define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT)
82 size = (size - 1) >> (PAGE_SHIFT - 1); get_order()
103 #define __pfn_disp (CONFIG_KERNEL_RAM_BASE_ADDRESS >> PAGE_SHIFT)
107 #define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT)
117 #define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
118 #define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
119 #define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT)
/linux-4.1.27/arch/metag/include/asm/
H A Dpage.h8 /* PAGE_SHIFT determines the page size */
10 #define PAGE_SHIFT 12 macro
12 #define PAGE_SHIFT 13 macro
14 #define PAGE_SHIFT 14 macro
17 #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
45 # define HUGETLB_PAGE_ORDER (HPAGE_SHIFT-PAGE_SHIFT)
108 #define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
109 #define page_to_virt(page) __va(page_to_pfn(page) << PAGE_SHIFT)
110 #define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
111 #define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT)
118 #define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT)
H A Dhighmem.h29 #define PKMAP_NR(virt) (((virt) - PKMAP_BASE) >> PAGE_SHIFT)
30 #define PKMAP_ADDR(nr) (PKMAP_BASE + ((nr) << PAGE_SHIFT))
H A Dpgtable.h104 #define pte_pfn(pte) (pte_val(pte) >> PAGE_SHIFT)
106 #define pfn_pte(pfn, prot) __pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
170 #define pmd_page(pmd) (pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT))
194 /* all pages are of size (1 << PAGE_SHIFT), so no need to read 1st level pt */
196 (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
/linux-4.1.27/arch/cris/include/asm/
H A Dpage.h7 /* PAGE_SHIFT determines the page size */
8 #define PAGE_SHIFT 13 macro
9 #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
42 #define ARCH_PFN_OFFSET (PAGE_OFFSET >> PAGE_SHIFT)
43 #define pfn_valid(pfn) (((pfn) - (PAGE_OFFSET >> PAGE_SHIFT)) < max_mapnr)
53 #define virt_to_page(kaddr) (mem_map + (((unsigned long)(kaddr) - PAGE_OFFSET) >> PAGE_SHIFT))
54 #define virt_addr_valid(kaddr) pfn_valid((unsigned)(kaddr) >> PAGE_SHIFT)
60 #define page_to_phys(page) __pa((((page) - mem_map) << PAGE_SHIFT) + PAGE_OFFSET)
H A Dpgtable.h51 #define PGDIR_SHIFT (PAGE_SHIFT + (PAGE_SHIFT-2))
61 #define PTRS_PER_PTE (1UL << (PAGE_SHIFT-2))
62 #define PTRS_PER_PGD (1UL << (PAGE_SHIFT-2))
90 ((unsigned long)(address)>>(PAGE_SHIFT-SIZEOF_PTR_LOG2)&PTR_MASK&~PAGE_MASK)
213 #define pte_pagenr(pte) ((__pte_page(pte) - PAGE_OFFSET) >> PAGE_SHIFT)
217 #define __page_address(page) (PAGE_OFFSET + (((page) - mem_map) << PAGE_SHIFT))
228 #define pmd_page(pmd) (pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT))
245 (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
252 #define pte_pfn(x) ((unsigned long)(__va((x).pte)) >> PAGE_SHIFT)
253 #define pfn_pte(pfn, prot) __pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
/linux-4.1.27/arch/alpha/include/asm/
H A Dmmzone.h38 (((p) >> PAGE_SHIFT) - PLAT_NODE_DATA(n)->gendata.node_start_pfn)
44 temp = p >> PAGE_SHIFT; PLAT_NODE_DATA_LOCALNR()
67 << PAGE_SHIFT))
72 #define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
94 kvirt = (unsigned long)__va(pte_val(x) >> (32-PAGE_SHIFT)); \
101 (page_to_pfn(page) << PAGE_SHIFT)
103 #define pfn_to_nid(pfn) pa_to_nid(((u64)(pfn) << PAGE_SHIFT))
108 #define virt_addr_valid(kaddr) pfn_valid((__pa(kaddr) >> PAGE_SHIFT))
H A Dpage.h7 /* PAGE_SHIFT determines the page size */
8 #define PAGE_SHIFT 13 macro
9 #define PAGE_SIZE (_AC(1,UL) << PAGE_SHIFT)
86 #define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
89 #define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
H A Dpgtable.h31 #define PMD_SHIFT (PAGE_SHIFT + (PAGE_SHIFT-3))
36 #define PGDIR_SHIFT (PAGE_SHIFT + 2*(PAGE_SHIFT-3))
44 #define PTRS_PER_PTE (1UL << (PAGE_SHIFT-3))
45 #define PTRS_PER_PMD (1UL << (PAGE_SHIFT-3))
46 #define PTRS_PER_PGD (1UL << (PAGE_SHIFT-3))
51 #define PTRS_PER_PAGE (1UL << (PAGE_SHIFT-3))
170 ((unsigned long)(address)>>(PAGE_SHIFT-SIZEOF_PTR_LOG2)&PTR_MASK&~PAGE_MASK)
193 #define KSEG_PFN (0xc0000000000UL >> PAGE_SHIFT)
195 ((((pfn) & KSEG_PFN) == (0x40000000000UL >> PAGE_SHIFT)) \
206 #define page_to_pa(page) (((page) - mem_map) << PAGE_SHIFT)
226 { pmd_val(*pmdp) = _PAGE_TABLE | ((((unsigned long) ptep) - PAGE_OFFSET) << (32-PAGE_SHIFT)); }
229 { pgd_val(*pgdp) = _PAGE_TABLE | ((((unsigned long) pmdp) - PAGE_OFFSET) << (32-PAGE_SHIFT)); }
235 return ((pmd_val(pmd) & _PFN_MASK) >> (32-PAGE_SHIFT)) + PAGE_OFFSET; pmd_page_vaddr()
244 { return PAGE_OFFSET + ((pgd_val(pgd) & _PFN_MASK) >> (32-PAGE_SHIFT)); }
314 + ((address >> PAGE_SHIFT) & (PTRS_PER_PAGE - 1)); pte_offset_kernel()
H A Da.out-core.h32 >> PAGE_SHIFT); aout_dump_thread()
34 >> PAGE_SHIFT); aout_dump_thread()
36 + PAGE_SIZE-1) >> PAGE_SHIFT; aout_dump_thread()
/linux-4.1.27/arch/sh/mm/
H A Dnuma.c36 start_pfn = start >> PAGE_SHIFT; setup_bootmem_node()
37 end_pfn = end >> PAGE_SHIFT; setup_bootmem_node()
57 bootmem_paddr = memblock_alloc_base(bootmap_pages << PAGE_SHIFT, setup_bootmem_node()
59 init_bootmem_node(NODE_DATA(nid), bootmem_paddr >> PAGE_SHIFT, setup_bootmem_node() local
65 reserve_bootmem_node(NODE_DATA(nid), start_pfn << PAGE_SHIFT, setup_bootmem_node() local
68 bootmap_pages << PAGE_SHIFT, BOOTMEM_DEFAULT); setup_bootmem_node() local
H A Dmmap.c28 unsigned long off = (pgoff << PAGE_SHIFT) & shm_align_mask; COLOUR_ALIGN()
46 ((addr - (pgoff << PAGE_SHIFT)) & shm_align_mask)) arch_get_unmapped_area()
75 info.align_offset = pgoff << PAGE_SHIFT; arch_get_unmapped_area()
95 ((addr - (pgoff << PAGE_SHIFT)) & shm_align_mask)) arch_get_unmapped_area_topdown()
125 info.align_offset = pgoff << PAGE_SHIFT; arch_get_unmapped_area_topdown()
H A Dinit.c82 set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, prot)); set_pte_phys()
204 SMP_CACHE_BYTES, end_pfn << PAGE_SHIFT); allocate_pgdat()
238 paddr = memblock_alloc(total_pages << PAGE_SHIFT, PAGE_SIZE); bootmem_init_one_node()
242 init_bootmem_node(p, paddr >> PAGE_SHIFT, p->node_start_pfn, end_pfn); bootmem_init_one_node()
346 max_low_pfn = max_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT; paging_init()
347 min_low_pfn = __MEMORY_START >> PAGE_SHIFT; paging_init()
417 __va(pgdat_end_pfn(pgdat) << PAGE_SHIFT)); mem_init()
491 unsigned long start_pfn = start >> PAGE_SHIFT; arch_add_memory()
492 unsigned long nr_pages = size >> PAGE_SHIFT; arch_add_memory()
520 unsigned long start_pfn = start >> PAGE_SHIFT; arch_remove_memory()
521 unsigned long nr_pages = size >> PAGE_SHIFT; arch_remove_memory()
H A Dkmap.c42 (((addr >> PAGE_SHIFT) & (FIX_N_COLOURS - 1)) + kmap_coherent()
H A Dioremap_fixed.c78 nrpages = size >> PAGE_SHIFT; ioremap_fixed()
121 nrpages = map->size >> PAGE_SHIFT; iounmap_fixed()
H A Dgup.c175 len = (unsigned long) nr_pages << PAGE_SHIFT; __get_user_pages_fast()
228 len = (unsigned long) nr_pages << PAGE_SHIFT; get_user_pages_fast()
247 VM_BUG_ON(nr != (end - start) >> PAGE_SHIFT); get_user_pages_fast()
257 start += nr << PAGE_SHIFT; get_user_pages_fast()
261 (end - start) >> PAGE_SHIFT, write, 0, pages); get_user_pages_fast()
/linux-4.1.27/arch/avr32/include/asm/
H A Dpage.h13 /* PAGE_SHIFT determines the page size */
14 #define PAGE_SHIFT 12 macro
15 #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
53 size = (size - 1) >> PAGE_SHIFT; get_order()
79 #define MAP_NR(addr) (((unsigned long)(addr) - PAGE_OFFSET) >> PAGE_SHIFT)
81 #define phys_to_page(phys) (pfn_to_page(phys >> PAGE_SHIFT))
82 #define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT)
86 #define PHYS_PFN_OFFSET (CONFIG_PHYS_OFFSET >> PAGE_SHIFT)
93 #define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
94 #define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
H A Dpgtable-2level.h41 #define pte_pfn(x) ((unsigned long)(((x).pte >> PAGE_SHIFT)))
42 #define pfn_pte(pfn, prot) __pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
43 #define pfn_pmd(pfn, prot) __pmd(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
/linux-4.1.27/arch/xtensa/include/asm/
H A Dpage.h29 * PAGE_SHIFT determines the page size
32 #define PAGE_SHIFT 12 macro
33 #define PAGE_SIZE (__XTENSA_UL_CONST(1) << PAGE_SHIFT)
75 # define DCACHE_ALIAS_ORDER (DCACHE_WAY_SHIFT - PAGE_SHIFT)
77 # define DCACHE_ALIAS(a) (((a) & DCACHE_ALIAS_MASK) >> PAGE_SHIFT)
86 # define ICACHE_ALIAS_ORDER (ICACHE_WAY_SHIFT - PAGE_SHIFT)
88 # define ICACHE_ALIAS(a) (((a) & ICACHE_ALIAS_MASK) >> PAGE_SHIFT)
128 asm ("nsau %0, %1" : "=r" (lz) : "r" ((size - 1) >> PAGE_SHIFT)); get_order()
170 #define ARCH_PFN_OFFSET (PLATFORM_DEFAULT_MEM_START >> PAGE_SHIFT)
181 #define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
182 #define page_to_virt(page) __va(page_to_pfn(page) << PAGE_SHIFT)
183 #define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
184 #define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT)
H A Dfixmap.h48 #define FIXADDR_SIZE (__end_of_fixed_addresses << PAGE_SHIFT)
51 #define __fix_to_virt(x) (FIXADDR_START + ((x) << PAGE_SHIFT))
52 #define __virt_to_fix(x) (((x) - FIXADDR_START) >> PAGE_SHIFT)
H A Dhighmem.h25 #define PKMAP_NR(virt) (((virt) - PKMAP_BASE) >> PAGE_SHIFT)
26 #define PKMAP_ADDR(nr) (PKMAP_BASE + ((nr) << PAGE_SHIFT))
/linux-4.1.27/arch/x86/include/asm/
H A Dpage_types.h7 /* PAGE_SHIFT determines the page size */
8 #define PAGE_SHIFT 12 macro
9 #define PAGE_SIZE (_AC(1,UL) << PAGE_SHIFT)
26 #define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT)
58 return (phys_addr_t)max_pfn_mapped << PAGE_SHIFT; get_max_mapped()
H A Dboot.h17 #define MIN_KERNEL_ALIGN_LG2 (PAGE_SHIFT + THREAD_SIZE_ORDER)
H A Da.out-core.h31 dump->u_tsize = ((unsigned long)current->mm->end_code) >> PAGE_SHIFT; aout_dump_thread()
33 >> PAGE_SHIFT; aout_dump_thread()
40 >> PAGE_SHIFT; aout_dump_thread()
H A Dpage.h63 #define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
64 #define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT)
H A Dpgalloc.h65 paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT); pmd_populate_kernel()
75 set_pmd(pmd, __pmd(((pteval_t)pfn << PAGE_SHIFT) | _PAGE_TABLE)); pmd_populate()
114 paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT); pud_populate()
122 paravirt_alloc_pud(mm, __pa(pud) >> PAGE_SHIFT); pgd_populate()
H A Dhighmem.h57 #define PKMAP_NR(virt) ((virt-PKMAP_BASE) >> PAGE_SHIFT)
58 #define PKMAP_ADDR(nr) (PKMAP_BASE + ((nr) << PAGE_SHIFT))
/linux-4.1.27/arch/powerpc/boot/
H A Dpage.h19 /* PAGE_SHIFT determines the page size */
20 #define PAGE_SHIFT 12 macro
21 #define PAGE_SIZE (ASM_CONST(1) << PAGE_SHIFT)
/linux-4.1.27/arch/hexagon/include/asm/
H A Dpage.h29 #define PAGE_SHIFT 12 macro
34 #define PAGE_SHIFT 14 macro
39 #define PAGE_SHIFT 16 macro
44 #define PAGE_SHIFT 18 macro
49 #define PAGE_SHIFT 20 macro
62 #define HUGETLB_PAGE_ORDER (HPAGE_SHIFT-PAGE_SHIFT)
66 #define PAGE_SIZE (1UL << PAGE_SHIFT)
67 #define PAGE_MASK (~((1 << PAGE_SHIFT) - 1))
113 #define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
141 #define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT)
143 #define virt_to_pfn(kaddr) (__pa(kaddr) >> PAGE_SHIFT)
144 #define pfn_to_virt(pfn) __va((pfn) << PAGE_SHIFT)
H A Dmem-layout.h51 #define PHYS_PFN_OFFSET (PHYS_OFFSET >> PAGE_SHIFT)
89 #define FIXADDR_SIZE (__end_of_fixed_addresses << PAGE_SHIFT)
99 #define PKMAP_NR(virt) ((virt - PKMAP_BASE) >> PAGE_SHIFT)
100 #define PKMAP_ADDR(nr) (PKMAP_BASE + ((nr) << PAGE_SHIFT))
/linux-4.1.27/arch/x86/kernel/cpu/mtrr/
H A Dcentaur.c60 *base = centaur_mcr[reg].high >> PAGE_SHIFT; centaur_get_mcr()
61 *size = -(centaur_mcr[reg].low & 0xfffff000) >> PAGE_SHIFT; centaur_get_mcr()
82 high = base << PAGE_SHIFT; centaur_set_mcr()
85 low = -size << PAGE_SHIFT | 0x1f; centaur_set_mcr()
88 low = -size << PAGE_SHIFT | 0x02; /* NC */ centaur_set_mcr()
90 low = -size << PAGE_SHIFT | 0x09; /* WWO, WC */ centaur_set_mcr()
H A Damd.c19 *base = (low & 0xFFFE0000) >> PAGE_SHIFT; amd_get_mtrr()
45 *size = (low + 4) << (15 - PAGE_SHIFT); amd_get_mtrr()
82 regs[reg] = (-size >> (15 - PAGE_SHIFT) & 0x0001FFFC) amd_set_mtrr()
83 | (base << PAGE_SHIFT) | (type + 1); amd_set_mtrr()
105 if (type > MTRR_TYPE_WRCOMB || size < (1 << (17 - PAGE_SHIFT)) amd_validate_add_page()
H A Dif.c54 base >>= PAGE_SHIFT; mtrr_file_add()
55 size >>= PAGE_SHIFT; mtrr_file_add() local
73 base >>= PAGE_SHIFT; mtrr_file_del()
74 size >>= PAGE_SHIFT; mtrr_file_del() local
155 base >>= PAGE_SHIFT; mtrr_write() local
156 size >>= PAGE_SHIFT; mtrr_write() local
274 if (base + size - 1 >= (1UL << (8 * sizeof(gentry.size) - PAGE_SHIFT)) mtrr_ioctl()
275 || size >= (1UL << (8 * sizeof(gentry.size) - PAGE_SHIFT))) mtrr_ioctl()
278 gentry.base = base << PAGE_SHIFT; mtrr_ioctl()
279 gentry.size = size << PAGE_SHIFT; mtrr_ioctl()
418 if (size < (0x100000 >> PAGE_SHIFT)) { mtrr_seq_show()
421 size <<= PAGE_SHIFT - 10; mtrr_seq_show()
424 size >>= 20 - PAGE_SHIFT; mtrr_seq_show()
428 i, base, base >> (20 - PAGE_SHIFT), mtrr_seq_show()
H A Dcleanup.c100 if (base < (1<<(20-PAGE_SHIFT)) && mtrr_state.have_fixed && x86_get_mtrr_mem_range()
104 if (base + size <= (1<<(20-PAGE_SHIFT))) x86_get_mtrr_mem_range()
106 size -= (1<<(20-PAGE_SHIFT)) - base; x86_get_mtrr_mem_range()
107 base = 1<<(20-PAGE_SHIFT); x86_get_mtrr_mem_range()
207 range_state[reg].base_pfn = basek >> (PAGE_SHIFT - 10); save_var_mtrr()
208 range_state[reg].size_pfn = sizek >> (PAGE_SHIFT - 10); save_var_mtrr()
219 basek = range_state[reg].base_pfn << (PAGE_SHIFT - 10); set_var_mtrr_all()
220 sizek = range_state[reg].size_pfn << (PAGE_SHIFT - 10); set_var_mtrr_all()
419 basek = base_pfn << (PAGE_SHIFT - 10); set_var_mtrr_range()
420 sizek = size_pfn << (PAGE_SHIFT - 10); set_var_mtrr_range()
523 #define PSHIFT (PAGE_SHIFT - 10)
537 size_base = range_state[i].size_pfn << (PAGE_SHIFT - 10); print_out_mtrr_range_state()
542 start_base = range_state[i].base_pfn << (PAGE_SHIFT - 10); print_out_mtrr_range_state()
713 x_remove_base = 1 << (32 - PAGE_SHIFT); mtrr_cleanup()
715 x_remove_size = (mtrr_tom2 >> PAGE_SHIFT) - x_remove_base; mtrr_cleanup()
722 1ULL<<(20 - PAGE_SHIFT)); mtrr_cleanup()
729 range_sums >> (20 - PAGE_SHIFT)); mtrr_cleanup()
851 trim_start <<= PAGE_SHIFT; real_trim_memory() local
854 trim_size <<= PAGE_SHIFT; real_trim_memory() local
941 range[nr_range].start = (1ULL<<(32 - PAGE_SHIFT)); mtrr_trim_uncached_memory()
942 range[nr_range].end = mtrr_tom2 >> PAGE_SHIFT; mtrr_trim_uncached_memory()
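
Several of the mtrr hits above shift by (PAGE_SHIFT - 10) or (20 - PAGE_SHIFT); those constants convert between page counts, KiB and MiB. A tiny sketch with an assumed PAGE_SHIFT of 12:

    #include <stdio.h>

    #define PAGE_SHIFT 12

    int main(void)
    {
        unsigned long pages = 256;                        /* 256 * 4 KiB = 1 MiB */

        printf("%lu KiB\n", pages << (PAGE_SHIFT - 10));  /* 1024 KiB */
        printf("%lu MiB\n", pages >> (20 - PAGE_SHIFT));  /* 1 MiB */
        return 0;
    }
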
H A Dgeneric.c71 mask >>= PAGE_SHIFT; get_mtrr_size() local
74 size <<= PAGE_SHIFT; get_mtrr_size() local
364 high_width = (__ffs64(size_or_mask) - (32 - PAGE_SHIFT) + 3) / 4; print_mtrr_state()
536 tmp = (u64)mask_hi << (32 - PAGE_SHIFT) | mask_lo >> PAGE_SHIFT; generic_get_mtrr()
556 *base = (u64)base_hi << (32 - PAGE_SHIFT) | base_lo >> PAGE_SHIFT; generic_get_mtrr()
596 || (vr->base_hi & (size_and_mask >> (32 - PAGE_SHIFT))) != set_mtrr_var_ranges()
597 (hi & (size_and_mask >> (32 - PAGE_SHIFT)))) { set_mtrr_var_ranges()
606 || (vr->mask_hi & (size_and_mask >> (32 - PAGE_SHIFT))) != set_mtrr_var_ranges()
607 (hi & (size_and_mask >> (32 - PAGE_SHIFT)))) { set_mtrr_var_ranges()
770 vr->base_lo = base << PAGE_SHIFT | type; generic_set_mtrr()
771 vr->base_hi = (base & size_and_mask) >> (32 - PAGE_SHIFT); generic_set_mtrr()
772 vr->mask_lo = -size << PAGE_SHIFT | 0x800; generic_set_mtrr()
773 vr->mask_hi = (-size & size_and_mask) >> (32 - PAGE_SHIFT); generic_set_mtrr()
795 if (base & ((1 << (22 - PAGE_SHIFT)) - 1)) { generic_validate_add_page()
/linux-4.1.27/arch/m32r/include/asm/
H A Dpage.h6 /* PAGE_SHIFT determines the page size */
7 #define PAGE_SHIFT 12 macro
8 #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
73 #define PFN_BASE (CONFIG_MEMORY_START >> PAGE_SHIFT)
78 #define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
79 #define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
H A Dpgtable-2level.h58 #define pgd_page(pgd) (mem_map + ((pgd_val(pgd) >> PAGE_SHIFT) - PFN_BASE))
70 #define pte_pfn(x) (pte_val(x) >> PAGE_SHIFT)
71 #define pfn_pte(pfn, prot) __pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
72 #define pfn_pmd(pfn, prot) __pmd(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
H A Dmmzone.h18 #define pmd_page(pmd) (pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT))
/linux-4.1.27/arch/sh/kernel/
H A Dswsusp.c24 unsigned long begin_pfn = __pa(&__nosave_begin) >> PAGE_SHIFT; pfn_is_nosave()
25 unsigned long end_pfn = PAGE_ALIGN(__pa(&__nosave_end)) >> PAGE_SHIFT; pfn_is_nosave()
H A Dsys_sh.c37 return sys_mmap_pgoff(addr, len, prot, flags, fd, off>>PAGE_SHIFT); old_mmap()
48 if (pgoff & ((1 << (PAGE_SHIFT - 12)) - 1)) sys_mmap2()
51 pgoff >>= PAGE_SHIFT - 12; sys_mmap2()
/linux-4.1.27/arch/sh/include/mach-dreamcast/mach/
H A Dmaple.h9 #define MAPLE_DMA_PAGES ((MAPLE_DMA_ORDER > PAGE_SHIFT) ? \
10 MAPLE_DMA_ORDER - PAGE_SHIFT : 0)
/linux-4.1.27/arch/arm/mm/
H A Dtlb-v6.S41 mov r0, r0, lsr #PAGE_SHIFT @ align address
42 mov r1, r1, lsr #PAGE_SHIFT
44 orr r0, r3, r0, lsl #PAGE_SHIFT @ Create initial MVA
45 mov r1, r1, lsl #PAGE_SHIFT
72 mov r0, r0, lsr #PAGE_SHIFT @ align address
73 mov r1, r1, lsr #PAGE_SHIFT
74 mov r0, r0, lsl #PAGE_SHIFT
75 mov r1, r1, lsl #PAGE_SHIFT
H A Dmmap.c16 (((pgoff)<<PAGE_SHIFT) & (SHMLBA-1)))
76 (addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)) arch_get_unmapped_area()
101 info.align_offset = pgoff << PAGE_SHIFT; arch_get_unmapped_area()
130 (addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)) arch_get_unmapped_area_topdown()
152 info.align_offset = pgoff << PAGE_SHIFT; arch_get_unmapped_area_topdown()
179 return rnd << PAGE_SHIFT; arch_mmap_rnd()
217 return (pfn + (size >> PAGE_SHIFT)) <= (1 + (PHYS_MASK >> PAGE_SHIFT)); valid_mmap_phys_addr_range()
233 if (iomem_is_exclusive(pfn << PAGE_SHIFT)) devmem_is_allowed()
H A Dcopypage-v6.c90 kfrom = COPYPAGE_V6_FROM + (offset << PAGE_SHIFT); v6_copy_user_highpage_aliasing()
91 kto = COPYPAGE_V6_TO + (offset << PAGE_SHIFT); v6_copy_user_highpage_aliasing()
108 unsigned long to = COPYPAGE_V6_TO + (CACHE_COLOUR(vaddr) << PAGE_SHIFT); v6_clear_user_highpage_aliasing()
H A Dcache-xsc3l2.c82 unsigned long pa_offset = pa << (32 - PAGE_SHIFT); l2_map_va()
83 if (unlikely(pa_offset < (prev_va << (32 - PAGE_SHIFT)))) { l2_map_va()
90 va = (unsigned long)kmap_atomic_pfn(pa >> PAGE_SHIFT); l2_map_va()
92 return va + (pa_offset >> (32 - PAGE_SHIFT)); l2_map_va()
/linux-4.1.27/arch/sh/include/asm/
H A Dpgtable-2level.h14 #define PTE_SHIFT PAGE_SHIFT
H A Dpgtable-3level.h21 #define PMD_SHIFT (PAGE_SHIFT + (PAGE_SHIFT - PTE_MAGNITUDE))
H A Dpage.h10 /* PAGE_SHIFT determines the page size */
12 # define PAGE_SHIFT 12 macro
14 # define PAGE_SHIFT 13 macro
16 # define PAGE_SHIFT 14 macro
18 # define PAGE_SHIFT 16 macro
23 #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
44 #define HUGETLB_PAGE_ORDER (HPAGE_SHIFT-PAGE_SHIFT)
166 #define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT)
167 #define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT)
175 #define PFN_START (__MEMORY_START >> PAGE_SHIFT)
177 #define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
181 #define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
/linux-4.1.27/arch/powerpc/include/asm/
H A Dfb.h11 vma->vm_page_prot = phys_mem_access_prot(file, off >> PAGE_SHIFT, fb_pgprotect()
H A Dpage_32.h23 #define PTE_SHIFT (PAGE_SHIFT - PTE_T_LOG2 - 2) /* 1/4 of a page */
25 #define PTE_SHIFT (PAGE_SHIFT - PTE_T_LOG2) /* full page */
H A Dpgtable-ppc64-64k.h23 #define MIN_HUGEPTE_SHIFT PAGE_SHIFT
26 #define PMD_SHIFT (PAGE_SHIFT + PTE_INDEX_SIZE)
H A Dpage.h28 #define PAGE_SHIFT 18 macro
30 #define PAGE_SHIFT 16 macro
32 #define PAGE_SHIFT 14 macro
34 #define PAGE_SHIFT 12 macro
37 #define PAGE_SIZE (ASM_CONST(1) << PAGE_SHIFT)
43 #define HPAGE_SHIFT PAGE_SHIFT
47 #define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT)
52 * Subtle: (1 << PAGE_SHIFT) is an int, not an unsigned long. So if we
56 #define PAGE_MASK (~((1 << PAGE_SHIFT) - 1))
126 #define ARCH_PFN_OFFSET ((unsigned long)(MEMORY_START >> PAGE_SHIFT))
130 #define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
131 #define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT)
132 #define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
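
The powerpc page.h block above quotes the "Subtle" comment about (1 << PAGE_SHIFT) being an int: PAGE_MASK is then a negative int, so assigning it to a wider unsigned type sign-extends and keeps the high bits set, which is exactly what a mask needs. A user-space sketch of that effect (PAGE_SHIFT = 12 assumed):

    #include <stdio.h>
    #include <stdint.h>

    #define PAGE_SHIFT 12
    #define PAGE_MASK  (~((1 << PAGE_SHIFT) - 1))    /* int arithmetic on purpose */

    int main(void)
    {
        uint32_t m32 = PAGE_MASK;
        uint64_t m64 = PAGE_MASK;                     /* sign-extended on assignment */

        printf("%#x\n", (unsigned)m32);               /* 0xfffff000 */
        printf("%#llx\n", (unsigned long long)m64);   /* 0xfffffffffffff000 */
        return 0;
    }
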
H A Dmmu-44x.h126 #if (PAGE_SHIFT == 12)
130 #elif (PAGE_SHIFT == 14)
134 #elif (PAGE_SHIFT == 16)
138 #elif (PAGE_SHIFT == 18)
H A Dhighmem.h58 #define PKMAP_NR(virt) ((virt-PKMAP_BASE) >> PAGE_SHIFT)
59 #define PKMAP_ADDR(nr) (PKMAP_BASE + ((nr) << PAGE_SHIFT))
/linux-4.1.27/arch/frv/include/asm/
H A Dmem-layout.h22 * PAGE_SHIFT determines the page size
24 #define PAGE_SHIFT 14 macro
27 #define PAGE_SIZE (1UL << PAGE_SHIFT)
29 #define PAGE_SIZE (1 << PAGE_SHIFT)
H A Dpage.h48 #define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT)
57 #define ARCH_PFN_OFFSET (PAGE_OFFSET >> PAGE_SHIFT)
62 #define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
63 #define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
H A Dvirtconvert.h37 #define __page_address(page) (PAGE_OFFSET + (((page) - mem_map) << PAGE_SHIFT))
/linux-4.1.27/arch/s390/include/asm/
H A Dpage.h13 /* PAGE_SHIFT determines the page size */
14 #define PAGE_SHIFT 12 macro
15 #define PAGE_SIZE (_AC(1,UL) << PAGE_SHIFT)
23 #define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT)
146 #define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
147 #define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT)
148 #define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
H A Ddiag.h18 start_addr = start_pfn << PAGE_SHIFT; diag10_range()
19 end_addr = (start_pfn + num_pfn - 1) << PAGE_SHIFT; diag10_range()
/linux-4.1.27/arch/metag/mm/
H A Dnuma.c39 start_pfn = start >> PAGE_SHIFT; setup_bootmem_node()
40 end_pfn = end >> PAGE_SHIFT; setup_bootmem_node()
60 bootmem_paddr = memblock_alloc_base(bootmap_pages << PAGE_SHIFT, setup_bootmem_node()
62 init_bootmem_node(NODE_DATA(nid), bootmem_paddr >> PAGE_SHIFT, setup_bootmem_node() local
71 bootmap_pages << PAGE_SHIFT, BOOTMEM_DEFAULT); setup_bootmem_node() local
H A Dinit.c62 set_pte(pte, pfn_pte(__pa(gateway_page) >> PAGE_SHIFT, PAGE_READONLY)); insert_gateway_page()
120 SMP_CACHE_BYTES, end_pfn << PAGE_SHIFT); allocate_pgdat()
159 paddr = memblock_alloc(total_pages << PAGE_SHIFT, PAGE_SIZE); bootmem_init_one_node()
163 init_bootmem_node(p, paddr >> PAGE_SHIFT, p->node_start_pfn, end_pfn); bootmem_init_one_node()
186 << PAGE_SHIFT; for_each_memblock()
228 u64 base = min_low_pfn << PAGE_SHIFT; init_and_reserve_mem()
229 u64 size = (max_low_pfn << PAGE_SHIFT) - base; init_and_reserve_mem()
250 base = highstart_pfn << PAGE_SHIFT; init_and_reserve_mem()
251 size = (highend_pfn << PAGE_SHIFT) - base; init_and_reserve_mem()
/linux-4.1.27/arch/nios2/include/asm/
H A Dpage.h22 * PAGE_SHIFT determines the page size
24 #define PAGE_SHIFT 12 macro
25 #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
87 ((((page) - mem_map) << PAGE_SHIFT) + PAGE_OFFSET)
89 # define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT)
/linux-4.1.27/arch/nios2/mm/
H A Dtlb.c24 << PAGE_SHIFT)
63 WRCTL(CTL_PTEADDR, (addr >> PAGE_SHIFT) << 2); flush_tlb_one_pid()
75 if (((((pteaddr >> 2) & 0xfffff)) == (addr >> PAGE_SHIFT)) && flush_tlb_one_pid()
87 WRCTL(CTL_TLBACC, (MAX_PHYS_ADDR >> PAGE_SHIFT)); flush_tlb_one_pid()
127 WRCTL(CTL_PTEADDR, (addr >> PAGE_SHIFT) << 2); flush_tlb_one()
138 if ((((pteaddr >> 2) & 0xfffff)) == (addr >> PAGE_SHIFT)) { flush_tlb_one()
150 WRCTL(CTL_TLBACC, (MAX_PHYS_ADDR >> PAGE_SHIFT)); flush_tlb_one()
163 line << (PAGE_SHIFT + cpuinfo.tlb_num_ways_log2)); dump_tlb_line()
180 if ((tlbacc << PAGE_SHIFT) != (MAX_PHYS_ADDR & PAGE_MASK)) { dump_tlb_line()
183 (pteaddr << (PAGE_SHIFT-2)), dump_tlb_line()
184 (tlbacc << PAGE_SHIFT), dump_tlb_line()
236 (MAX_PHYS_ADDR >> PAGE_SHIFT)); flush_tlb_pid()
260 WRCTL(CTL_PTEADDR, ((vaddr) >> PAGE_SHIFT) << 2); flush_tlb_all()
262 WRCTL(CTL_TLBACC, (MAX_PHYS_ADDR >> PAGE_SHIFT)); flush_tlb_all()
/linux-4.1.27/tools/testing/selftests/vm/
H A Dtranshuge-stress.c19 #define PAGE_SHIFT 12 macro
22 #define PAGE_SIZE (1 << PAGE_SHIFT)
47 (uintptr_t)ptr >> (PAGE_SHIFT - 3)) != sizeof(ent)) allocate_transhuge()
52 !(PAGEMAP_PFN(ent[0]) & ((1 << (HPAGE_SHIFT - PAGE_SHIFT)) - 1))) allocate_transhuge()
82 len >> (20 + HPAGE_SHIFT - PAGE_SHIFT - 1)); main()
117 size_t idx = pfn >> (HPAGE_SHIFT - PAGE_SHIFT); main()
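
transhuge-stress.c above seeks in /proc/self/pagemap by ptr >> (PAGE_SHIFT - 3): pagemap stores one 64-bit entry per virtual page, so the byte offset for an address is (vaddr >> PAGE_SHIFT) * 8, which is the same shift. A standalone sketch of that lookup (PAGE_SHIFT = 12 assumed; the PFN field may read back as 0 without CAP_SYS_ADMIN):

    #include <fcntl.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <unistd.h>

    #define PAGE_SHIFT 12

    int main(void)
    {
        int fd = open("/proc/self/pagemap", O_RDONLY);
        uint64_t ent;
        uintptr_t vaddr = (uintptr_t)&fd;             /* any mapped address will do */

        if (fd < 0)
            return 1;
        /* one 8-byte entry per page: offset = (vaddr >> PAGE_SHIFT) * 8 */
        if (pread(fd, &ent, sizeof(ent), vaddr >> (PAGE_SHIFT - 3)) != sizeof(ent))
            return 1;
        printf("present=%d pfn=%#llx\n", (int)(ent >> 63),
               (unsigned long long)(ent & ((1ULL << 55) - 1)));
        close(fd);
        return 0;
    }
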
/linux-4.1.27/drivers/base/
H A Ddma-coherent.c26 int pages = size >> PAGE_SHIFT; dma_init_coherent_memory()
130 pos = (device_addr - mem->device_base) >> PAGE_SHIFT; dma_mark_declared_memory_occupied()
136 return mem->virt_base + (pos << PAGE_SHIFT); dma_mark_declared_memory_occupied()
172 if (unlikely(size > (mem->size << PAGE_SHIFT))) dma_alloc_from_coherent()
182 *dma_handle = mem->device_base + (pageno << PAGE_SHIFT); dma_alloc_from_coherent()
183 *ret = mem->virt_base + (pageno << PAGE_SHIFT); dma_alloc_from_coherent()
218 (mem->virt_base + (mem->size << PAGE_SHIFT))) { dma_release_from_coherent()
219 int page = (vaddr - mem->virt_base) >> PAGE_SHIFT; dma_release_from_coherent()
252 (mem->virt_base + (mem->size << PAGE_SHIFT))) { dma_mmap_from_coherent()
254 int start = (vaddr - mem->virt_base) >> PAGE_SHIFT; dma_mmap_from_coherent()
255 int user_count = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT; dma_mmap_from_coherent()
256 int count = size >> PAGE_SHIFT; dma_mmap_from_coherent()
262 user_count << PAGE_SHIFT, dma_mmap_from_coherent()
/linux-4.1.27/arch/m68k/sun3/
H A Ddvma.c36 if(ptelist[(vaddr & 0xff000) >> PAGE_SHIFT] != pte) { dvma_page()
38 ptelist[(vaddr & 0xff000) >> PAGE_SHIFT] = pte; dvma_page()
/linux-4.1.27/sound/core/
H A Dsgbuf.c52 tmpb.bytes = (sgbuf->table[i].addr & ~PAGE_MASK) << PAGE_SHIFT; snd_free_sgbuf_pages()
100 chunk <<= PAGE_SHIFT; snd_malloc_sgbuf_pages() local
110 chunk = tmpb.bytes >> PAGE_SHIFT; snd_malloc_sgbuf_pages()
149 start = ofs >> PAGE_SHIFT; snd_sgbuf_get_chunk_size()
150 end = (ofs + size - 1) >> PAGE_SHIFT; snd_sgbuf_get_chunk_size()
152 pg = sg->table[start].addr >> PAGE_SHIFT; snd_sgbuf_get_chunk_size()
158 if ((sg->table[start].addr >> PAGE_SHIFT) != pg) snd_sgbuf_get_chunk_size()
159 return (start << PAGE_SHIFT) - ofs; snd_sgbuf_get_chunk_size()
/linux-4.1.27/arch/tile/include/asm/
H A Dpage.h22 /* PAGE_SHIFT and HPAGE_SHIFT determine the page sizes. */
24 #define PAGE_SHIFT 14 macro
27 #define PAGE_SHIFT 16 macro
30 #define PAGE_SHIFT HV_LOG2_DEFAULT_PAGE_SIZE_SMALL macro
35 #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
47 #define CONFIG_FORCE_MAX_ZONEORDER (HPAGE_SHIFT - PAGE_SHIFT + 1)
132 return BITS_PER_LONG - __builtin_clzl((size - 1) >> PAGE_SHIFT); get_order()
137 #define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT)
153 #define __pfn_to_highbits(pfn) ((pfn) >> (NR_PA_HIGHBIT_SHIFT - PAGE_SHIFT))
250 ((kaddr & (HPAGE_SIZE - 1)) >> PAGE_SHIFT); kaddr_to_pfn()
255 return vbase_map[__pfn_to_highbits(pfn)] + (pfn << PAGE_SHIFT); pfn_to_kaddr()
261 return ((phys_addr_t)pfn << PAGE_SHIFT) + virt_to_phys()
267 return pfn_to_kaddr(paddr >> PAGE_SHIFT) + (paddr & (PAGE_SIZE-1)); phys_to_virt()
281 return ((unsigned long)kaddr - PAGE_OFFSET) >> PAGE_SHIFT; kaddr_to_pfn()
286 return (void *)((pfn << PAGE_SHIFT) + PAGE_OFFSET); pfn_to_kaddr()
318 #define page_to_pa(page) ((phys_addr_t)(page_to_pfn(page)) << PAGE_SHIFT)
H A Dpgalloc.h26 #define L2_KERNEL_PGTABLE_SHIFT _HV_LOG2_L2_SIZE(HPAGE_SHIFT, PAGE_SHIFT)
32 #if L2_KERNEL_PGTABLE_SHIFT < PAGE_SHIFT
33 #define L2_USER_PGTABLE_SHIFT PAGE_SHIFT
39 #define L2_USER_PGTABLE_ORDER (L2_USER_PGTABLE_SHIFT - PAGE_SHIFT)
135 #if L1_KERNEL_PGTABLE_SHIFT < PAGE_SHIFT
136 #define L1_USER_PGTABLE_SHIFT PAGE_SHIFT
142 #define L1_USER_PGTABLE_ORDER (L1_USER_PGTABLE_SHIFT - PAGE_SHIFT)
H A Dhighmem.h50 #define PKMAP_NR(virt) ((virt-PKMAP_BASE) >> PAGE_SHIFT)
51 #define PKMAP_ADDR(nr) (PKMAP_BASE + ((nr) << PAGE_SHIFT))
H A Dfixmap.h78 #define __FIXADDR_SIZE (__end_of_permanent_fixed_addresses << PAGE_SHIFT)
79 #define __FIXADDR_BOOT_SIZE (__end_of_fixed_addresses << PAGE_SHIFT)
/linux-4.1.27/arch/um/kernel/
H A Dsyscall.c25 err = sys_mmap_pgoff(addr, len, prot, flags, fd, offset >> PAGE_SHIFT); old_mmap()
/linux-4.1.27/fs/romfs/
H A Dmmap-nommu.c37 lpages = (len + PAGE_SIZE - 1) >> PAGE_SHIFT; romfs_get_unmapped_area()
39 offset = pgoff << PAGE_SHIFT; romfs_get_unmapped_area()
41 maxpages = (isize + PAGE_SIZE - 1) >> PAGE_SHIFT; romfs_get_unmapped_area()
48 if (len > mtd->size || pgoff >= (mtd->size >> PAGE_SHIFT)) romfs_get_unmapped_area()
/linux-4.1.27/arch/score/include/asm/
H A Dpage.h7 /* PAGE_SHIFT determines the page size */
8 #define PAGE_SHIFT (12) macro
9 #define PAGE_SIZE (_AC(1,UL) << PAGE_SHIFT)
79 #define ARCH_PFN_OFFSET (PAGE_OFFSET >> PAGE_SHIFT)
H A Dfixmap.h58 #define FIXADDR_SIZE (__end_of_fixed_addresses << PAGE_SHIFT)
61 #define __fix_to_virt(x) (FIXADDR_TOP - ((x) << PAGE_SHIFT))
63 ((FIXADDR_TOP - ((x) & PAGE_MASK)) >> PAGE_SHIFT)
/linux-4.1.27/arch/arm64/include/asm/
H A Dpage.h22 /* PAGE_SHIFT determines the page size */
24 #define PAGE_SHIFT 16 macro
26 #define PAGE_SHIFT 12 macro
28 #define PAGE_SIZE (_AC(1,UL) << PAGE_SHIFT)
H A Dmemory.h86 #define __phys_to_pfn(paddr) ((unsigned long)((paddr) >> PAGE_SHIFT))
87 #define __pfn_to_phys(pfn) ((phys_addr_t)(pfn) << PAGE_SHIFT)
132 #define PHYS_PFN_OFFSET (PHYS_OFFSET >> PAGE_SHIFT)
156 #define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT)
165 #define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
166 #define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
H A Dpgtable-hwdef.h19 #define PTRS_PER_PTE (1 << (PAGE_SHIFT - 3))
25 #define PMD_SHIFT ((PAGE_SHIFT - 3) * 2 + 3)
35 #define PUD_SHIFT ((PAGE_SHIFT - 3) * 3 + 3)
45 #define PGDIR_SHIFT ((PAGE_SHIFT - 3) * CONFIG_PGTABLE_LEVELS + 3)
H A Dpgtable.h42 #define VMEMMAP_SIZE ALIGN((1UL << (VA_BITS - PAGE_SHIFT)) * sizeof(struct page), PUD_SIZE)
48 SECTION_ALIGN_DOWN(memstart_addr >> PAGE_SHIFT))
120 #define pte_pfn(pte) ((pte_val(pte) & PHYS_MASK) >> PAGE_SHIFT)
122 #define pfn_pte(pfn,prot) (__pte(((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot)))
129 #define pte_index(addr) (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
244 #define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT)
304 #define pmd_pfn(pmd) (((pmd_val(pmd) & PMD_MASK) & PHYS_MASK) >> PAGE_SHIFT)
305 #define pfn_pmd(pfn,prot) (__pmd(((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot)))
309 #define pud_pfn(pud) (((pud_val(pud) & PUD_MASK) & PHYS_MASK) >> PAGE_SHIFT)
/linux-4.1.27/arch/avr32/mm/
H A Dcopy_page.S21 sub r10, r11, -(1 << PAGE_SHIFT)
/linux-4.1.27/arch/m32r/mm/
H A Dinit.c68 max_dma = virt_to_phys((char *)MAX_DMA_ADDRESS) >> PAGE_SHIFT; zone_sizes_init()
79 zones_size[ZONE_DMA] = 0 >> PAGE_SHIFT; zone_sizes_init()
80 zones_size[ZONE_NORMAL] = __MEMORY_SIZE >> PAGE_SHIFT; zone_sizes_init()
81 start_pfn = __MEMORY_START >> PAGE_SHIFT; zone_sizes_init()
/linux-4.1.27/arch/m68k/coldfire/
H A Dm54xx.c94 num_pages = (_ramend - _rambase) >> PAGE_SHIFT; mcf54xx_bootmem_alloc()
98 min_low_pfn = _rambase >> PAGE_SHIFT; mcf54xx_bootmem_alloc()
99 start_pfn = memstart >> PAGE_SHIFT; mcf54xx_bootmem_alloc()
100 max_low_pfn = _ramend >> PAGE_SHIFT; mcf54xx_bootmem_alloc()
/linux-4.1.27/arch/x86/mm/
H A Dinit.c88 order = get_order((unsigned long)num << PAGE_SHIFT); alloc_low_pages()
97 ret = memblock_find_in_range(min_pfn_mapped << PAGE_SHIFT, alloc_low_pages()
98 max_pfn_mapped << PAGE_SHIFT, alloc_low_pages()
103 pfn = ret >> PAGE_SHIFT; alloc_low_pages()
108 pfn << PAGE_SHIFT, (pgt_buf_end << PAGE_SHIFT) - 1); alloc_low_pages()
114 adr = __va((pfn + i) << PAGE_SHIFT); alloc_low_pages()
118 return __va(pfn << PAGE_SHIFT); alloc_low_pages()
131 pgt_buf_start = base >> PAGE_SHIFT; early_alloc_pgt_buf()
133 pgt_buf_top = pgt_buf_start + (tables >> PAGE_SHIFT); early_alloc_pgt_buf()
193 mr[nr_range].start = start_pfn<<PAGE_SHIFT; save_mr()
194 mr[nr_range].end = end_pfn<<PAGE_SHIFT; save_mr()
218 if ((end >> PAGE_SHIFT) > max_low_pfn) adjust_range_page_size_mask()
372 if (start_pfn < (1UL<<(32-PAGE_SHIFT))) add_pfn_range_mapped()
374 min(end_pfn, 1UL<<(32-PAGE_SHIFT))); add_pfn_range_mapped()
411 add_pfn_range_mapped(start >> PAGE_SHIFT, ret >> PAGE_SHIFT); init_memory_mapping()
413 return ret >> PAGE_SHIFT; init_memory_mapping()
447 can_use_brk_pgt = max(start, (u64)pgt_buf_end<<PAGE_SHIFT) >= init_range_memory_mapping()
448 min(end, (u64)pgt_buf_top<<PAGE_SHIFT); init_range_memory_mapping()
473 return step_size << (PMD_SHIFT - PAGE_SHIFT - 1); get_new_step_size()
501 min_pfn_mapped = real_end >> PAGE_SHIFT; memory_map_top_down()
520 min_pfn_mapped = last_start >> PAGE_SHIFT; memory_map_top_down()
549 min_pfn_mapped = start >> PAGE_SHIFT; memory_map_bottom_up()
581 end = max_pfn << PAGE_SHIFT; init_mem_mapping()
583 end = max_low_pfn << PAGE_SHIFT; init_mem_mapping()
621 early_memtest(0, max_pfn_mapped << PAGE_SHIFT); init_mem_mapping()
638 if (iomem_is_exclusive(pagenr << PAGE_SHIFT)) devmem_is_allowed()
669 set_memory_np(begin, (end - begin) >> PAGE_SHIFT); free_init_pages()
676 set_memory_nx(begin, (end - begin) >> PAGE_SHIFT); free_init_pages()
677 set_memory_rw(begin, (end - begin) >> PAGE_SHIFT); free_init_pages()
H A Dphysaddr.c63 return pfn_valid(x >> PAGE_SHIFT); __virt_addr_valid()
78 VIRTUAL_BUG_ON((phys_addr >> PAGE_SHIFT) > max_low_pfn); __phys_addr()
94 return pfn_valid((x - PAGE_OFFSET) >> PAGE_SHIFT); __virt_addr_valid()
H A Dinit_32.c77 paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT); one_md_table_init()
100 paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT); one_page_table_init()
182 paravirt_alloc_pte(&init_mm, __pa(newpte) >> PAGE_SHIFT); page_table_kmap_check()
187 paravirt_release_pte(__pa(pte) >> PAGE_SHIFT); page_table_kmap_check()
268 start_pfn = start >> PAGE_SHIFT; kernel_physical_mapping_init()
269 end_pfn = end >> PAGE_SHIFT; kernel_physical_mapping_init()
293 pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET); kernel_physical_mapping_init()
301 pmd_idx = pmd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET); kernel_physical_mapping_init()
325 pfn &= PMD_MASK >> PAGE_SHIFT; kernel_physical_mapping_init()
344 pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET); kernel_physical_mapping_init()
361 last_map_addr = (pfn << PAGE_SHIFT) + PAGE_SIZE; kernel_physical_mapping_init()
470 for (pfn = max_low_pfn; pfn < 1<<(32-PAGE_SHIFT); pfn++) { native_pagetable_init()
471 va = PAGE_OFFSET + (pfn<<PAGE_SHIFT); native_pagetable_init()
496 paravirt_alloc_pmd(&init_mm, __pa(base) >> PAGE_SHIFT); native_pagetable_init()
557 highmem_pages = memparse(arg, &arg) >> PAGE_SHIFT; parse_highmem()
687 max_pfn_mapped<<PAGE_SHIFT); setup_bootmem_allocator()
688 printk(KERN_INFO " low ram: 0 - %08lx\n", max_low_pfn<<PAGE_SHIFT); setup_bootmem_allocator()
831 unsigned long start_pfn = start >> PAGE_SHIFT; arch_add_memory()
832 unsigned long nr_pages = size >> PAGE_SHIFT; arch_add_memory()
840 unsigned long start_pfn = start >> PAGE_SHIFT; arch_remove_memory()
841 unsigned long nr_pages = size >> PAGE_SHIFT; arch_remove_memory()
891 set_pages_rw(virt_to_page(start), size >> PAGE_SHIFT); set_kernel_text_rw() local
905 set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT); set_kernel_text_ro() local
922 set_pages_nx(virt_to_page(start), size >> PAGE_SHIFT); mark_nxdata_nx() local
930 set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT); mark_rodata_ro() local
939 set_pages_rw(virt_to_page(start), size>>PAGE_SHIFT); mark_rodata_ro() local
942 set_pages_ro(virt_to_page(start), size>>PAGE_SHIFT); mark_rodata_ro() local
947 set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT); mark_rodata_ro() local
954 set_pages_rw(virt_to_page(start), size >> PAGE_SHIFT); mark_rodata_ro() local
957 set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT); mark_rodata_ro() local
H A Dpageattr.c96 return __pa_symbol(_text) >> PAGE_SHIFT; highmap_start_pfn()
101 return __pa_symbol(roundup(_brk_end, PMD_SIZE)) >> PAGE_SHIFT; highmap_end_pfn()
262 if (pcibios_enabled && within(pfn, BIOS_BEGIN >> PAGE_SHIFT, BIOS_END >> PAGE_SHIFT)) static_protections()
278 if (within(pfn, __pa_symbol(__start_rodata) >> PAGE_SHIFT, static_protections()
279 __pa_symbol(__end_rodata) >> PAGE_SHIFT)) static_protections()
430 phys_addr = (phys_addr_t)pte_pfn(*pte) << PAGE_SHIFT; slow_virt_to_phys()
500 numpages = (nextpage_addr - address) >> PAGE_SHIFT; try_preserve_large_page()
539 pfn = pte_pfn(old_pte) + ((address & (psize - 1)) >> PAGE_SHIFT); try_preserve_large_page()
551 for (i = 0; i < (psize >> PAGE_SHIFT); i++, addr += PAGE_SIZE, pfn++) { try_preserve_large_page()
575 if (address == (address & pmask) && cpa->numpages == (psize >> PAGE_SHIFT)) { try_preserve_large_page()
622 pfninc = PMD_PAGE_SIZE >> PAGE_SHIFT; __split_large_page()
892 set_pte(pte, pfn_pte(cpa->pfn >> PAGE_SHIFT, pgprot)); populate_pte()
912 unsigned long pre_end = start + (num_pages << PAGE_SHIFT); populate_pmd()
916 cur_pages = (pre_end - start) >> PAGE_SHIFT; populate_pmd()
956 cur_pages += PMD_SIZE >> PAGE_SHIFT; populate_pmd()
982 end = start + (cpa->numpages << PAGE_SHIFT); populate_pud()
993 cur_pages = (pre_end - start) >> PAGE_SHIFT; populate_pud()
1029 cur_pages += PUD_SIZE >> PAGE_SHIFT; populate_pud()
1082 addr + (cpa->numpages << PAGE_SHIFT)); populate_pgd()
1110 PAGE_OFFSET + (max_pfn_mapped << PAGE_SHIFT))) { __cpa_process_fault()
1112 cpa->pfn = __pa(vaddr) >> PAGE_SHIFT; __cpa_process_fault()
1236 unsigned long laddr = (unsigned long)__va(cpa->pfn << PAGE_SHIFT); cpa_process_alias()
1258 PAGE_OFFSET + (max_pfn_mapped << PAGE_SHIFT)))) { cpa_process_alias()
1277 unsigned long temp_cpa_vaddr = (cpa->pfn << PAGE_SHIFT) + cpa_process_alias()
1692 start = page_to_pfn(pages[i]) << PAGE_SHIFT; _set_pages_array()
1714 start = page_to_pfn(pages[i]) << PAGE_SHIFT; _set_pages_array()
1757 start = page_to_pfn(pages[i]) << PAGE_SHIFT; set_pages_array_wb()
1915 unmap_pgd_range(root, address, address + (numpages << PAGE_SHIFT)); kernel_unmap_pages_in_pgd()
H A Dinit_64.c391 vaddr_end = __START_KERNEL_map + (max_pfn_mapped << PAGE_SHIFT); cleanup_highmap()
435 pte, addr, pfn_pte(addr >> PAGE_SHIFT, PAGE_KERNEL).pte); phys_pte_init()
437 set_pte(pte, pfn_pte(addr >> PAGE_SHIFT, prot)); phys_pte_init()
503 pfn_pte((address & PMD_MASK) >> PAGE_SHIFT, phys_pmd_init()
576 pfn_pte((addr & PUD_MASK) >> PAGE_SHIFT, phys_pud_init()
695 unsigned long start_pfn = start >> PAGE_SHIFT; arch_add_memory()
696 unsigned long nr_pages = size >> PAGE_SHIFT; arch_add_memory()
1019 unsigned long start_pfn = start >> PAGE_SHIFT; arch_remove_memory()
1020 unsigned long nr_pages = size >> PAGE_SHIFT; arch_remove_memory()
1087 set_memory_rw(start, (end - start) >> PAGE_SHIFT); set_kernel_text_rw()
1104 set_memory_ro(start, (end - start) >> PAGE_SHIFT); set_kernel_text_ro()
1118 set_memory_ro(start, (end - start) >> PAGE_SHIFT); mark_rodata_ro()
1135 set_memory_nx(text_end, (all_end - text_end) >> PAGE_SHIFT); mark_rodata_ro()
1141 set_memory_rw(start, (end-start) >> PAGE_SHIFT); mark_rodata_ro()
1144 set_memory_ro(start, (end-start) >> PAGE_SHIFT); mark_rodata_ro()
1198 if (totalram_pages >= (64ULL << (30 - PAGE_SHIFT))) { probe_memory_block_size()
1204 if ((max_pfn << PAGE_SHIFT) < (16UL << 32)) probe_memory_block_size()
1209 if (!((max_pfn << PAGE_SHIFT) & (bz - 1))) probe_memory_block_size()
1264 entry = pfn_pte(__pa(p) >> PAGE_SHIFT, vmemmap_populate_hugepages()
/linux-4.1.27/arch/unicore32/include/asm/
H A Dpage.h15 /* PAGE_SHIFT determines the page size */
16 #define PAGE_SHIFT 12 macro
17 #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
H A Dmemory.h66 #define __phys_to_pfn(paddr) ((paddr) >> PAGE_SHIFT)
67 #define __pfn_to_phys(pfn) ((pfn) << PAGE_SHIFT)
89 #define PHYS_PFN_OFFSET (PHYS_OFFSET >> PAGE_SHIFT)
96 #define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT)
109 #define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
H A Dmemblock.h39 #define bank_pfn_size(bank) ((bank)->size >> PAGE_SHIFT)
/linux-4.1.27/arch/arc/mm/
H A Dmmap.c21 (((pgoff) << PAGE_SHIFT) & (SHMLBA - 1)))
51 (addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)) arch_get_unmapped_area()
76 info.align_offset = pgoff << PAGE_SHIFT; arch_get_unmapped_area()
/linux-4.1.27/arch/um/include/asm/
H A Dpage.h12 /* PAGE_SHIFT determines the page size */
13 #define PAGE_SHIFT 12 macro
14 #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
112 #define phys_to_pfn(p) ((pfn_t) ((p) >> PAGE_SHIFT))
113 #define pfn_to_phys(pfn) ((phys_t) ((pfn) << PAGE_SHIFT))
/linux-4.1.27/arch/c6x/mm/
H A Ddma-coherent.c54 return dma_base + (pos << PAGE_SHIFT); __alloc_dma_pages()
60 u32 pos = (addr - dma_base) >> PAGE_SHIFT; __free_dma_pages()
85 order = get_count_order(((size - 1) >> PAGE_SHIFT) + 1); dma_alloc_coherent()
110 order = get_count_order(((size - 1) >> PAGE_SHIFT) + 1); dma_free_coherent()
134 dma_pages = dma_size >> PAGE_SHIFT; coherent_mem_init()
H A Dinit.c52 zones_size[ZONE_NORMAL] = (memory_end - PAGE_OFFSET) >> PAGE_SHIFT; paging_init()
54 __pa(PAGE_OFFSET) >> PAGE_SHIFT; paging_init()
/linux-4.1.27/arch/microblaze/include/asm/
H A Dpage.h25 /* PAGE_SHIFT determines the page size */
27 #define PAGE_SHIFT 16 macro
29 #define PAGE_SHIFT 14 macro
31 #define PAGE_SHIFT 12 macro
33 #define PAGE_SIZE (ASM_CONST(1) << PAGE_SHIFT)
38 #define PTE_SHIFT (PAGE_SHIFT - 2) /* 1024 ptes per page */
152 # define virt_to_page(kaddr) (pfn_to_page(__pa(kaddr) >> PAGE_SHIFT))
153 # define page_to_virt(page) __va(page_to_pfn(page) << PAGE_SHIFT)
154 # define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT)
167 # define ARCH_PFN_OFFSET (PAGE_OFFSET >> PAGE_SHIFT)
169 # define ARCH_PFN_OFFSET (memory_start >> PAGE_SHIFT)
H A Dhighmem.h50 #define PKMAP_NR(virt) ((virt - PKMAP_BASE) >> PAGE_SHIFT)
51 #define PKMAP_ADDR(nr) (PKMAP_BASE + ((nr) << PAGE_SHIFT))
/linux-4.1.27/arch/mips/mm/
H A Dtlb-r8k.c43 write_c0_vaddr(entry << PAGE_SHIFT); local_flush_tlb_all()
44 write_c0_entryhi(CKSEG0 + (entry << (PAGE_SHIFT + 1))); local_flush_tlb_all()
72 size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT; local_flush_tlb_range()
101 write_c0_entryhi(CKSEG0 + (idx << (PAGE_SHIFT + 1))); local_flush_tlb_range()
115 size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT; local_flush_tlb_kernel_range()
141 write_c0_entryhi(CKSEG0 + (idx << (PAGE_SHIFT + 1))); local_flush_tlb_kernel_range()
170 write_c0_entryhi(CKSEG0 + (idx << (PAGE_SHIFT + 1))); local_flush_tlb_page()
H A Dmmap.c50 (((pgoff) << PAGE_SHIFT) & shm_align_mask))
77 ((addr - (pgoff << PAGE_SHIFT)) & shm_align_mask)) arch_get_unmapped_area_common()
101 info.align_offset = pgoff << PAGE_SHIFT; arch_get_unmapped_area_common()
150 rnd <<= PAGE_SHIFT; arch_mmap_rnd() local
179 rnd = rnd << PAGE_SHIFT; brk_rnd()
H A Dgup.c86 page = head + ((addr & ~PMD_MASK) >> PAGE_SHIFT); gup_huge_pmd()
152 page = head + ((addr & ~PUD_MASK) >> PAGE_SHIFT); gup_huge_pud()
208 len = (unsigned long) nr_pages << PAGE_SHIFT; __get_user_pages_fast()
274 len = (unsigned long) nr_pages << PAGE_SHIFT; get_user_pages_fast()
294 VM_BUG_ON(nr != (end - start) >> PAGE_SHIFT); get_user_pages_fast()
301 start += nr << PAGE_SHIFT; get_user_pages_fast()
305 (end - start) >> PAGE_SHIFT, get_user_pages_fast()
/linux-4.1.27/mm/
H A Dpercpu-km.c50 const int nr_pages = pcpu_group_sizes[0] >> PAGE_SHIFT; pcpu_create_chunk()
80 const int nr_pages = pcpu_group_sizes[0] >> PAGE_SHIFT; pcpu_destroy_chunk()
102 nr_pages = (ai->groups[0].nr_units * ai->unit_size) >> PAGE_SHIFT; pcpu_verify_alloc_info()
H A Dmremap.c269 new_pgoff = vma->vm_pgoff + ((old_addr - vma->vm_start) >> PAGE_SHIFT); move_vma()
317 vm_stat_account(mm, vma->vm_flags, vma->vm_file, new_len>>PAGE_SHIFT); move_vma()
321 vm_unacct_memory(excess >> PAGE_SHIFT); move_vma()
334 mm->locked_vm += new_len >> PAGE_SHIFT; move_vma()
363 pgoff = (addr - vma->vm_start) >> PAGE_SHIFT; vma_to_resize()
365 if (pgoff + (new_len >> PAGE_SHIFT) < pgoff) vma_to_resize()
371 locked = mm->locked_vm << PAGE_SHIFT; vma_to_resize()
378 if (!may_expand_vm(mm, (new_len - old_len) >> PAGE_SHIFT)) vma_to_resize()
382 unsigned long charged = (new_len - old_len) >> PAGE_SHIFT; vma_to_resize()
437 ((addr - vma->vm_start) >> PAGE_SHIFT), mremap_to()
537 int pages = (new_len - old_len) >> PAGE_SHIFT; SYSCALL_DEFINE5()
568 ((addr - vma->vm_start) >> PAGE_SHIFT), SYSCALL_DEFINE5()
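
The mremap.c hits show the page-granular bookkeeping for a moved or resized VMA: the new file offset is the old vm_pgoff plus the page distance from vm_start, and vma_to_resize() rejects lengths whose page count would wrap the pgoff. A sketch of both calculations with made-up values:

#include <stdio.h>

#define PAGE_SHIFT 12			/* assumed */

int main(void)
{
	unsigned long vm_start = 0x70000000UL;	/* made-up VMA base */
	unsigned long vm_pgoff = 0x100;		/* file offset of vm_start, in pages */
	unsigned long addr     = 0x70003000UL;	/* address inside the VMA */
	unsigned long new_len  = 0x8000;	/* requested length in bytes */

	/* Pages between vm_start and addr are added to the base file offset. */
	unsigned long pgoff = vm_pgoff + ((addr - vm_start) >> PAGE_SHIFT);

	/* Wrap check corresponding to
	 * "if (pgoff + (new_len >> PAGE_SHIFT) < pgoff)" in vma_to_resize(). */
	if (pgoff + (new_len >> PAGE_SHIFT) < pgoff)
		printf("pgoff would wrap\n");
	else
		printf("new pgoff = 0x%lx\n", pgoff);	/* 0x103 */
	return 0;
}
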
H A Dmincore.c87 unsigned long nr = (end - addr) >> PAGE_SHIFT; __mincore_unmapped_range()
118 int nr = (end - addr) >> PAGE_SHIFT; mincore_pte_range()
189 end = min(vma->vm_end, addr + (pages << PAGE_SHIFT)); do_mincore()
193 return (end - addr) >> PAGE_SHIFT; do_mincore()
236 pages = len >> PAGE_SHIFT; SYSCALL_DEFINE3()
264 start += retval << PAGE_SHIFT; SYSCALL_DEFINE3()
/linux-4.1.27/arch/x86/power/
H A Dhibernate_64.c68 mstart = pfn_mapped[i].start << PAGE_SHIFT; set_up_temporary_mappings()
69 mend = pfn_mapped[i].end << PAGE_SHIFT; set_up_temporary_mappings()
105 unsigned long nosave_begin_pfn = __pa_symbol(&__nosave_begin) >> PAGE_SHIFT; pfn_is_nosave()
106 unsigned long nosave_end_pfn = PAGE_ALIGN(__pa_symbol(&__nosave_end)) >> PAGE_SHIFT; pfn_is_nosave()
/linux-4.1.27/fs/logfs/
H A Ddev_mtd.c78 pgoff_t index = ofs >> PAGE_SHIFT; logfs_mtd_erase_mapping()
80 for (index = ofs >> PAGE_SHIFT; index < (ofs + len) >> PAGE_SHIFT; index++) { logfs_mtd_erase_mapping()
130 err = logfs_mtd_read(sb, page->index << PAGE_SHIFT, PAGE_SIZE, logfs_mtd_readpage()
162 return read_cache_page(mapping, *ofs >> PAGE_SHIFT, filler, sb); logfs_mtd_find_first_sb()
180 return read_cache_page(mapping, *ofs >> PAGE_SHIFT, filler, sb); logfs_mtd_find_last_sb()
195 err = logfs_mtd_write(sb, page->index << PAGE_SHIFT, PAGE_SIZE, __logfs_mtd_writeseg()
226 __logfs_mtd_writeseg(sb, ofs, ofs >> PAGE_SHIFT, len >> PAGE_SHIFT); logfs_mtd_writeseg()
H A Ddev_bdev.c152 __bdev_writeseg(sb, ofs, ofs >> PAGE_SHIFT, len >> PAGE_SHIFT); bdev_writeseg()
236 do_erase(sb, to, to >> PAGE_SHIFT, len >> PAGE_SHIFT); bdev_erase()
265 pgoff_t index = pos >> PAGE_SHIFT; bdev_find_last_sb()
/linux-4.1.27/arch/powerpc/mm/
H A Dsubpage-prot.c118 spp += (addr >> PAGE_SHIFT) & (SBP_L1_COUNT - 1); subpage_prot_clear()
120 i = (addr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1); subpage_prot_clear()
122 if (addr + (nw << PAGE_SHIFT) > next) subpage_prot_clear()
123 nw = (next - addr) >> PAGE_SHIFT; subpage_prot_clear()
212 if (!access_ok(VERIFY_READ, map, (len >> PAGE_SHIFT) * sizeof(u32))) sys_subpage_prot()
239 spp += (addr >> PAGE_SHIFT) & (SBP_L1_COUNT - 1); sys_subpage_prot()
245 i = (addr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1); sys_subpage_prot()
247 if (addr + (nw << PAGE_SHIFT) > next) sys_subpage_prot()
248 nw = (next - addr) >> PAGE_SHIFT; sys_subpage_prot()
H A Dmem.c84 unsigned long paddr = (pfn << PAGE_SHIFT); page_is_ram()
120 unsigned long start_pfn = start >> PAGE_SHIFT; arch_add_memory()
121 unsigned long nr_pages = size >> PAGE_SHIFT; arch_add_memory()
139 unsigned long start_pfn = start >> PAGE_SHIFT; arch_remove_memory()
140 unsigned long nr_pages = size >> PAGE_SHIFT; arch_remove_memory()
194 max_low_pfn = max_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT; initmem_init()
195 min_low_pfn = MEMORY_START >> PAGE_SHIFT; initmem_init()
197 max_low_pfn = lowmem_end_addr >> PAGE_SHIFT; initmem_init()
311 limit_zone_pfn(ZONE_NORMAL, lowmem_end_addr >> PAGE_SHIFT); paging_init()
316 limit_zone_pfn(top_zone, top_of_ram >> PAGE_SHIFT); paging_init()
343 highmem_mapnr = lowmem_end_addr >> PAGE_SHIFT; mem_init()
345 phys_addr_t paddr = (phys_addr_t)pfn << PAGE_SHIFT; mem_init()
427 __flush_dcache_icache_phys(page_to_pfn(page) << PAGE_SHIFT); flush_dcache_icache_page()
563 if (iomem_is_exclusive(pfn << PAGE_SHIFT)) devmem_is_allowed()
H A Dmmap.c62 rnd = (unsigned long)get_random_int() % (1<<(23-PAGE_SHIFT)); arch_mmap_rnd()
64 rnd = (unsigned long)get_random_int() % (1<<(30-PAGE_SHIFT)); arch_mmap_rnd()
66 return rnd << PAGE_SHIFT; arch_mmap_rnd()
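
arch_mmap_rnd() here (and the s390 and sparc variants further down) draws a random page count bounded by 1 << (23 - PAGE_SHIFT) for 32-bit tasks or 1 << (30 - PAGE_SHIFT) for 64-bit ones, then shifts it back to bytes, so the mmap base moves within an 8 MiB or 1 GiB window at page granularity. A userspace sketch of that arithmetic, with rand() standing in for get_random_int():

#include <stdio.h>
#include <stdlib.h>

#define PAGE_SHIFT 12			/* assumed 4 KiB pages */

/* rand() stands in for the kernel's get_random_int(). */
static unsigned long mmap_rnd(int is_32bit)
{
	unsigned long rnd = (unsigned long)rand();

	if (is_32bit)
		rnd %= 1UL << (23 - PAGE_SHIFT);	/* 8 MiB window, page granular */
	else
		rnd %= 1UL << (30 - PAGE_SHIFT);	/* 1 GiB window, page granular */
	return rnd << PAGE_SHIFT;
}

int main(void)
{
	printf("32-bit offset: 0x%lx\n", mmap_rnd(1));
	printf("64-bit offset: 0x%lx\n", mmap_rnd(0));
	return 0;
}
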
/linux-4.1.27/arch/arm64/mm/
H A Dmmap.c36 #define MIN_GAP (SZ_128M + ((STACK_RND_MASK << PAGE_SHIFT) + 1))
56 return rnd << PAGE_SHIFT; arch_mmap_rnd()
116 return !(((pfn << PAGE_SHIFT) + size) & ~PHYS_MASK); valid_mmap_phys_addr_range()
131 if (iomem_is_exclusive(pfn << PAGE_SHIFT)) devmem_is_allowed()
H A Dhugetlbpage.c59 hugetlb_add_hstate(PMD_SHIFT - PAGE_SHIFT); setup_hugepagesz()
61 hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT); setup_hugepagesz()
H A Ddma-mapping.c107 page = dma_alloc_from_contiguous(dev, size >> PAGE_SHIFT, __dma_alloc_coherent()
135 size >> PAGE_SHIFT); __dma_free_coherent()
312 PAGE_SHIFT; __dma_common_mmap()
313 unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT; __dma_common_mmap()
314 unsigned long pfn = dma_to_phys(dev, dma_addr) >> PAGE_SHIFT; __dma_common_mmap()
359 unsigned long nr_pages = atomic_pool_size >> PAGE_SHIFT; atomic_pool_init()
377 atomic_pool = gen_pool_create(PAGE_SHIFT, -1); atomic_pool_init()
395 (void *)PAGE_SHIFT); atomic_pool_init()
/linux-4.1.27/arch/blackfin/kernel/
H A Ddma-mapping.c34 dma_pages = dma_size >> PAGE_SHIFT; dma_alloc_init()
44 return ((size - 1) >> PAGE_SHIFT) + 1; get_pages()
59 ret = dma_base + (start << PAGE_SHIFT); __alloc_dma_pages()
68 unsigned long page = (addr - dma_base) >> PAGE_SHIFT; __free_dma_pages()
/linux-4.1.27/arch/arm/include/asm/
H A Dpage.h13 /* PAGE_SHIFT determines the page size */
14 #define PAGE_SHIFT 12 macro
15 #define PAGE_SIZE (_AC(1,UL) << PAGE_SHIFT)
16 #define PAGE_MASK (~((1 << PAGE_SHIFT) - 1))
H A Dmemory.h126 #define __phys_to_pfn(paddr) ((unsigned long)((paddr) >> PAGE_SHIFT))
127 #define __pfn_to_phys(pfn) ((phys_addr_t)(pfn) << PAGE_SHIFT)
169 #define PHYS_PFN_OFFSET ((unsigned long)(PHYS_OFFSET >> PAGE_SHIFT))
171 #define virt_to_pfn(kaddr) (__pa(kaddr) >> PAGE_SHIFT)
187 #define PHYS_OFFSET ((phys_addr_t)__pv_phys_pfn_offset << PAGE_SHIFT)
191 ((((unsigned long)(kaddr) - PAGE_OFFSET) >> PAGE_SHIFT) + \
253 #define PHYS_PFN_OFFSET ((unsigned long)(PHYS_OFFSET >> PAGE_SHIFT))
266 ((((unsigned long)(kaddr) - PAGE_OFFSET) >> PAGE_SHIFT) + \
294 #define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT)
H A Dpage-nommu.h14 #if !defined(CONFIG_SMALL_TASKS) && PAGE_SHIFT < 13
H A Dhighmem.h9 #define PKMAP_NR(virt) (((virt) - PKMAP_BASE) >> PAGE_SHIFT)
10 #define PKMAP_ADDR(nr) (PKMAP_BASE + ((nr) << PAGE_SHIFT))
/linux-4.1.27/arch/parisc/mm/
H A Dinit.c115 #define MAX_GAP (0x40000000UL >> PAGE_SHIFT)
189 size = (pmem_ranges[i].pages << PAGE_SHIFT); setup_bootmem()
190 start = (pmem_ranges[i].start_pfn << PAGE_SHIFT); setup_bootmem()
200 res->start = pmem_ranges[i].start_pfn << PAGE_SHIFT; setup_bootmem()
201 res->end = res->start + (pmem_ranges[i].pages << PAGE_SHIFT)-1; setup_bootmem()
221 rsize = pmem_ranges[i].pages << PAGE_SHIFT; setup_bootmem()
227 pmem_ranges[i].pages = (mem_limit >> PAGE_SHIFT) setup_bootmem()
228 - (mem_max >> PAGE_SHIFT); setup_bootmem()
268 bootmap_start_pfn = PAGE_ALIGN(__pa((unsigned long) &_end)) >> PAGE_SHIFT; setup_bootmem()
304 (start_pfn << PAGE_SHIFT), setup_bootmem()
305 (npages << PAGE_SHIFT) ); setup_bootmem()
306 bootmap_pfn += (bootmap_size + PAGE_SIZE - 1) >> PAGE_SHIFT; setup_bootmem()
331 reserve_bootmem_node(NODE_DATA(0), (bootmap_start_pfn << PAGE_SHIFT), setup_bootmem()
332 ((bootmap_pfn - bootmap_start_pfn) << PAGE_SHIFT), setup_bootmem()
341 (pmem_holes[i].start_pfn << PAGE_SHIFT), setup_bootmem()
342 (pmem_holes[i].pages << PAGE_SHIFT), setup_bootmem()
429 start_pte = ((start_vaddr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)); map_pages()
597 BUILD_BUG_ON(PAGE_SHIFT + BITS_PER_PTE + BITS_PER_PMD + BITS_PER_PGD mem_init()
600 high_memory = __va((max_pfn << PAGE_SHIFT)); mem_init()
714 start_paddr = pmem_ranges[range].start_pfn << PAGE_SHIFT; pagetable_init()
715 end_paddr = start_paddr + (pmem_ranges[range].pages << PAGE_SHIFT); pagetable_init()
716 size = pmem_ranges[range].pages << PAGE_SHIFT; pagetable_init()
/linux-4.1.27/arch/microblaze/kernel/
H A Dsys_microblaze.c43 return sys_mmap_pgoff(addr, len, prot, flags, fd, pgoff >> PAGE_SHIFT); SYSCALL_DEFINE6()
54 pgoff >> (PAGE_SHIFT - 12)); SYSCALL_DEFINE6()
/linux-4.1.27/arch/mips/ar7/
H A Dmemory.c63 pages = memsize() >> PAGE_SHIFT; prom_meminit()
64 add_memory_region(PHYS_OFFSET, pages << PAGE_SHIFT, BOOT_MEM_RAM); prom_meminit()
/linux-4.1.27/arch/sh/boot/romimage/
H A Dhead.S50 mov #(PAGE_SHIFT - 4), r4
69 mov #PAGE_SHIFT, r4
/linux-4.1.27/arch/mips/loongson/loongson-3/
H A Dnuma.c142 start_pfn = ((node_id << 44) + mem_start) >> PAGE_SHIFT; szmem()
143 node_psize = (mem_size << 20) >> PAGE_SHIFT; szmem()
156 start_pfn = ((node_id << 44) + mem_start) >> PAGE_SHIFT; szmem()
157 node_psize = (mem_size << 20) >> PAGE_SHIFT; szmem()
211 reserve_bootmem_node(NODE_DATA(node), start_pfn << PAGE_SHIFT, node_mem_init() local
212 ((freepfn - start_pfn) << PAGE_SHIFT) + bootmap_size, node_mem_init()
215 if (node == 0 && node_end_pfn(0) >= (0xffffffff >> PAGE_SHIFT)) { node_mem_init()
278 high_memory = (void *) __va(get_num_physpages() << PAGE_SHIFT); mem_init()
/linux-4.1.27/arch/frv/kernel/
H A Dsys_frv.c39 if (pgoff & ((1 << (PAGE_SHIFT - 12)) - 1)) sys_mmap2()
43 pgoff >> (PAGE_SHIFT - 12)); sys_mmap2()
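
sys_mmap2() takes its file offset in fixed 4096-byte units, so on kernels where PAGE_SHIFT is larger than 12 the offset must land on a whole page and be rescaled by PAGE_SHIFT - 12 before reaching sys_mmap_pgoff(); the microblaze, sparc and parisc wrappers elsewhere in this listing do the same. A sketch with a 16 KiB page size assumed:

#include <stdio.h>

#define PAGE_SHIFT 14			/* assume 16 KiB pages for the demo */

int main(void)
{
	unsigned long pgoff_4k = 8;	/* mmap2 offset in 4096-byte units (32 KiB) */

	/* Reject offsets that do not start on a whole native page... */
	if (pgoff_4k & ((1UL << (PAGE_SHIFT - 12)) - 1)) {
		printf("offset not page aligned\n");
		return 1;
	}
	/* ...then rescale to native page units before sys_mmap_pgoff(). */
	printf("4 KiB units: %lu -> native pages: %lu\n",
	       pgoff_4k, pgoff_4k >> (PAGE_SHIFT - 12));
	return 0;
}
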
/linux-4.1.27/arch/sparc/kernel/
H A Dsys_sparc_32.c48 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1))) arch_get_unmapped_area()
65 info.align_offset = pgoff << PAGE_SHIFT; arch_get_unmapped_area()
105 pgoff >> (PAGE_SHIFT - 12)); sys_mmap2()
113 return sys_mmap_pgoff(addr, len, prot, flags, fd, off >> PAGE_SHIFT); sys_mmap()
124 (pgoff >> (PAGE_SHIFT - 12)), flags); sparc_remap_file_pages()
H A Dsys_sparc_64.c81 unsigned long off = (pgoff<<PAGE_SHIFT) & (SHMLBA-1); COLOR_ALIGN()
99 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1))) arch_get_unmapped_area()
130 info.align_offset = pgoff << PAGE_SHIFT; arch_get_unmapped_area()
163 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1))) arch_get_unmapped_area_topdown()
193 info.align_offset = pgoff << PAGE_SHIFT; arch_get_unmapped_area_topdown()
269 rnd = (val % (1UL << (23UL-PAGE_SHIFT))); mmap_rnd()
271 rnd = (val % (1UL << (30UL-PAGE_SHIFT))); mmap_rnd()
273 return rnd << PAGE_SHIFT; mmap_rnd()
458 retval = sys_mmap_pgoff(addr, len, prot, flags, fd, off >> PAGE_SHIFT); SYSCALL_DEFINE6()
/linux-4.1.27/arch/tile/kernel/
H A Dvdso.c72 int data_pages = sizeof(vdso_data_store) >> PAGE_SHIFT; vdso_init()
96 vdso_pages = (vdso_end - vdso_start) >> PAGE_SHIFT; vdso_init()
101 vdso32_pages = (vdso32_end - vdso32_start) >> PAGE_SHIFT; vdso_init()
154 (pages << PAGE_SHIFT) + setup_vdso_pages()
183 pages << PAGE_SHIFT, setup_vdso_pages()
/linux-4.1.27/arch/mips/sgi-ip27/
H A Dip27-memory.c34 #define SLOT_PFNSHIFT (SLOT_SHIFT - PAGE_SHIFT)
35 #define PFN_NASIDSHFT (NASID_SHFT - PAGE_SHIFT)
290 return size >> PAGE_SHIFT; slot_psize_compute()
296 return size >> PAGE_SHIFT; slot_psize_compute()
375 if ((nodebytes >> PAGE_SHIFT) * (sizeof(struct page)) > for_each_online_node()
376 (slot0sz << PAGE_SHIFT)) { for_each_online_node()
400 __node_data[node] = __va(slot_freepfn << PAGE_SHIFT); node_mem_init()
415 reserve_bootmem_node(NODE_DATA(node), slot_firstpfn << PAGE_SHIFT, node_mem_init() local
416 ((slot_freepfn - slot_firstpfn) << PAGE_SHIFT) + bootmap_size, node_mem_init()
480 high_memory = (void *) __va(get_num_physpages() << PAGE_SHIFT); mem_init()
/linux-4.1.27/arch/ia64/mm/
H A Dinit.c335 # define mapped_space_bits (3*(PAGE_SHIFT - pte_bits) + PAGE_SHIFT) ia64_mmu_init()
343 # define vmlpt_bits (impl_va_bits - PAGE_SHIFT + pte_bits) ia64_mmu_init()
351 * mapped_space_bits - PAGE_SHIFT is the total number of ptes we need, ia64_mmu_init()
356 if ((mapped_space_bits - PAGE_SHIFT > vmlpt_bits - pte_bits) || ia64_mmu_init()
447 map_start = vmem_map + (__pa(start) >> PAGE_SHIFT); create_mem_map_page_table()
448 map_end = vmem_map + (__pa(end) >> PAGE_SHIFT); create_mem_map_page_table()
469 set_pte(pte, pfn_pte(__pa(alloc_bootmem_pages_node(NODE_DATA(node), PAGE_SIZE)) >> PAGE_SHIFT, create_mem_map_page_table()
489 map_start = vmem_map + (__pa(start) >> PAGE_SHIFT); virtual_memmap_init()
490 map_end = vmem_map + (__pa(end) >> PAGE_SHIFT); virtual_memmap_init()
582 pfn_start = (PAGE_ALIGN(__pa(start))) >> PAGE_SHIFT; find_max_min_low_pfn()
583 pfn_end = (PAGE_ALIGN(__pa(end - 1))) >> PAGE_SHIFT; find_max_min_low_pfn()
585 pfn_start = GRANULEROUNDDOWN(__pa(start)) >> PAGE_SHIFT; find_max_min_low_pfn()
586 pfn_end = GRANULEROUNDUP(__pa(end - 1)) >> PAGE_SHIFT; find_max_min_low_pfn()
659 unsigned long start_pfn = start >> PAGE_SHIFT; arch_add_memory()
660 unsigned long nr_pages = size >> PAGE_SHIFT; arch_add_memory()
679 unsigned long start_pfn = start >> PAGE_SHIFT; arch_remove_memory()
680 unsigned long nr_pages = size >> PAGE_SHIFT; arch_remove_memory()
/linux-4.1.27/arch/xtensa/mm/
H A Dtlb.c30 int e = w + (i << PAGE_SHIFT); __flush_itlb_all()
43 int e = w + (i << PAGE_SHIFT); __flush_dtlb_all()
104 if (end-start + (PAGE_SIZE-1) <= _TLB_ENTRIES << PAGE_SHIFT) { local_flush_tlb_range()
155 end - start < _TLB_ENTRIES << PAGE_SHIFT) { local_flush_tlb_kernel_range()
218 unsigned tlbidx = w | (e << PAGE_SHIFT); check_tlb_entry()
221 unsigned vpn = (r0 & PAGE_MASK) | (e << PAGE_SHIFT); check_tlb_entry()
242 struct page *p = pfn_to_page(r1 >> PAGE_SHIFT); check_tlb_entry()
H A Dinit.c258 pfn = PAGE_ALIGN(sysmem.bank[i].start) >> PAGE_SHIFT; bootmem_init()
261 pfn = PAGE_ALIGN(sysmem.bank[i].end - 1) >> PAGE_SHIFT; bootmem_init()
269 max_low_pfn = max_pfn < MAX_MEM_PFN >> PAGE_SHIFT ? bootmem_init()
270 max_pfn : MAX_MEM_PFN >> PAGE_SHIFT; bootmem_init()
275 bootmap_size <<= PAGE_SHIFT; bootmem_init() local
291 bootmap_start >> PAGE_SHIFT, bootmem_init()
298 if (sysmem.bank[i].start >> PAGE_SHIFT < max_low_pfn) { bootmem_init()
299 unsigned long end = min(max_low_pfn << PAGE_SHIFT, bootmem_init()
336 high_memory = (void *)__va(max_low_pfn << PAGE_SHIFT); mem_init()
/linux-4.1.27/arch/s390/mm/
H A Dmmap.c43 return STACK_RND_MASK << PAGE_SHIFT; stack_maxrandom_size()
66 return (get_random_int() & 0x7ff) << PAGE_SHIFT; arch_mmap_rnd()
68 return (get_random_int() & mmap_rnd_mask) << PAGE_SHIFT; arch_mmap_rnd()
119 info.align_mask = do_color_align ? (mmap_align_mask << PAGE_SHIFT) : 0; arch_get_unmapped_area()
120 info.align_offset = pgoff << PAGE_SHIFT; arch_get_unmapped_area()
159 info.align_mask = do_color_align ? (mmap_align_mask << PAGE_SHIFT) : 0; arch_get_unmapped_area_topdown()
160 info.align_offset = pgoff << PAGE_SHIFT; arch_get_unmapped_area_topdown()
/linux-4.1.27/arch/m68k/mm/
H A Dmotorola.c252 min_low_pfn = availmem >> PAGE_SHIFT; paging_init()
253 max_low_pfn = max_addr >> PAGE_SHIFT; paging_init()
261 availmem >> PAGE_SHIFT, paging_init()
262 addr >> PAGE_SHIFT, paging_init()
263 end >> PAGE_SHIFT); paging_init()
301 zones_size[ZONE_DMA] = m68k_memory[i].size >> PAGE_SHIFT; paging_init()
303 m68k_memory[i].addr >> PAGE_SHIFT, NULL); paging_init()
H A Dsun3mmu.c88 zones_size[ZONE_DMA] = ((unsigned long)high_memory - PAGE_OFFSET) >> PAGE_SHIFT; paging_init()
93 (__pa(PAGE_OFFSET) >> PAGE_SHIFT) + 1, NULL); paging_init()
/linux-4.1.27/drivers/xen/
H A Dxen-balloon.c46 #define PAGES2KB(_p) ((_p)<<(PAGE_SHIFT-10))
68 * pages. PAGE_SHIFT converts bytes to pages, hence PAGE_SHIFT - 10. watch_target()
70 balloon_set_new_target(new_target >> (PAGE_SHIFT - 10)); watch_target()
157 balloon_set_new_target(target_bytes >> PAGE_SHIFT); store_target_kb()
171 << PAGE_SHIFT); show_target()
187 balloon_set_new_target(target_bytes >> PAGE_SHIFT); store_target()
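
The balloon driver converts between the three units in play here: xenstore targets in KiB, sysfs targets in bytes, and the page counts the balloon works in; shifting by PAGE_SHIFT - 10 goes from KiB to pages and by PAGE_SHIFT from bytes to pages. A worked example assuming 4 KiB pages:

#include <stdio.h>

#define PAGE_SHIFT 12			/* assumed 4 KiB pages */

int main(void)
{
	unsigned long long target_kb    = 1048576;	/* 1 GiB expressed in KiB */
	unsigned long long target_bytes = target_kb << 10;

	printf("%llu KiB   -> %llu pages\n", target_kb,
	       target_kb >> (PAGE_SHIFT - 10));		/* 262144 pages */
	printf("%llu bytes -> %llu pages\n", target_bytes,
	       target_bytes >> PAGE_SHIFT);		/* same 262144 pages */
	return 0;
}
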
H A Dprivcmd.c210 if ((msg->npages > (LONG_MAX >> PAGE_SHIFT)) || mmap_mfn_range()
211 ((unsigned long)(msg->npages << PAGE_SHIFT) >= -st->va)) mmap_mfn_range()
216 ((msg->va+(msg->npages<<PAGE_SHIFT)) > vma->vm_end)) mmap_mfn_range()
227 st->va += msg->npages << PAGE_SHIFT; mmap_mfn_range()
450 if ((m.num <= 0) || (nr_pages > (LONG_MAX >> PAGE_SHIFT))) privcmd_ioctl_mmap_batch()
492 m.addr + (nr_pages << PAGE_SHIFT) != vma->vm_end) { privcmd_ioctl_mmap_batch()
504 m.addr + (nr_pages << PAGE_SHIFT) > vma->vm_end) { privcmd_ioctl_mmap_batch()
584 int numpgs = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT; privcmd_close()
641 return apply_to_page_range(vma->vm_mm, addr, nr_pages << PAGE_SHIFT, privcmd_vma_range_is_mapped()
/linux-4.1.27/arch/parisc/kernel/
H A Dsys_parisc.c48 return (last_mmap & (SHM_COLOUR-1)) >> PAGE_SHIFT; get_offset()
54 return (get_offset(last_mmap) + pgoff) << PAGE_SHIFT; shared_align_offset()
62 (shared_align_offset(last_mmap, pgoff) << PAGE_SHIFT); COLOR_ALIGN()
81 stack_base += (STACK_RND_MASK << PAGE_SHIFT); mmap_upper_limit()
134 SET_LAST_MMAP(filp, addr - (pgoff << PAGE_SHIFT)); arch_get_unmapped_area()
205 SET_LAST_MMAP(filp, addr - (pgoff << PAGE_SHIFT)); arch_get_unmapped_area_topdown()
238 return rnd << PAGE_SHIFT; mmap_rnd()
271 pgoff >> (PAGE_SHIFT - 12)); sys_mmap2()
280 offset >> PAGE_SHIFT); sys_mmap()
H A Dpacache.S567 #define PAGE_ADD_SHIFT (PAGE_SHIFT-12)
608 depdi 0, 63,PAGE_SHIFT, %r28 /* Clear any offset bits */
615 depwi 0, 31,PAGE_SHIFT, %r28 /* Clear any offset bits */
761 depdi 0, 63,PAGE_SHIFT, %r28 /* Clear any offset bits */
765 depwi 0, 31,PAGE_SHIFT, %r28 /* Clear any offset bits */
845 depdi 0, 63,PAGE_SHIFT, %r28 /* Clear any offset bits */
849 depwi 0, 31,PAGE_SHIFT, %r28 /* Clear any offset bits */
866 depdi,z 1, 63-PAGE_SHIFT,1, %r25
868 depwi,z 1, 31-PAGE_SHIFT,1, %r25
921 depdi 0, 63,PAGE_SHIFT, %r28 /* Clear any offset bits */
925 depwi 0, 31,PAGE_SHIFT, %r28 /* Clear any offset bits */
942 depdi,z 1, 63-PAGE_SHIFT,1, %r25
944 depwi,z 1, 31-PAGE_SHIFT,1, %r25
996 depdi,z 1, 63-PAGE_SHIFT,1, %r25
998 depwi,z 1, 31-PAGE_SHIFT,1, %r25
1039 depdi,z 1, 63-PAGE_SHIFT,1, %r25
1041 depwi,z 1, 31-PAGE_SHIFT,1, %r25
1145 depdi,z 1, 63-PAGE_SHIFT,1, %r25
1147 depwi,z 1, 31-PAGE_SHIFT,1, %r25
/linux-4.1.27/drivers/infiniband/hw/usnic/
H A Dusnic_uiom.c129 npages = PAGE_ALIGN(size + (addr & ~PAGE_MASK)) >> PAGE_SHIFT; usnic_uiom_get_pages()
134 lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT; usnic_uiom_get_pages()
204 va = interval->start << PAGE_SHIFT; list_for_each_entry_safe()
205 size = ((interval->last - interval->start) + 1) << PAGE_SHIFT; list_for_each_entry_safe()
226 npages = PAGE_ALIGN(uiomr->length + uiomr->offset) >> PAGE_SHIFT; __usnic_uiom_reg_release()
227 vpn_start = (uiomr->va & PAGE_MASK) >> PAGE_SHIFT; __usnic_uiom_reg_release()
268 if ((va >> PAGE_SHIFT) < interval_node->start) list_for_each_entry()
271 if ((va >> PAGE_SHIFT) == interval_node->start) { list_for_each_entry()
298 if ((va >> PAGE_SHIFT) == interval_node->last) { list_for_each_entry()
357 npages = PAGE_ALIGN(size + offset) >> PAGE_SHIFT; usnic_uiom_reg_get()
358 vpn_start = (addr & PAGE_MASK) >> PAGE_SHIFT; usnic_uiom_reg_get()
437 diff = PAGE_ALIGN(uiomr->length + uiomr->offset) >> PAGE_SHIFT; usnic_uiom_reg_release()
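
usnic_uiom_get_pages() sizes its pin by the pages the user buffer actually touches: the offset into the first page is added to the length before rounding up, so an unaligned buffer that crosses a page boundary counts both pages. A sketch of that calculation with 4 KiB pages assumed:

#include <stdio.h>

#define PAGE_SHIFT 12			/* assumed 4 KiB pages */
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_MASK  (~(PAGE_SIZE - 1))
#define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & PAGE_MASK)

int main(void)
{
	unsigned long addr = 0x10000ff0UL;	/* 16 bytes before a page boundary */
	unsigned long size = 0x20;		/* 32-byte buffer crossing it */

	/* Offset within the first page plus length, rounded up to pages. */
	unsigned long npages = PAGE_ALIGN(size + (addr & ~PAGE_MASK)) >> PAGE_SHIFT;

	printf("buffer at 0x%lx, %lu bytes -> %lu pages\n", addr, size, npages);
	return 0;
}
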
/linux-4.1.27/arch/x86/realmode/
H A Dinit.c116 set_memory_nx((unsigned long) base, size >> PAGE_SHIFT); set_real_mode_permissions()
117 set_memory_ro((unsigned long) base, ro_size >> PAGE_SHIFT); set_real_mode_permissions()
118 set_memory_x((unsigned long) text_start, text_size >> PAGE_SHIFT); set_real_mode_permissions()
/linux-4.1.27/arch/arm/xen/
H A Dp2m.c96 set_phys_to_machine(map_ops[i].host_addr >> PAGE_SHIFT, set_foreign_p2m_mapping()
97 map_ops[i].dev_bus_addr >> PAGE_SHIFT); set_foreign_p2m_mapping()
111 set_phys_to_machine(unmap_ops[i].host_addr >> PAGE_SHIFT, clear_foreign_p2m_mapping()
/linux-4.1.27/arch/arc/include/asm/
H A Dpage.h74 #define ARCH_PFN_OFFSET (CONFIG_LINUX_LINK_BASE >> PAGE_SHIFT)
95 (mem_map + ((__pa(kaddr) - CONFIG_LINUX_LINK_BASE) >> PAGE_SHIFT))
97 #define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
/linux-4.1.27/drivers/sbus/char/
H A Dflash.c64 if ((vma->vm_pgoff << PAGE_SHIFT) > size) flash_mmap()
66 addr = vma->vm_pgoff + (addr >> PAGE_SHIFT); flash_mmap()
68 if (vma->vm_end - (vma->vm_start + (vma->vm_pgoff << PAGE_SHIFT)) > size) flash_mmap()
69 size = vma->vm_end - (vma->vm_start + (vma->vm_pgoff << PAGE_SHIFT)); flash_mmap()
/linux-4.1.27/include/trace/events/
H A Dfilemap.h42 __entry->index << PAGE_SHIFT)
/linux-4.1.27/arch/s390/boot/compressed/
H A Dhead.S44 .quad 0x8000 + (1<<(PAGE_SHIFT+THREAD_ORDER))
/linux-4.1.27/arch/mips/kvm/
H A Dtlb.c181 gfn = (KVM_GUEST_CPHYSADDR(gva) >> PAGE_SHIFT); kvm_mips_translate_guest_kseg0_to_hpa()
192 return (kvm->arch.guest_pmap[gfn] << PAGE_SHIFT) + offset; kvm_mips_translate_guest_kseg0_to_hpa()
248 (0x1 << PAGE_SHIFT)); kvm_mips_host_tlb_write()
278 gfn = (KVM_GUEST_CPHYSADDR(badvaddr) >> PAGE_SHIFT); kvm_mips_handle_kseg0_tlb_fault()
303 entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) | kvm_mips_handle_kseg0_tlb_fault()
305 entrylo1 = mips3_paddr_to_tlbpfn(pfn1 << PAGE_SHIFT) | (0x3 << 3) | kvm_mips_handle_kseg0_tlb_fault()
320 pfn0 = CPHYSADDR(vcpu->arch.kseg0_commpage) >> PAGE_SHIFT; kvm_mips_handle_commpage_tlb_fault()
322 entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) | kvm_mips_handle_commpage_tlb_fault()
370 >> PAGE_SHIFT) < 0) kvm_mips_handle_mapped_seg_tlb_fault()
374 >> PAGE_SHIFT) < 0) kvm_mips_handle_mapped_seg_tlb_fault()
378 >> PAGE_SHIFT]; kvm_mips_handle_mapped_seg_tlb_fault()
380 >> PAGE_SHIFT]; kvm_mips_handle_mapped_seg_tlb_fault()
384 *hpa0 = pfn0 << PAGE_SHIFT; kvm_mips_handle_mapped_seg_tlb_fault()
387 *hpa1 = pfn1 << PAGE_SHIFT; kvm_mips_handle_mapped_seg_tlb_fault()
393 entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) | kvm_mips_handle_mapped_seg_tlb_fault()
395 entrylo1 = mips3_paddr_to_tlbpfn(pfn1 << PAGE_SHIFT) | (0x3 << 3) | kvm_mips_handle_mapped_seg_tlb_fault()
/linux-4.1.27/arch/mn10300/kernel/
H A Dsys_mn10300.c32 return sys_mmap_pgoff(addr, len, prot, flags, fd, offset >> PAGE_SHIFT); old_mmap()
/linux-4.1.27/drivers/infiniband/hw/mthca/
H A Dmthca_uar.c33 #include <asm/page.h> /* PAGE_SHIFT */
44 uar->pfn = (pci_resource_start(dev->pdev, 2) >> PAGE_SHIFT) + uar->index; mthca_uar_alloc()
H A Dmthca_allocator.c120 int p = (index * sizeof (void *)) >> PAGE_SHIFT; mthca_array_get()
130 int p = (index * sizeof (void *)) >> PAGE_SHIFT; mthca_array_set()
147 int p = (index * sizeof (void *)) >> PAGE_SHIFT; mthca_array_clear()
207 shift = get_order(size) + PAGE_SHIFT; mthca_buf_alloc()
232 shift = PAGE_SHIFT; mthca_buf_alloc()
/linux-4.1.27/arch/xtensa/kernel/
H A Dsyscall.c41 (((pgoff) << PAGE_SHIFT) & (SHMLBA - 1)))
71 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1))) arch_get_unmapped_area()
/linux-4.1.27/drivers/gpu/drm/nouveau/
H A Dnouveau_sgdma.c39 node->size = (mem->num_pages << PAGE_SHIFT) >> 12; nv04_sgdma_bind()
74 node->size = (mem->num_pages << PAGE_SHIFT) >> 12; nv50_sgdma_bind()
/linux-4.1.27/arch/microblaze/mm/
H A Dinit.c82 if (!memblock_is_reserved(pfn << PAGE_SHIFT)) highmem_setup()
166 min_low_pfn = memory_start >> PAGE_SHIFT; /* minimum for allocation */
168 max_mapnr = memory_size >> PAGE_SHIFT;
169 max_low_pfn = ((u64)memory_start + (u64)lowmem_size) >> PAGE_SHIFT;
170 max_pfn = ((u64)memory_start + (u64)memory_size) >> PAGE_SHIFT;
186 memblock_reserve(PFN_UP(TOPHYS((u32)klimit)) << PAGE_SHIFT, map_size);
194 memblock_set_node(start_pfn << PAGE_SHIFT, for_each_memblock()
195 (end_pfn - start_pfn) << PAGE_SHIFT, for_each_memblock()
/linux-4.1.27/fs/ncpfs/
H A Dmmap.c48 pos = vmf->pgoff << PAGE_SHIFT; ncp_file_mmap_fault()
119 > (1U << (32 - PAGE_SHIFT))) ncp_mmap()
/linux-4.1.27/arch/s390/pci/
H A Dpci_mmio.c58 io_addr = (void __iomem *)((pfn << PAGE_SHIFT) | (mmio_addr & ~PAGE_MASK)); SYSCALL_DEFINE3()
98 io_addr = (void __iomem *)((pfn << PAGE_SHIFT) | (mmio_addr & ~PAGE_MASK)); SYSCALL_DEFINE3()
/linux-4.1.27/arch/x86/kvm/
H A Diommu.c108 while ((gfn + (page_size >> PAGE_SHIFT)) > end_gfn) kvm_iommu_map_pages()
112 while ((gfn << PAGE_SHIFT) & (page_size - 1)) kvm_iommu_map_pages()
123 pfn = kvm_pin_pages(slot, gfn, page_size >> PAGE_SHIFT); kvm_iommu_map_pages()
135 kvm_unpin_pages(kvm, pfn, page_size >> PAGE_SHIFT); kvm_iommu_map_pages()
139 gfn += page_size >> PAGE_SHIFT; kvm_iommu_map_pages()
299 pfn = phys >> PAGE_SHIFT; kvm_iommu_put_pages()
/linux-4.1.27/arch/alpha/kernel/
H A Dpci-sysfs.c28 vma->vm_pgoff += base >> PAGE_SHIFT; hose_mmap_page_range()
41 nr = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT; __pci_mmap_fits()
43 size = ((pci_resource_len(pdev, num) - 1) >> (PAGE_SHIFT - shift)) + 1; __pci_mmap_fits()
87 vma->vm_pgoff += bar.start >> (PAGE_SHIFT - (sparse ? 5 : 0)); pci_mmap_resource()
258 nr = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT; __legacy_mmap_fits()
260 size = ((res_size - 1) >> PAGE_SHIFT) + 1; __legacy_mmap_fits()
/linux-4.1.27/arch/alpha/mm/
H A Dnuma.c70 node_pfn_start = (node_mem_start(nid)) >> PAGE_SHIFT; setup_memory_node()
71 node_pfn_end = node_pfn_start + (node_mem_size(nid) >> PAGE_SHIFT); setup_memory_node()
117 mem_size_limit << (PAGE_SHIFT - 10),
118 node_max_pfn << (PAGE_SHIFT - 10));
134 node_data[nid] = (pg_data_t *)(__va(node_min_pfn << PAGE_SHIFT));
302 dma_local_pfn = virt_to_phys((char *)MAX_DMA_ADDRESS) >> PAGE_SHIFT; paging_init()
/linux-4.1.27/drivers/gpu/drm/gma500/
H A Dmmu.c146 PSB_WSGX32(page_to_pfn(pd->p) << PAGE_SHIFT, offset); psb_mmu_set_pd_context()
172 return (pfn << PAGE_SHIFT) | mask; psb_mmu_mask_pte()
282 uint32_t clflush_add = pd->driver->clflush_add >> PAGE_SHIFT; psb_mmu_alloc_pt()
435 return page_to_pfn(pd->p) << PAGE_SHIFT; psb_get_default_pd_addr()
532 add = desired_tile_stride << PAGE_SHIFT; psb_mmu_flush_ptes()
533 row_add = hw_tile_stride << PAGE_SHIFT; psb_mmu_flush_ptes()
577 end = addr + (num_pages << PAGE_SHIFT); psb_mmu_remove_pfn_sequence()
623 add = desired_tile_stride << PAGE_SHIFT; psb_mmu_remove_pages()
624 row_add = hw_tile_stride << PAGE_SHIFT; psb_mmu_remove_pages()
675 end = addr + (num_pages << PAGE_SHIFT); psb_mmu_insert_pfn_sequence()
731 add = desired_tile_stride << PAGE_SHIFT; psb_mmu_insert_pages()
732 row_add = hw_tile_stride << PAGE_SHIFT; psb_mmu_insert_pages()
798 *pfn = pd->invalid_pte >> PAGE_SHIFT; psb_mmu_virtual_to_pfn()
806 *pfn = tmp >> PAGE_SHIFT; psb_mmu_virtual_to_pfn()
/linux-4.1.27/arch/powerpc/kvm/
H A Dbook3s_32_mmu_host.c45 #if PAGE_SHIFT != 12
166 hpaddr <<= PAGE_SHIFT; kvmppc_mmu_map_page() local
211 mark_page_dirty(vcpu->kvm, orig_pte->raddr >> PAGE_SHIFT); kvmppc_mmu_map_page()
217 kvmppc_mmu_flush_icache(hpaddr >> PAGE_SHIFT); kvmppc_mmu_map_page()
246 kvm_release_pfn_clean(hpaddr >> PAGE_SHIFT); kvmppc_mmu_map_page()
260 pte->pfn = hpaddr >> PAGE_SHIFT; kvmppc_mmu_map_page()
264 kvm_release_pfn_clean(hpaddr >> PAGE_SHIFT); kvmppc_mmu_map_page()
H A Dbook3s_hv_builtin.c35 #define HPT_ALIGN_PAGES ((1 << 18) >> PAGE_SHIFT) /* 256k */
54 VM_BUG_ON(order_base_2(nr_pages) < KVM_CMA_CHUNK_ORDER - PAGE_SHIFT); kvm_alloc_hpt()
93 selected_size = (selected_size * kvm_cma_resv_ratio / 100) << PAGE_SHIFT; kvm_cma_reserve()
97 align_size = HPT_ALIGN_PAGES << PAGE_SHIFT; kvm_cma_reserve()
99 KVM_CMA_CHUNK_ORDER - PAGE_SHIFT, false, &kvm_cma); kvm_cma_reserve()
/linux-4.1.27/arch/x86/kernel/
H A Damd_gart_64.c102 PAGE_SIZE) >> PAGE_SHIFT; alloc_iommu()
104 PAGE_SIZE) >> PAGE_SHIFT; alloc_iommu()
277 iommu_page = (dma_addr - iommu_bus_base)>>PAGE_SHIFT; gart_unmap_page()
471 iommu_full(dev, pages << PAGE_SHIFT, dir);
673 gatt_size = (aper_size >> PAGE_SHIFT) * sizeof(u32); init_amd_gatt()
678 if (set_memory_uc((unsigned long)gatt, gatt_size >> PAGE_SHIFT)) init_amd_gatt()
769 end_pfn = (aper_base>>PAGE_SHIFT) + (aper_size>>PAGE_SHIFT); gart_iommu_init()
773 init_memory_mapping(start_pfn<<PAGE_SHIFT, end_pfn<<PAGE_SHIFT); gart_iommu_init()
777 iommu_pages = iommu_size >> PAGE_SHIFT; gart_iommu_init()
807 iommu_gatt_base = agp_gatt_table + (iommu_start>>PAGE_SHIFT); gart_iommu_init()
819 iommu_size >> PAGE_SHIFT); gart_iommu_init()
/linux-4.1.27/drivers/edac/
H A Dpasemi_edac.c157 dimm->nr_pages = 128 << (20 - PAGE_SHIFT); pasemi_edac_init_csrows()
160 dimm->nr_pages = 256 << (20 - PAGE_SHIFT); pasemi_edac_init_csrows()
164 dimm->nr_pages = 512 << (20 - PAGE_SHIFT); pasemi_edac_init_csrows()
167 dimm->nr_pages = 1024 << (20 - PAGE_SHIFT); pasemi_edac_init_csrows()
170 dimm->nr_pages = 2048 << (20 - PAGE_SHIFT); pasemi_edac_init_csrows()
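
pasemi_edac reports DIMM sizes as page counts, converting from megabytes with size_in_MiB << (20 - PAGE_SHIFT). The same arithmetic, assuming 4 KiB pages, for the five sizes listed above:

#include <stdio.h>

#define PAGE_SHIFT 12			/* assumed 4 KiB pages */

int main(void)
{
	unsigned int mib[] = { 128, 256, 512, 1024, 2048 };

	for (unsigned int i = 0; i < sizeof(mib) / sizeof(mib[0]); i++)
		printf("%4u MiB -> %u pages\n",
		       mib[i], mib[i] << (20 - PAGE_SHIFT));
	return 0;
}
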

