
Searched refs:PAGE_SHIFT (Results 1 – 200 of 1200) sorted by relevance


/linux-4.1.27/arch/sparc/mm/
Dinit_32.c63 get_nr_swap_pages() << (PAGE_SHIFT-10)); in show_mem()
77 unsigned long start_pfn = sp_banks[i].base_addr >> PAGE_SHIFT; in calc_highpages()
78 unsigned long end_pfn = (sp_banks[i].base_addr + sp_banks[i].num_bytes) >> PAGE_SHIFT; in calc_highpages()
95 unsigned long tmp = pfn_base + (SRMMU_MAXMEM >> PAGE_SHIFT); in calc_max_low_pfn()
98 last_pfn = (sp_banks[0].base_addr + sp_banks[0].num_bytes) >> PAGE_SHIFT; in calc_max_low_pfn()
100 curr_pfn = sp_banks[i].base_addr >> PAGE_SHIFT; in calc_max_low_pfn()
108 last_pfn = (sp_banks[i].base_addr + sp_banks[i].num_bytes) >> PAGE_SHIFT; in calc_max_low_pfn()
151 start_pfn >>= PAGE_SHIFT; in bootmem_init()
155 max_pfn = end_of_phys_memory >> PAGE_SHIFT; in bootmem_init()
160 if (max_low_pfn > pfn_base + (SRMMU_MAXMEM >> PAGE_SHIFT)) { in bootmem_init()
[all …]
Diommu.c122 iommu->usemap.num_colors = vac_cache_size >> PAGE_SHIFT; in sbus_iommu_init()
189 busa0 = iommu->start + (ioptex << PAGE_SHIFT); in iommu_get_one()
215 npages = (off + len + PAGE_SIZE-1) >> PAGE_SHIFT; in iommu_get_scsi_one()
245 n = (sg->length + sg->offset + PAGE_SIZE-1) >> PAGE_SHIFT; in iommu_get_scsi_sgl_gflush()
260 n = (sg->length + sg->offset + PAGE_SIZE-1) >> PAGE_SHIFT; in iommu_get_scsi_sgl_pflush()
290 ioptex = (busa - iommu->start) >> PAGE_SHIFT; in iommu_release_one()
305 npages = (off + len + PAGE_SIZE-1) >> PAGE_SHIFT; in iommu_release_scsi_one()
316 n = (sg->length + sg->offset + PAGE_SIZE-1) >> PAGE_SHIFT; in iommu_release_scsi_sgl()
338 ioptex = bit_map_string_get(&iommu->usemap, len >> PAGE_SHIFT, in iommu_map_dma_area()
339 addr >> PAGE_SHIFT); in iommu_map_dma_area()
[all …]
Dgup.c84 page = head + ((addr & ~PMD_MASK) >> PAGE_SHIFT); in gup_huge_pmd()
174 len = (unsigned long) nr_pages << PAGE_SHIFT; in __get_user_pages_fast()
204 len = (unsigned long) nr_pages << PAGE_SHIFT; in get_user_pages_fast()
239 VM_BUG_ON(nr != (end - start) >> PAGE_SHIFT); in get_user_pages_fast()
249 start += nr << PAGE_SHIFT; in get_user_pages_fast()
253 (end - start) >> PAGE_SHIFT, write, 0, pages); in get_user_pages_fast()
Dio-unit.c100 npages = ((vaddr & ~PAGE_MASK) + size + (PAGE_SIZE-1)) >> PAGE_SHIFT; in iounit_get_area()
133 vaddr = IOUNIT_DMA_BASE + (scan << PAGE_SHIFT) + (vaddr & ~PAGE_MASK); in iounit_get_area()
175 len = ((vaddr & ~PAGE_MASK) + len + (PAGE_SIZE-1)) >> PAGE_SHIFT; in iounit_release_scsi_one()
176 vaddr = (vaddr - IOUNIT_DMA_BASE) >> PAGE_SHIFT; in iounit_release_scsi_one()
192 len = ((sg->dma_address & ~PAGE_MASK) + sg->length + (PAGE_SIZE-1)) >> PAGE_SHIFT; in iounit_release_scsi_sgl()
193 vaddr = (sg->dma_address - IOUNIT_DMA_BASE) >> PAGE_SHIFT; in iounit_release_scsi_sgl()
228 i = ((addr - IOUNIT_DMA_BASE) >> PAGE_SHIFT); in iounit_map_dma_area()
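
The sparc iommu.c and io-unit.c hits above repeatedly turn a byte range into a page count with the idiom ((vaddr & ~PAGE_MASK) + len + (PAGE_SIZE-1)) >> PAGE_SHIFT. The following standalone sketch (not kernel code) shows that arithmetic in isolation; the helper name and the 4 KiB PAGE_SHIFT value are illustrative assumptions, not taken from the files listed.

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_MASK  (~(PAGE_SIZE - 1))

/* Number of pages touched by 'len' bytes starting at virtual address 'vaddr'. */
static unsigned long addr_range_to_npages(unsigned long vaddr, unsigned long len)
{
	unsigned long off = vaddr & ~PAGE_MASK;        /* offset of vaddr within its page */

	return (off + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
}

int main(void)
{
	printf("%lu\n", addr_range_to_npages(0x1fff, 1)); /* 1 page  */
	printf("%lu\n", addr_range_to_npages(0x1fff, 2)); /* 2 pages: the range crosses a page boundary */
	printf("%lu\n", addr_range_to_npages(0, 8192));   /* 2 pages */
	return 0;
}
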
/linux-4.1.27/arch/hexagon/include/asm/
Dpage.h29 #define PAGE_SHIFT 12 macro
34 #define PAGE_SHIFT 14 macro
39 #define PAGE_SHIFT 16 macro
44 #define PAGE_SHIFT 18 macro
49 #define PAGE_SHIFT 20 macro
62 #define HUGETLB_PAGE_ORDER (HPAGE_SHIFT-PAGE_SHIFT)
66 #define PAGE_SIZE (1UL << PAGE_SHIFT)
67 #define PAGE_MASK (~((1 << PAGE_SHIFT) - 1))
113 #define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
141 #define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT)
[all …]
Dmem-layout.h51 #define PHYS_PFN_OFFSET (PHYS_OFFSET >> PAGE_SHIFT)
89 #define FIXADDR_SIZE (__end_of_fixed_addresses << PAGE_SHIFT)
99 #define PKMAP_NR(virt) ((virt - PKMAP_BASE) >> PAGE_SHIFT)
100 #define PKMAP_ADDR(nr) (PKMAP_BASE + ((nr) << PAGE_SHIFT))
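
The hexagon page.h and mem-layout.h hits above all reduce to one relationship: a page frame number is a physical address shifted right by PAGE_SHIFT, and shifting left goes back. The sketch below (not kernel code) makes the arithmetic visible; the 16 KiB page size, the PHYS_OFFSET value, and the helper names are illustrative assumptions.

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT      14                 /* 16 KiB pages, one of the sizes listed above */
#define PAGE_SIZE       (1UL << PAGE_SHIFT)
#define PHYS_OFFSET     0x40000000UL       /* assumed start of RAM */
#define PHYS_PFN_OFFSET (PHYS_OFFSET >> PAGE_SHIFT)

static unsigned long phys_to_pfn(uint64_t paddr) { return paddr >> PAGE_SHIFT; }
static uint64_t pfn_to_phys(unsigned long pfn)   { return (uint64_t)pfn << PAGE_SHIFT; }

int main(void)
{
	uint64_t paddr = PHYS_OFFSET + 5 * PAGE_SIZE + 123;   /* somewhere in the sixth RAM page */
	unsigned long pfn = phys_to_pfn(paddr);

	printf("pfn             = %lu\n", pfn);                        /* PHYS_PFN_OFFSET + 5 */
	printf("page start      = %#llx\n", (unsigned long long)pfn_to_phys(pfn));
	printf("index into RAM  = %lu\n", pfn - PHYS_PFN_OFFSET);      /* 5 */
	return 0;
}
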
/linux-4.1.27/arch/tile/include/asm/
Dpage.h24 #define PAGE_SHIFT 14 macro
27 #define PAGE_SHIFT 16 macro
30 #define PAGE_SHIFT HV_LOG2_DEFAULT_PAGE_SIZE_SMALL macro
35 #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
47 #define CONFIG_FORCE_MAX_ZONEORDER (HPAGE_SHIFT - PAGE_SHIFT + 1)
132 return BITS_PER_LONG - __builtin_clzl((size - 1) >> PAGE_SHIFT); in get_order()
137 #define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT)
153 #define __pfn_to_highbits(pfn) ((pfn) >> (NR_PA_HIGHBIT_SHIFT - PAGE_SHIFT))
250 ((kaddr & (HPAGE_SIZE - 1)) >> PAGE_SHIFT); in kaddr_to_pfn()
255 return vbase_map[__pfn_to_highbits(pfn)] + (pfn << PAGE_SHIFT); in pfn_to_kaddr()
[all …]
Dpgalloc.h26 #define L2_KERNEL_PGTABLE_SHIFT _HV_LOG2_L2_SIZE(HPAGE_SHIFT, PAGE_SHIFT)
32 #if L2_KERNEL_PGTABLE_SHIFT < PAGE_SHIFT
33 #define L2_USER_PGTABLE_SHIFT PAGE_SHIFT
39 #define L2_USER_PGTABLE_ORDER (L2_USER_PGTABLE_SHIFT - PAGE_SHIFT)
135 #if L1_KERNEL_PGTABLE_SHIFT < PAGE_SHIFT
136 #define L1_USER_PGTABLE_SHIFT PAGE_SHIFT
142 #define L1_USER_PGTABLE_ORDER (L1_USER_PGTABLE_SHIFT - PAGE_SHIFT)
Dpgtable_32.h37 #define PTRS_PER_PTE _HV_L2_ENTRIES(HPAGE_SHIFT, PAGE_SHIFT)
38 #define PTE_INDEX(va) _HV_L2_INDEX(va, HPAGE_SHIFT, PAGE_SHIFT)
39 #define SIZEOF_PTE _HV_L2_SIZE(HPAGE_SHIFT, PAGE_SHIFT)
Dfixmap.h78 #define __FIXADDR_SIZE (__end_of_permanent_fixed_addresses << PAGE_SHIFT)
79 #define __FIXADDR_BOOT_SIZE (__end_of_fixed_addresses << PAGE_SHIFT)
Dpgtable_64.h45 #define PTRS_PER_PTE _HV_L2_ENTRIES(HPAGE_SHIFT, PAGE_SHIFT)
46 #define PTE_INDEX(va) _HV_L2_INDEX(va, HPAGE_SHIFT, PAGE_SHIFT)
47 #define SIZEOF_PTE _HV_L2_SIZE(HPAGE_SHIFT, PAGE_SHIFT)
Dhighmem.h50 #define PKMAP_NR(virt) ((virt-PKMAP_BASE) >> PAGE_SHIFT)
51 #define PKMAP_ADDR(nr) (PKMAP_BASE + ((nr) << PAGE_SHIFT))
/linux-4.1.27/arch/x86/mm/
Dinit.c88 order = get_order((unsigned long)num << PAGE_SHIFT); in alloc_low_pages()
97 ret = memblock_find_in_range(min_pfn_mapped << PAGE_SHIFT, in alloc_low_pages()
98 max_pfn_mapped << PAGE_SHIFT, in alloc_low_pages()
103 pfn = ret >> PAGE_SHIFT; in alloc_low_pages()
108 pfn << PAGE_SHIFT, (pgt_buf_end << PAGE_SHIFT) - 1); in alloc_low_pages()
114 adr = __va((pfn + i) << PAGE_SHIFT); in alloc_low_pages()
118 return __va(pfn << PAGE_SHIFT); in alloc_low_pages()
131 pgt_buf_start = base >> PAGE_SHIFT; in early_alloc_pgt_buf()
133 pgt_buf_top = pgt_buf_start + (tables >> PAGE_SHIFT); in early_alloc_pgt_buf()
193 mr[nr_range].start = start_pfn<<PAGE_SHIFT; in save_mr()
[all …]
Dinit_32.c77 paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT); in one_md_table_init()
100 paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT); in one_page_table_init()
182 paravirt_alloc_pte(&init_mm, __pa(newpte) >> PAGE_SHIFT); in page_table_kmap_check()
187 paravirt_release_pte(__pa(pte) >> PAGE_SHIFT); in page_table_kmap_check()
268 start_pfn = start >> PAGE_SHIFT; in kernel_physical_mapping_init()
269 end_pfn = end >> PAGE_SHIFT; in kernel_physical_mapping_init()
293 pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET); in kernel_physical_mapping_init()
301 pmd_idx = pmd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET); in kernel_physical_mapping_init()
325 pfn &= PMD_MASK >> PAGE_SHIFT; in kernel_physical_mapping_init()
344 pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET); in kernel_physical_mapping_init()
[all …]
Dpat.c303 unsigned long start_pfn = start >> PAGE_SHIFT; in pat_pagerange_is_ram()
304 unsigned long end_pfn = (end + PAGE_SIZE - 1) >> PAGE_SHIFT; in pat_pagerange_is_ram()
314 if (start_pfn < ISA_END_ADDRESS >> PAGE_SHIFT) in pat_pagerange_is_ram()
315 start_pfn = ISA_END_ADDRESS >> PAGE_SHIFT; in pat_pagerange_is_ram()
344 for (pfn = (start >> PAGE_SHIFT); pfn < (end >> PAGE_SHIFT); ++pfn) { in reserve_ram_pages_type()
362 for (pfn = (start >> PAGE_SHIFT); pfn < (end >> PAGE_SHIFT); ++pfn) { in reserve_ram_pages_type()
374 for (pfn = (start >> PAGE_SHIFT); pfn < (end >> PAGE_SHIFT); ++pfn) { in free_ram_pages_type()
532 page = pfn_to_page(paddr >> PAGE_SHIFT); in lookup_memtype()
622 u64 from = ((u64)pfn) << PAGE_SHIFT; in range_is_allowed()
667 (pfn << PAGE_SHIFT) >= __pa(high_memory)) { in phys_mem_access_prot_allowed()
[all …]
Dpageattr.c96 return __pa_symbol(_text) >> PAGE_SHIFT; in highmap_start_pfn()
101 return __pa_symbol(roundup(_brk_end, PMD_SIZE)) >> PAGE_SHIFT; in highmap_end_pfn()
262 if (pcibios_enabled && within(pfn, BIOS_BEGIN >> PAGE_SHIFT, BIOS_END >> PAGE_SHIFT)) in static_protections()
278 if (within(pfn, __pa_symbol(__start_rodata) >> PAGE_SHIFT, in static_protections()
279 __pa_symbol(__end_rodata) >> PAGE_SHIFT)) in static_protections()
430 phys_addr = (phys_addr_t)pte_pfn(*pte) << PAGE_SHIFT; in slow_virt_to_phys()
500 numpages = (nextpage_addr - address) >> PAGE_SHIFT; in try_preserve_large_page()
539 pfn = pte_pfn(old_pte) + ((address & (psize - 1)) >> PAGE_SHIFT); in try_preserve_large_page()
551 for (i = 0; i < (psize >> PAGE_SHIFT); i++, addr += PAGE_SIZE, pfn++) { in try_preserve_large_page()
575 if (address == (address & pmask) && cpa->numpages == (psize >> PAGE_SHIFT)) { in try_preserve_large_page()
[all …]
Dgup.c136 page = head + ((addr & ~PMD_MASK) >> PAGE_SHIFT); in gup_huge_pmd()
213 page = head + ((addr & ~PUD_MASK) >> PAGE_SHIFT); in gup_huge_pud()
269 len = (unsigned long) nr_pages << PAGE_SHIFT; in __get_user_pages_fast()
336 len = (unsigned long) nr_pages << PAGE_SHIFT; in get_user_pages_fast()
378 VM_BUG_ON(nr != (end - start) >> PAGE_SHIFT); in get_user_pages_fast()
388 start += nr << PAGE_SHIFT; in get_user_pages_fast()
392 (end - start) >> PAGE_SHIFT, in get_user_pages_fast()
Dphysaddr.c63 return pfn_valid(x >> PAGE_SHIFT); in __virt_addr_valid()
78 VIRTUAL_BUG_ON((phys_addr >> PAGE_SHIFT) > max_low_pfn); in __phys_addr()
94 return pfn_valid((x - PAGE_OFFSET) >> PAGE_SHIFT); in __virt_addr_valid()
Dinit_64.c391 vaddr_end = __START_KERNEL_map + (max_pfn_mapped << PAGE_SHIFT); in cleanup_highmap()
435 pte, addr, pfn_pte(addr >> PAGE_SHIFT, PAGE_KERNEL).pte); in phys_pte_init()
437 set_pte(pte, pfn_pte(addr >> PAGE_SHIFT, prot)); in phys_pte_init()
503 pfn_pte((address & PMD_MASK) >> PAGE_SHIFT, in phys_pmd_init()
576 pfn_pte((addr & PUD_MASK) >> PAGE_SHIFT, in phys_pud_init()
695 unsigned long start_pfn = start >> PAGE_SHIFT; in arch_add_memory()
696 unsigned long nr_pages = size >> PAGE_SHIFT; in arch_add_memory()
1019 unsigned long start_pfn = start >> PAGE_SHIFT; in arch_remove_memory()
1020 unsigned long nr_pages = size >> PAGE_SHIFT; in arch_remove_memory()
1087 set_memory_rw(start, (end - start) >> PAGE_SHIFT); in set_kernel_text_rw()
[all …]
Dioremap.c34 unsigned long nrpages = size >> PAGE_SHIFT; in ioremap_change_attr()
128 pfn = phys_addr >> PAGE_SHIFT; in __ioremap_caller()
129 last_pfn = last_addr >> PAGE_SHIFT; in __ioremap_caller()
359 if (page_is_ram(start >> PAGE_SHIFT)) in xlate_dev_mem_ptr()
372 if (page_is_ram(phys >> PAGE_SHIFT)) in unxlate_dev_mem_ptr()
454 set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags)); in __early_set_fixmap()
Dpgtable.c66 paravirt_release_pmd(__pa(pmd) >> PAGE_SHIFT); in ___pmd_free_tlb()
81 paravirt_release_pud(__pa(pud) >> PAGE_SHIFT); in ___pud_free_tlb()
173 paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT); in pud_populate()
251 paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT); in pgd_mop_up_pmds()
562 __native_set_fixmap(idx, pfn_pte(phys >> PAGE_SHIFT, flags)); in native_set_fixmap()
581 (u64)addr >> PAGE_SHIFT, in pud_set_huge()
602 (u64)addr >> PAGE_SHIFT, in pmd_set_huge()
/linux-4.1.27/arch/xtensa/include/asm/
Dpage.h32 #define PAGE_SHIFT 12 macro
33 #define PAGE_SIZE (__XTENSA_UL_CONST(1) << PAGE_SHIFT)
75 # define DCACHE_ALIAS_ORDER (DCACHE_WAY_SHIFT - PAGE_SHIFT)
77 # define DCACHE_ALIAS(a) (((a) & DCACHE_ALIAS_MASK) >> PAGE_SHIFT)
86 # define ICACHE_ALIAS_ORDER (ICACHE_WAY_SHIFT - PAGE_SHIFT)
88 # define ICACHE_ALIAS(a) (((a) & ICACHE_ALIAS_MASK) >> PAGE_SHIFT)
128 asm ("nsau %0, %1" : "=r" (lz) : "r" ((size - 1) >> PAGE_SHIFT)); in get_order()
170 #define ARCH_PFN_OFFSET (PLATFORM_DEFAULT_MEM_START >> PAGE_SHIFT)
181 #define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
182 #define page_to_virt(page) __va(page_to_pfn(page) << PAGE_SHIFT)
[all …]
Dfixmap.h48 #define FIXADDR_SIZE (__end_of_fixed_addresses << PAGE_SHIFT)
51 #define __fix_to_virt(x) (FIXADDR_START + ((x) << PAGE_SHIFT))
52 #define __virt_to_fix(x) (((x) - FIXADDR_START) >> PAGE_SHIFT)
Dpgtable.h283 #define pte_pfn(pte) (pte_val(pte) >> PAGE_SHIFT)
286 #define pfn_pte(pfn, prot) __pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
367 #define pte_index(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
405 #define _PTE_INDEX(rt,rs) extui rt, rs, PAGE_SHIFT, PTRS_PER_PTE_SHIFT
412 srli pmd, pmd, PAGE_SHIFT; \
413 slli pmd, pmd, PAGE_SHIFT; \
/linux-4.1.27/arch/mn10300/include/asm/
Dpage.h15 #define PAGE_SHIFT 12 macro
18 #define PAGE_SIZE (1UL << PAGE_SHIFT)
21 #define PAGE_SIZE +(1 << PAGE_SHIFT) /* unary plus marks an
49 #define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT)
82 size = (size - 1) >> (PAGE_SHIFT - 1); in get_order()
103 #define __pfn_disp (CONFIG_KERNEL_RAM_BASE_ADDRESS >> PAGE_SHIFT)
107 #define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT)
117 #define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
118 #define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
119 #define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT)
/linux-4.1.27/arch/mips/include/asm/
Dpage.h21 #define PAGE_SHIFT 12 macro
24 #define PAGE_SHIFT 13 macro
27 #define PAGE_SHIFT 14 macro
30 #define PAGE_SHIFT 15 macro
33 #define PAGE_SHIFT 16 macro
35 #define PAGE_SIZE (_AC(1,UL) << PAGE_SHIFT)
36 #define PAGE_MASK (~((1 << PAGE_SHIFT) - 1))
54 return (PAGE_SHIFT - 10) / 2; in page_size_ftlb()
62 #define HPAGE_SHIFT (PAGE_SHIFT + PAGE_SHIFT - 3)
65 #define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT)
[all …]
Dpgtable-64.h48 #define PGDIR_SHIFT (PAGE_SHIFT + PAGE_SHIFT + PTE_ORDER - 3)
52 #define PMD_SHIFT (PAGE_SHIFT + (PAGE_SHIFT + PTE_ORDER - 3))
57 #define PGDIR_SHIFT (PMD_SHIFT + (PAGE_SHIFT + PMD_ORDER - 3))
228 #define pte_pfn(x) ((unsigned long)((x).pte >> (PAGE_SHIFT + 2)))
229 #define pfn_pte(pfn, prot) __pte(((pfn) << (PAGE_SHIFT + 2)) | pgprot_val(prot))
255 #define pud_page(pud) (pfn_to_page(pud_phys(pud) >> PAGE_SHIFT))
266 (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
Dpgtable-32.h42 #define PGDIR_SHIFT (2 * PAGE_SHIFT + PTE_ORDER - PTE_T_LOG2)
50 #define __PGD_ORDER (32 - 3 * PAGE_SHIFT + PGD_T_LOG2 + PTE_T_LOG2)
126 #define pte_pfn(x) ((unsigned long)((x).pte >> (PAGE_SHIFT + 2)))
127 #define pfn_pte(pfn, prot) __pte(((pfn) << (PAGE_SHIFT + 2)) | pgprot_val(prot))
148 (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
Dhighmem.h41 #define PKMAP_NR(virt) ((virt-PKMAP_BASE) >> PAGE_SHIFT)
42 #define PKMAP_ADDR(nr) (PKMAP_BASE + ((nr) << PAGE_SHIFT))
/linux-4.1.27/arch/avr32/include/asm/
Dpage.h14 #define PAGE_SHIFT 12 macro
15 #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
53 size = (size - 1) >> PAGE_SHIFT; in get_order()
79 #define MAP_NR(addr) (((unsigned long)(addr) - PAGE_OFFSET) >> PAGE_SHIFT)
81 #define phys_to_page(phys) (pfn_to_page(phys >> PAGE_SHIFT))
82 #define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT)
86 #define PHYS_PFN_OFFSET (CONFIG_PHYS_OFFSET >> PAGE_SHIFT)
93 #define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
94 #define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
Dpgtable-2level.h41 #define pte_pfn(x) ((unsigned long)(((x).pte >> PAGE_SHIFT)))
42 #define pfn_pte(pfn, prot) __pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
43 #define pfn_pmd(pfn, prot) __pmd(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
/linux-4.1.27/arch/metag/include/asm/
Dpage.h10 #define PAGE_SHIFT 12 macro
12 #define PAGE_SHIFT 13 macro
14 #define PAGE_SHIFT 14 macro
17 #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
45 # define HUGETLB_PAGE_ORDER (HPAGE_SHIFT-PAGE_SHIFT)
108 #define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
109 #define page_to_virt(page) __va(page_to_pfn(page) << PAGE_SHIFT)
110 #define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
111 #define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT)
118 #define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT)
Dhighmem.h29 #define PKMAP_NR(virt) (((virt) - PKMAP_BASE) >> PAGE_SHIFT)
30 #define PKMAP_ADDR(nr) (PKMAP_BASE + ((nr) << PAGE_SHIFT))
/linux-4.1.27/arch/openrisc/include/asm/
Dpage.h25 #define PAGE_SHIFT 13 macro
27 #define PAGE_SIZE (1 << PAGE_SHIFT)
29 #define PAGE_SIZE (1UL << PAGE_SHIFT)
82 #define virt_to_pfn(kaddr) (__pa(kaddr) >> PAGE_SHIFT)
83 #define pfn_to_virt(pfn) __va((pfn) << PAGE_SHIFT)
86 (mem_map + (((unsigned long)(addr)-PAGE_OFFSET) >> PAGE_SHIFT))
88 ((((page) - mem_map) << PAGE_SHIFT) + PAGE_OFFSET)
90 #define page_to_phys(page) ((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT)
Dpgtable.h60 #define PGDIR_SHIFT (PAGE_SHIFT + (PAGE_SHIFT-2))
70 #define PTRS_PER_PTE (1UL << (PAGE_SHIFT-2))
72 #define PTRS_PER_PGD (1UL << (PAGE_SHIFT-2))
218 ((unsigned long)(address)>>(PAGE_SHIFT-SIZEOF_PTR_LOG2)&PTR_MASK&~PAGE_MASK)
354 #define pte_pagenr(pte) ((__pte_page(pte) - PAGE_OFFSET) >> PAGE_SHIFT)
358 #define __page_address(page) (PAGE_OFFSET + (((page) - mem_map) << PAGE_SHIFT))
371 #define pmd_page(pmd) (pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT))
394 (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
404 #define pte_pfn(x) ((unsigned long)(((x).pte)) >> PAGE_SHIFT)
405 #define pfn_pte(pfn, prot) __pte((((pfn) << PAGE_SHIFT)) | pgprot_val(prot))
Dfixmap.h52 #define FIXADDR_SIZE (__end_of_fixed_addresses << PAGE_SHIFT)
56 #define __fix_to_virt(x) (FIXADDR_TOP - ((x) << PAGE_SHIFT))
57 #define __virt_to_fix(x) ((FIXADDR_TOP - ((x)&PAGE_MASK)) >> PAGE_SHIFT)
/linux-4.1.27/arch/nios2/mm/
Dtlb.c24 << PAGE_SHIFT)
63 WRCTL(CTL_PTEADDR, (addr >> PAGE_SHIFT) << 2); in flush_tlb_one_pid()
75 if (((((pteaddr >> 2) & 0xfffff)) == (addr >> PAGE_SHIFT)) && in flush_tlb_one_pid()
87 WRCTL(CTL_TLBACC, (MAX_PHYS_ADDR >> PAGE_SHIFT)); in flush_tlb_one_pid()
127 WRCTL(CTL_PTEADDR, (addr >> PAGE_SHIFT) << 2); in flush_tlb_one()
138 if ((((pteaddr >> 2) & 0xfffff)) == (addr >> PAGE_SHIFT)) { in flush_tlb_one()
150 WRCTL(CTL_TLBACC, (MAX_PHYS_ADDR >> PAGE_SHIFT)); in flush_tlb_one()
163 line << (PAGE_SHIFT + cpuinfo.tlb_num_ways_log2)); in dump_tlb_line()
180 if ((tlbacc << PAGE_SHIFT) != (MAX_PHYS_ADDR & PAGE_MASK)) { in dump_tlb_line()
183 (pteaddr << (PAGE_SHIFT-2)), in dump_tlb_line()
[all …]
/linux-4.1.27/arch/sh/include/asm/
Dpage.h12 # define PAGE_SHIFT 12 macro
14 # define PAGE_SHIFT 13 macro
16 # define PAGE_SHIFT 14 macro
18 # define PAGE_SHIFT 16 macro
23 #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
44 #define HUGETLB_PAGE_ORDER (HPAGE_SHIFT-PAGE_SHIFT)
166 #define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT)
167 #define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT)
175 #define PFN_START (__MEMORY_START >> PAGE_SHIFT)
177 #define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
[all …]
Dpgtable_64.h79 ((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
244 __MEMORY_START) >> PAGE_SHIFT)
254 #define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT))
283 set_pte(&__pte, __pte((((page)-mem_map) << PAGE_SHIFT) | \
307 #define pfn_pte(pfn, prot) __pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
308 #define pfn_pmd(pfn, prot) __pmd(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
/linux-4.1.27/arch/ia64/include/asm/
Dpage.h30 # define PAGE_SHIFT 12 macro
32 # define PAGE_SHIFT 13 macro
34 # define PAGE_SHIFT 14 macro
36 # define PAGE_SHIFT 16 macro
41 #define PAGE_SIZE (__IA64_UL_CONST(1) << PAGE_SHIFT)
95 #define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
123 #define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT)
124 #define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
125 #define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT)
150 | (REGION_OFFSET(x) >> (HPAGE_SHIFT-PAGE_SHIFT)))
[all …]
Dpgtable.h84 #define PTRS_PER_PTD_SHIFT (PAGE_SHIFT-3)
97 #define PMD_SHIFT (PAGE_SHIFT + (PTRS_PER_PTD_SHIFT))
226 # define VMALLOC_END_INIT (RGN_BASE(RGN_GATE) + (1UL << (4*PAGE_SHIFT - 9)))
231 # define VMALLOC_END (RGN_BASE(RGN_GATE) + (1UL << (4*PAGE_SHIFT - 10)))
234 # define VMALLOC_END (RGN_BASE(RGN_GATE) + (1UL << (4*PAGE_SHIFT - 9)))
250 ({ pte_t __pte; pte_val(__pte) = ((pfn) << PAGE_SHIFT) | pgprot_val(pgprot); __pte; })
253 #define pte_pfn(_pte) ((pte_val(_pte) & _PFN_MASK) >> PAGE_SHIFT)
366 return (region << (PAGE_SHIFT - 6)) | l1index; in pgd_index()
401 #define pte_index(addr) (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
493 #define HUGETLB_PGDIR_SHIFT (HPAGE_SHIFT + 2*(PAGE_SHIFT-3))
Dsparsemem.h13 #if ((CONFIG_FORCE_MAX_ZONEORDER - 1 + PAGE_SHIFT) > SECTION_SIZE_BITS)
15 #define SECTION_SIZE_BITS (CONFIG_FORCE_MAX_ZONEORDER - 1 + PAGE_SHIFT)
/linux-4.1.27/arch/arc/include/uapi/asm/
Dpage.h14 #define PAGE_SHIFT 14 macro
16 #define PAGE_SHIFT 12 macro
25 #define PAGE_SHIFT 13 macro
29 #define PAGE_SIZE (1 << PAGE_SHIFT)
32 #define PAGE_SIZE (1UL << PAGE_SHIFT) /* Default 8K */
/linux-4.1.27/arch/alpha/include/asm/
Dpgtable.h31 #define PMD_SHIFT (PAGE_SHIFT + (PAGE_SHIFT-3))
36 #define PGDIR_SHIFT (PAGE_SHIFT + 2*(PAGE_SHIFT-3))
44 #define PTRS_PER_PTE (1UL << (PAGE_SHIFT-3))
45 #define PTRS_PER_PMD (1UL << (PAGE_SHIFT-3))
46 #define PTRS_PER_PGD (1UL << (PAGE_SHIFT-3))
51 #define PTRS_PER_PAGE (1UL << (PAGE_SHIFT-3))
170 ((unsigned long)(address)>>(PAGE_SHIFT-SIZEOF_PTR_LOG2)&PTR_MASK&~PAGE_MASK)
193 #define KSEG_PFN (0xc0000000000UL >> PAGE_SHIFT)
195 ((((pfn) & KSEG_PFN) == (0x40000000000UL >> PAGE_SHIFT)) \
206 #define page_to_pa(page) (((page) - mem_map) << PAGE_SHIFT)
[all …]
Dmmzone.h38 (((p) >> PAGE_SHIFT) - PLAT_NODE_DATA(n)->gendata.node_start_pfn)
44 temp = p >> PAGE_SHIFT; in PLAT_NODE_DATA_LOCALNR()
67 << PAGE_SHIFT))
72 #define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
94 kvirt = (unsigned long)__va(pte_val(x) >> (32-PAGE_SHIFT)); \
101 (page_to_pfn(page) << PAGE_SHIFT)
103 #define pfn_to_nid(pfn) pa_to_nid(((u64)(pfn) << PAGE_SHIFT))
108 #define virt_addr_valid(kaddr) pfn_valid((__pa(kaddr) >> PAGE_SHIFT))
Dpage.h8 #define PAGE_SHIFT 13 macro
9 #define PAGE_SIZE (_AC(1,UL) << PAGE_SHIFT)
86 #define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
89 #define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
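
The alpha pgtable.h hits above define PMD_SHIFT and PGDIR_SHIFT as sums of PAGE_SHIFT terms because, with 8-byte page-table entries, one page holds 2^(PAGE_SHIFT-3) entries, so each table level adds (PAGE_SHIFT-3) bits of coverage. The sketch below (not kernel code) just evaluates that arithmetic for the 8 KiB page size shown in the alpha page.h hit; it is a worked example, not the kernel's implementation.

#include <stdio.h>

#define PAGE_SHIFT    13                                   /* 8 KiB pages */
#define PTRS_PER_PTE  (1UL << (PAGE_SHIFT - 3))            /* 1024 entries per table page */
#define PMD_SHIFT     (PAGE_SHIFT + (PAGE_SHIFT - 3))      /* 23 */
#define PGDIR_SHIFT   (PAGE_SHIFT + 2 * (PAGE_SHIFT - 3))  /* 33 */

int main(void)
{
	printf("entries per table: %lu\n", PTRS_PER_PTE);
	printf("one PMD entry maps: %llu MiB\n", (1ULL << PMD_SHIFT) >> 20);   /* 8 MiB */
	printf("one PGD entry maps: %llu GiB\n", (1ULL << PGDIR_SHIFT) >> 30); /* 8 GiB */
	return 0;
}
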
/linux-4.1.27/arch/cris/include/asm/
Dpage.h8 #define PAGE_SHIFT 13 macro
9 #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
42 #define ARCH_PFN_OFFSET (PAGE_OFFSET >> PAGE_SHIFT)
43 #define pfn_valid(pfn) (((pfn) - (PAGE_OFFSET >> PAGE_SHIFT)) < max_mapnr)
53 #define virt_to_page(kaddr) (mem_map + (((unsigned long)(kaddr) - PAGE_OFFSET) >> PAGE_SHIFT))
54 #define virt_addr_valid(kaddr) pfn_valid((unsigned)(kaddr) >> PAGE_SHIFT)
60 #define page_to_phys(page) __pa((((page) - mem_map) << PAGE_SHIFT) + PAGE_OFFSET)
Dpgtable.h51 #define PGDIR_SHIFT (PAGE_SHIFT + (PAGE_SHIFT-2))
61 #define PTRS_PER_PTE (1UL << (PAGE_SHIFT-2))
62 #define PTRS_PER_PGD (1UL << (PAGE_SHIFT-2))
90 ((unsigned long)(address)>>(PAGE_SHIFT-SIZEOF_PTR_LOG2)&PTR_MASK&~PAGE_MASK)
213 #define pte_pagenr(pte) ((__pte_page(pte) - PAGE_OFFSET) >> PAGE_SHIFT)
217 #define __page_address(page) (PAGE_OFFSET + (((page) - mem_map) << PAGE_SHIFT))
228 #define pmd_page(pmd) (pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT))
245 (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
252 #define pte_pfn(x) ((unsigned long)(__va((x).pte)) >> PAGE_SHIFT)
253 #define pfn_pte(pfn, prot) __pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
/linux-4.1.27/arch/powerpc/include/asm/
Dpage.h28 #define PAGE_SHIFT 18 macro
30 #define PAGE_SHIFT 16 macro
32 #define PAGE_SHIFT 14 macro
34 #define PAGE_SHIFT 12 macro
37 #define PAGE_SIZE (ASM_CONST(1) << PAGE_SHIFT)
43 #define HPAGE_SHIFT PAGE_SHIFT
47 #define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT)
56 #define PAGE_MASK (~((1 << PAGE_SHIFT) - 1))
126 #define ARCH_PFN_OFFSET ((unsigned long)(MEMORY_START >> PAGE_SHIFT))
130 #define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
[all …]
Dmmu-44x.h126 #if (PAGE_SHIFT == 12)
130 #elif (PAGE_SHIFT == 14)
134 #elif (PAGE_SHIFT == 16)
138 #elif (PAGE_SHIFT == 18)
Dpage_32.h23 #define PTE_SHIFT (PAGE_SHIFT - PTE_T_LOG2 - 2) /* 1/4 of a page */
25 #define PTE_SHIFT (PAGE_SHIFT - PTE_T_LOG2) /* full page */
Dpgtable-ppc64-64k.h23 #define MIN_HUGEPTE_SHIFT PAGE_SHIFT
26 #define PMD_SHIFT (PAGE_SHIFT + PTE_INDEX_SIZE)
/linux-4.1.27/arch/arm/mm/
Dtlb-v7.S39 mov r0, r0, lsr #PAGE_SHIFT @ align address
40 mov r1, r1, lsr #PAGE_SHIFT
46 orr r0, r3, r0, lsl #PAGE_SHIFT @ Create initial MVA
47 mov r1, r1, lsl #PAGE_SHIFT
73 mov r0, r0, lsr #PAGE_SHIFT @ align address
74 mov r1, r1, lsr #PAGE_SHIFT
75 mov r0, r0, lsl #PAGE_SHIFT
76 mov r1, r1, lsl #PAGE_SHIFT
Dtlb-v6.S41 mov r0, r0, lsr #PAGE_SHIFT @ align address
42 mov r1, r1, lsr #PAGE_SHIFT
44 orr r0, r3, r0, lsl #PAGE_SHIFT @ Create initial MVA
45 mov r1, r1, lsl #PAGE_SHIFT
72 mov r0, r0, lsr #PAGE_SHIFT @ align address
73 mov r1, r1, lsr #PAGE_SHIFT
74 mov r0, r0, lsl #PAGE_SHIFT
75 mov r1, r1, lsl #PAGE_SHIFT
Dmmap.c16 (((pgoff)<<PAGE_SHIFT) & (SHMLBA-1)))
76 (addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)) in arch_get_unmapped_area()
101 info.align_offset = pgoff << PAGE_SHIFT; in arch_get_unmapped_area()
130 (addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)) in arch_get_unmapped_area_topdown()
152 info.align_offset = pgoff << PAGE_SHIFT; in arch_get_unmapped_area_topdown()
179 return rnd << PAGE_SHIFT; in arch_mmap_rnd()
217 return (pfn + (size >> PAGE_SHIFT)) <= (1 + (PHYS_MASK >> PAGE_SHIFT)); in valid_mmap_phys_addr_range()
233 if (iomem_is_exclusive(pfn << PAGE_SHIFT)) in devmem_is_allowed()
Dcache-xsc3l2.c82 unsigned long pa_offset = pa << (32 - PAGE_SHIFT); in l2_map_va()
83 if (unlikely(pa_offset < (prev_va << (32 - PAGE_SHIFT)))) { in l2_map_va()
90 va = (unsigned long)kmap_atomic_pfn(pa >> PAGE_SHIFT); in l2_map_va()
92 return va + (pa_offset >> (32 - PAGE_SHIFT)); in l2_map_va()
Dcopypage-v6.c90 kfrom = COPYPAGE_V6_FROM + (offset << PAGE_SHIFT); in v6_copy_user_highpage_aliasing()
91 kto = COPYPAGE_V6_TO + (offset << PAGE_SHIFT); in v6_copy_user_highpage_aliasing()
108 unsigned long to = COPYPAGE_V6_TO + (CACHE_COLOUR(vaddr) << PAGE_SHIFT); in v6_clear_user_highpage_aliasing()
Dfault-armv.c57 outer_flush_range((pfn << PAGE_SHIFT), in do_adjust_pte()
58 (pfn << PAGE_SHIFT) + PAGE_SIZE); in do_adjust_pte()
141 pgoff = vma->vm_pgoff + ((addr - vma->vm_start) >> PAGE_SHIFT); in make_coherent()
159 offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT; in make_coherent()
/linux-4.1.27/arch/microblaze/include/asm/
Dpage.h27 #define PAGE_SHIFT 16 macro
29 #define PAGE_SHIFT 14 macro
31 #define PAGE_SHIFT 12 macro
33 #define PAGE_SIZE (ASM_CONST(1) << PAGE_SHIFT)
38 #define PTE_SHIFT (PAGE_SHIFT - 2) /* 1024 ptes per page */
152 # define virt_to_page(kaddr) (pfn_to_page(__pa(kaddr) >> PAGE_SHIFT))
153 # define page_to_virt(page) __va(page_to_pfn(page) << PAGE_SHIFT)
154 # define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT)
167 # define ARCH_PFN_OFFSET (PAGE_OFFSET >> PAGE_SHIFT)
169 # define ARCH_PFN_OFFSET (memory_start >> PAGE_SHIFT)
/linux-4.1.27/arch/sh/mm/
Dnuma.c36 start_pfn = start >> PAGE_SHIFT; in setup_bootmem_node()
37 end_pfn = end >> PAGE_SHIFT; in setup_bootmem_node()
57 bootmem_paddr = memblock_alloc_base(bootmap_pages << PAGE_SHIFT, in setup_bootmem_node()
59 init_bootmem_node(NODE_DATA(nid), bootmem_paddr >> PAGE_SHIFT, in setup_bootmem_node()
65 reserve_bootmem_node(NODE_DATA(nid), start_pfn << PAGE_SHIFT, in setup_bootmem_node()
68 bootmap_pages << PAGE_SHIFT, BOOTMEM_DEFAULT); in setup_bootmem_node()
Dinit.c82 set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, prot)); in set_pte_phys()
204 SMP_CACHE_BYTES, end_pfn << PAGE_SHIFT); in allocate_pgdat()
238 paddr = memblock_alloc(total_pages << PAGE_SHIFT, PAGE_SIZE); in bootmem_init_one_node()
242 init_bootmem_node(p, paddr >> PAGE_SHIFT, p->node_start_pfn, end_pfn); in bootmem_init_one_node()
346 max_low_pfn = max_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT; in paging_init()
347 min_low_pfn = __MEMORY_START >> PAGE_SHIFT; in paging_init()
417 __va(pgdat_end_pfn(pgdat) << PAGE_SHIFT)); in mem_init()
491 unsigned long start_pfn = start >> PAGE_SHIFT; in arch_add_memory()
492 unsigned long nr_pages = size >> PAGE_SHIFT; in arch_add_memory()
520 unsigned long start_pfn = start >> PAGE_SHIFT; in arch_remove_memory()
[all …]
Dmmap.c28 unsigned long off = (pgoff << PAGE_SHIFT) & shm_align_mask; in COLOUR_ALIGN()
46 ((addr - (pgoff << PAGE_SHIFT)) & shm_align_mask)) in arch_get_unmapped_area()
75 info.align_offset = pgoff << PAGE_SHIFT; in arch_get_unmapped_area()
95 ((addr - (pgoff << PAGE_SHIFT)) & shm_align_mask)) in arch_get_unmapped_area_topdown()
125 info.align_offset = pgoff << PAGE_SHIFT; in arch_get_unmapped_area_topdown()
Dgup.c175 len = (unsigned long) nr_pages << PAGE_SHIFT; in __get_user_pages_fast()
228 len = (unsigned long) nr_pages << PAGE_SHIFT; in get_user_pages_fast()
247 VM_BUG_ON(nr != (end - start) >> PAGE_SHIFT); in get_user_pages_fast()
257 start += nr << PAGE_SHIFT; in get_user_pages_fast()
261 (end - start) >> PAGE_SHIFT, write, 0, pages); in get_user_pages_fast()
/linux-4.1.27/drivers/base/
Ddma-coherent.c26 int pages = size >> PAGE_SHIFT; in dma_init_coherent_memory()
130 pos = (device_addr - mem->device_base) >> PAGE_SHIFT; in dma_mark_declared_memory_occupied()
136 return mem->virt_base + (pos << PAGE_SHIFT); in dma_mark_declared_memory_occupied()
172 if (unlikely(size > (mem->size << PAGE_SHIFT))) in dma_alloc_from_coherent()
182 *dma_handle = mem->device_base + (pageno << PAGE_SHIFT); in dma_alloc_from_coherent()
183 *ret = mem->virt_base + (pageno << PAGE_SHIFT); in dma_alloc_from_coherent()
218 (mem->virt_base + (mem->size << PAGE_SHIFT))) { in dma_release_from_coherent()
219 int page = (vaddr - mem->virt_base) >> PAGE_SHIFT; in dma_release_from_coherent()
252 (mem->virt_base + (mem->size << PAGE_SHIFT))) { in dma_mmap_from_coherent()
254 int start = (vaddr - mem->virt_base) >> PAGE_SHIFT; in dma_mmap_from_coherent()
[all …]
/linux-4.1.27/include/asm-generic/
Dpage.h15 #define PAGE_SHIFT 12 macro
17 #define PAGE_SIZE (1 << PAGE_SHIFT)
19 #define PAGE_SIZE (1UL << PAGE_SHIFT)
75 #define ARCH_PFN_OFFSET (PAGE_OFFSET >> PAGE_SHIFT)
83 #define virt_to_pfn(kaddr) (__pa(kaddr) >> PAGE_SHIFT)
84 #define pfn_to_virt(pfn) __va((pfn) << PAGE_SHIFT)
90 #define page_to_phys(page) ((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT)
Dgetorder.h18 size >>= PAGE_SHIFT; in __get_order()
52 ((n) == 0UL) ? BITS_PER_LONG - PAGE_SHIFT : \
53 (((n) < (1UL << PAGE_SHIFT)) ? 0 : \
54 ilog2((n) - 1) - PAGE_SHIFT + 1) \
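
The asm-generic getorder.h hits above compute get_order(size): the smallest order such that PAGE_SIZE << order covers the requested size. Below is a standalone sketch (not kernel code) of the same calculation with a simple loop standing in for fls(); the 4 KiB PAGE_SHIFT is an illustrative assumption.

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

static int get_order(unsigned long size)
{
	int order = 0;

	size = (size - 1) >> PAGE_SHIFT;   /* whole pages needed, minus one */
	while (size) {                     /* position of the highest set bit */
		order++;
		size >>= 1;
	}
	return order;
}

int main(void)
{
	printf("get_order(1)     = %d\n", get_order(1));      /* 0: one 4 KiB page   */
	printf("get_order(4096)  = %d\n", get_order(4096));   /* 0                    */
	printf("get_order(4097)  = %d\n", get_order(4097));   /* 1: two pages (8 KiB) */
	printf("get_order(65536) = %d\n", get_order(65536));  /* 4: sixteen pages     */
	return 0;
}
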
/linux-4.1.27/arch/parisc/include/asm/
Dmmzone.h28 #define PFNNID_SHIFT (30 - PAGE_SHIFT)
33 #define pfn_is_io(pfn) ((pfn & (0xf0000000UL >> PAGE_SHIFT)) == (0xf0000000UL >> PAGE_SHIFT))
36 …ne pfn_is_io(pfn) ((pfn & (0xf000000000000000UL >> PAGE_SHIFT)) == (0xf000000000000000UL >> PAGE_S…
Dpage.h7 # define PAGE_SHIFT 12 macro
9 # define PAGE_SHIFT 14 macro
11 # define PAGE_SHIFT 16 macro
15 #define PAGE_SIZE (_AC(1,UL) << PAGE_SHIFT)
151 #define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT)
154 #define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
156 #define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT)
157 #define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
/linux-4.1.27/arch/powerpc/mm/
Dmem.c84 unsigned long paddr = (pfn << PAGE_SHIFT); in page_is_ram()
120 unsigned long start_pfn = start >> PAGE_SHIFT; in arch_add_memory()
121 unsigned long nr_pages = size >> PAGE_SHIFT; in arch_add_memory()
139 unsigned long start_pfn = start >> PAGE_SHIFT; in arch_remove_memory()
140 unsigned long nr_pages = size >> PAGE_SHIFT; in arch_remove_memory()
194 max_low_pfn = max_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT; in initmem_init()
195 min_low_pfn = MEMORY_START >> PAGE_SHIFT; in initmem_init()
197 max_low_pfn = lowmem_end_addr >> PAGE_SHIFT; in initmem_init()
311 limit_zone_pfn(ZONE_NORMAL, lowmem_end_addr >> PAGE_SHIFT); in paging_init()
316 limit_zone_pfn(top_zone, top_of_ram >> PAGE_SHIFT); in paging_init()
[all …]
Dsubpage-prot.c118 spp += (addr >> PAGE_SHIFT) & (SBP_L1_COUNT - 1); in subpage_prot_clear()
120 i = (addr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1); in subpage_prot_clear()
122 if (addr + (nw << PAGE_SHIFT) > next) in subpage_prot_clear()
123 nw = (next - addr) >> PAGE_SHIFT; in subpage_prot_clear()
212 if (!access_ok(VERIFY_READ, map, (len >> PAGE_SHIFT) * sizeof(u32))) in sys_subpage_prot()
239 spp += (addr >> PAGE_SHIFT) & (SBP_L1_COUNT - 1); in sys_subpage_prot()
245 i = (addr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1); in sys_subpage_prot()
247 if (addr + (nw << PAGE_SHIFT) > next) in sys_subpage_prot()
248 nw = (next - addr) >> PAGE_SHIFT; in sys_subpage_prot()
Dmmap.c62 rnd = (unsigned long)get_random_int() % (1<<(23-PAGE_SHIFT)); in arch_mmap_rnd()
64 rnd = (unsigned long)get_random_int() % (1<<(30-PAGE_SHIFT)); in arch_mmap_rnd()
66 return rnd << PAGE_SHIFT; in arch_mmap_rnd()
/linux-4.1.27/arch/arm/include/asm/
Dmemory.h126 #define __phys_to_pfn(paddr) ((unsigned long)((paddr) >> PAGE_SHIFT))
127 #define __pfn_to_phys(pfn) ((phys_addr_t)(pfn) << PAGE_SHIFT)
169 #define PHYS_PFN_OFFSET ((unsigned long)(PHYS_OFFSET >> PAGE_SHIFT))
171 #define virt_to_pfn(kaddr) (__pa(kaddr) >> PAGE_SHIFT)
187 #define PHYS_OFFSET ((phys_addr_t)__pv_phys_pfn_offset << PAGE_SHIFT)
191 ((((unsigned long)(kaddr) - PAGE_OFFSET) >> PAGE_SHIFT) + \
253 #define PHYS_PFN_OFFSET ((unsigned long)(PHYS_OFFSET >> PAGE_SHIFT))
266 ((((unsigned long)(kaddr) - PAGE_OFFSET) >> PAGE_SHIFT) + \
294 #define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT)
Dpage.h14 #define PAGE_SHIFT 12 macro
15 #define PAGE_SIZE (_AC(1,UL) << PAGE_SHIFT)
16 #define PAGE_MASK (~((1 << PAGE_SHIFT) - 1))
/linux-4.1.27/arch/x86/kernel/cpu/mtrr/
Damd.c19 *base = (low & 0xFFFE0000) >> PAGE_SHIFT; in amd_get_mtrr()
45 *size = (low + 4) << (15 - PAGE_SHIFT); in amd_get_mtrr()
82 regs[reg] = (-size >> (15 - PAGE_SHIFT) & 0x0001FFFC) in amd_set_mtrr()
83 | (base << PAGE_SHIFT) | (type + 1); in amd_set_mtrr()
105 if (type > MTRR_TYPE_WRCOMB || size < (1 << (17 - PAGE_SHIFT)) in amd_validate_add_page()
Dcentaur.c60 *base = centaur_mcr[reg].high >> PAGE_SHIFT; in centaur_get_mcr()
61 *size = -(centaur_mcr[reg].low & 0xfffff000) >> PAGE_SHIFT; in centaur_get_mcr()
82 high = base << PAGE_SHIFT; in centaur_set_mcr()
85 low = -size << PAGE_SHIFT | 0x1f; in centaur_set_mcr()
88 low = -size << PAGE_SHIFT | 0x02; /* NC */ in centaur_set_mcr()
90 low = -size << PAGE_SHIFT | 0x09; /* WWO, WC */ in centaur_set_mcr()
Dif.c54 base >>= PAGE_SHIFT; in mtrr_file_add()
55 size >>= PAGE_SHIFT; in mtrr_file_add()
73 base >>= PAGE_SHIFT; in mtrr_file_del()
74 size >>= PAGE_SHIFT; in mtrr_file_del()
155 base >>= PAGE_SHIFT; in mtrr_write()
156 size >>= PAGE_SHIFT; in mtrr_write()
274 if (base + size - 1 >= (1UL << (8 * sizeof(gentry.size) - PAGE_SHIFT)) in mtrr_ioctl()
275 || size >= (1UL << (8 * sizeof(gentry.size) - PAGE_SHIFT))) in mtrr_ioctl()
278 gentry.base = base << PAGE_SHIFT; in mtrr_ioctl()
279 gentry.size = size << PAGE_SHIFT; in mtrr_ioctl()
[all …]
Dcleanup.c100 if (base < (1<<(20-PAGE_SHIFT)) && mtrr_state.have_fixed && in x86_get_mtrr_mem_range()
104 if (base + size <= (1<<(20-PAGE_SHIFT))) in x86_get_mtrr_mem_range()
106 size -= (1<<(20-PAGE_SHIFT)) - base; in x86_get_mtrr_mem_range()
107 base = 1<<(20-PAGE_SHIFT); in x86_get_mtrr_mem_range()
207 range_state[reg].base_pfn = basek >> (PAGE_SHIFT - 10); in save_var_mtrr()
208 range_state[reg].size_pfn = sizek >> (PAGE_SHIFT - 10); in save_var_mtrr()
219 basek = range_state[reg].base_pfn << (PAGE_SHIFT - 10); in set_var_mtrr_all()
220 sizek = range_state[reg].size_pfn << (PAGE_SHIFT - 10); in set_var_mtrr_all()
419 basek = base_pfn << (PAGE_SHIFT - 10); in set_var_mtrr_range()
420 sizek = size_pfn << (PAGE_SHIFT - 10); in set_var_mtrr_range()
[all …]
Dgeneric.c71 mask >>= PAGE_SHIFT; in get_mtrr_size()
74 size <<= PAGE_SHIFT; in get_mtrr_size()
364 high_width = (__ffs64(size_or_mask) - (32 - PAGE_SHIFT) + 3) / 4; in print_mtrr_state()
536 tmp = (u64)mask_hi << (32 - PAGE_SHIFT) | mask_lo >> PAGE_SHIFT; in generic_get_mtrr()
556 *base = (u64)base_hi << (32 - PAGE_SHIFT) | base_lo >> PAGE_SHIFT; in generic_get_mtrr()
596 || (vr->base_hi & (size_and_mask >> (32 - PAGE_SHIFT))) != in set_mtrr_var_ranges()
597 (hi & (size_and_mask >> (32 - PAGE_SHIFT)))) { in set_mtrr_var_ranges()
606 || (vr->mask_hi & (size_and_mask >> (32 - PAGE_SHIFT))) != in set_mtrr_var_ranges()
607 (hi & (size_and_mask >> (32 - PAGE_SHIFT)))) { in set_mtrr_var_ranges()
770 vr->base_lo = base << PAGE_SHIFT | type; in generic_set_mtrr()
[all …]
/linux-4.1.27/sound/core/
Dsgbuf.c52 tmpb.bytes = (sgbuf->table[i].addr & ~PAGE_MASK) << PAGE_SHIFT; in snd_free_sgbuf_pages()
100 chunk <<= PAGE_SHIFT; in snd_malloc_sgbuf_pages()
110 chunk = tmpb.bytes >> PAGE_SHIFT; in snd_malloc_sgbuf_pages()
149 start = ofs >> PAGE_SHIFT; in snd_sgbuf_get_chunk_size()
150 end = (ofs + size - 1) >> PAGE_SHIFT; in snd_sgbuf_get_chunk_size()
152 pg = sg->table[start].addr >> PAGE_SHIFT; in snd_sgbuf_get_chunk_size()
158 if ((sg->table[start].addr >> PAGE_SHIFT) != pg) in snd_sgbuf_get_chunk_size()
159 return (start << PAGE_SHIFT) - ofs; in snd_sgbuf_get_chunk_size()
/linux-4.1.27/arch/arm64/include/asm/
Dmemory.h86 #define __phys_to_pfn(paddr) ((unsigned long)((paddr) >> PAGE_SHIFT))
87 #define __pfn_to_phys(pfn) ((phys_addr_t)(pfn) << PAGE_SHIFT)
132 #define PHYS_PFN_OFFSET (PHYS_OFFSET >> PAGE_SHIFT)
156 #define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT)
165 #define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
166 #define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
Dpage.h24 #define PAGE_SHIFT 16 macro
26 #define PAGE_SHIFT 12 macro
28 #define PAGE_SIZE (_AC(1,UL) << PAGE_SHIFT)
Dpgtable-hwdef.h19 #define PTRS_PER_PTE (1 << (PAGE_SHIFT - 3))
25 #define PMD_SHIFT ((PAGE_SHIFT - 3) * 2 + 3)
35 #define PUD_SHIFT ((PAGE_SHIFT - 3) * 3 + 3)
45 #define PGDIR_SHIFT ((PAGE_SHIFT - 3) * CONFIG_PGTABLE_LEVELS + 3)
Dtlbflush.h105 for (addr = start; addr < end; addr += 1 << (PAGE_SHIFT - 12)) in __flush_tlb_range()
117 for (addr = start; addr < end; addr += 1 << (PAGE_SHIFT - 12)) in __flush_tlb_kernel_range()
127 #define MAX_TLB_RANGE (1024UL << PAGE_SHIFT)
Delf.h153 0x7ff >> (PAGE_SHIFT - 12) : \
154 0x3ffff >> (PAGE_SHIFT - 12))
156 #define STACK_RND_MASK (0x3ffff >> (PAGE_SHIFT - 12))
/linux-4.1.27/arch/unicore32/include/asm/
Dmemory.h66 #define __phys_to_pfn(paddr) ((paddr) >> PAGE_SHIFT)
67 #define __pfn_to_phys(pfn) ((pfn) << PAGE_SHIFT)
89 #define PHYS_PFN_OFFSET (PHYS_OFFSET >> PAGE_SHIFT)
96 #define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT)
109 #define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
/linux-4.1.27/arch/sparc/include/asm/
Dpage_64.h6 #define PAGE_SHIFT 13 macro
8 #define PAGE_SIZE (_AC(1,UL) << PAGE_SHIFT)
14 #if PAGE_SHIFT < 14
26 #define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT)
138 #define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT)
140 #define virt_to_page(kaddr) pfn_to_page(__pa(kaddr)>>PAGE_SHIFT)
142 #define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
Dio-unit.h44 unsigned long bmap[(IOUNIT_DMA_SIZE >> (PAGE_SHIFT + 3)) / sizeof(unsigned long)];
52 #define IOUNIT_BMAP1_END (IOUNIT_DMA_SIZE >> (PAGE_SHIFT + 1))
54 #define IOUNIT_BMAP2_END IOUNIT_BMAP2_START + (IOUNIT_DMA_SIZE >> (PAGE_SHIFT + 2))
56 #define IOUNIT_BMAPM_END ((IOUNIT_DMA_SIZE - IOUNIT_DVMA_SIZE) >> PAGE_SHIFT)
Dhighmem.h43 #define PKMAP_SIZE (LAST_PKMAP << PAGE_SHIFT)
44 #define PKMAP_BASE PMD_ALIGN(SRMMU_NOCACHE_VADDR + (SRMMU_MAX_NOCACHE_PAGES << PAGE_SHIFT))
47 #define PKMAP_NR(virt) ((virt - PKMAP_BASE) >> PAGE_SHIFT)
48 #define PKMAP_ADDR(nr) (PKMAP_BASE + ((nr) << PAGE_SHIFT))
Dpgtable_64.h48 #define PMD_SHIFT (PAGE_SHIFT + (PAGE_SHIFT-3))
51 #define PMD_BITS (PAGE_SHIFT - 3)
59 #define PUD_BITS (PAGE_SHIFT - 3)
65 #define PGDIR_BITS (PAGE_SHIFT - 3)
90 #define PTRS_PER_PTE (1UL << (PAGE_SHIFT-3))
239 unsigned long paddr = pfn << PAGE_SHIFT; in pfn_pte()
271 "i" (21), "i" (21 + PAGE_SHIFT), in pte_pfn()
272 "i" (8), "i" (8 + PAGE_SHIFT)); in pte_pfn()
805 return ((unsigned long) __va(pfn << PAGE_SHIFT)); in __pmd_page()
859 ((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)))
[all …]
Dtsb.h159 srlx REG2, 64 - PAGE_SHIFT, REG2; \
164 srlx REG2, 64 - PAGE_SHIFT, REG2; \
176 srlx REG2, 64 - PAGE_SHIFT, REG2; \
191 srlx REG2, 64 - PAGE_SHIFT, REG2; \
237 srlx REG2, 64 - PAGE_SHIFT, REG2; \
242 srlx REG2, 64 - PAGE_SHIFT, REG2; \
247 srlx REG2, 64 - PAGE_SHIFT, REG2; \
252 srlx REG2, 64 - PAGE_SHIFT, REG2; \
311 srlx VADDR, PAGE_SHIFT, REG2; \
Dpage_32.h13 #define PAGE_SHIFT 12 macro
14 #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
130 #define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
133 #define virt_addr_valid(kaddr) ((((unsigned long)(kaddr)-PAGE_OFFSET)>>PAGE_SHIFT) < max_mapnr)
Dthread_info_64.h102 #if PAGE_SHIFT == 13
104 #define THREAD_SHIFT (PAGE_SHIFT + 1)
107 #define THREAD_SHIFT PAGE_SHIFT
130 #if PAGE_SHIFT == 13
Dvaddrs.h50 #define FIXADDR_SIZE ((FIX_KMAP_END + 1) << PAGE_SHIFT)
53 #define __fix_to_virt(x) (FIXADDR_TOP - ((x) << PAGE_SHIFT))
/linux-4.1.27/arch/metag/mm/
Dnuma.c39 start_pfn = start >> PAGE_SHIFT; in setup_bootmem_node()
40 end_pfn = end >> PAGE_SHIFT; in setup_bootmem_node()
60 bootmem_paddr = memblock_alloc_base(bootmap_pages << PAGE_SHIFT, in setup_bootmem_node()
62 init_bootmem_node(NODE_DATA(nid), bootmem_paddr >> PAGE_SHIFT, in setup_bootmem_node()
71 bootmap_pages << PAGE_SHIFT, BOOTMEM_DEFAULT); in setup_bootmem_node()
Dinit.c62 set_pte(pte, pfn_pte(__pa(gateway_page) >> PAGE_SHIFT, PAGE_READONLY)); in insert_gateway_page()
120 SMP_CACHE_BYTES, end_pfn << PAGE_SHIFT); in allocate_pgdat()
159 paddr = memblock_alloc(total_pages << PAGE_SHIFT, PAGE_SIZE); in bootmem_init_one_node()
163 init_bootmem_node(p, paddr >> PAGE_SHIFT, p->node_start_pfn, end_pfn); in bootmem_init_one_node()
186 << PAGE_SHIFT; in bootmem_init_one_node()
228 u64 base = min_low_pfn << PAGE_SHIFT; in init_and_reserve_mem()
229 u64 size = (max_low_pfn << PAGE_SHIFT) - base; in init_and_reserve_mem()
250 base = highstart_pfn << PAGE_SHIFT; in init_and_reserve_mem()
251 size = (highend_pfn << PAGE_SHIFT) - base; in init_and_reserve_mem()
/linux-4.1.27/arch/parisc/mm/
Dinit.c115 #define MAX_GAP (0x40000000UL >> PAGE_SHIFT)
189 size = (pmem_ranges[i].pages << PAGE_SHIFT); in setup_bootmem()
190 start = (pmem_ranges[i].start_pfn << PAGE_SHIFT); in setup_bootmem()
200 res->start = pmem_ranges[i].start_pfn << PAGE_SHIFT; in setup_bootmem()
201 res->end = res->start + (pmem_ranges[i].pages << PAGE_SHIFT)-1; in setup_bootmem()
221 rsize = pmem_ranges[i].pages << PAGE_SHIFT; in setup_bootmem()
227 pmem_ranges[i].pages = (mem_limit >> PAGE_SHIFT) in setup_bootmem()
228 - (mem_max >> PAGE_SHIFT); in setup_bootmem()
268 bootmap_start_pfn = PAGE_ALIGN(__pa((unsigned long) &_end)) >> PAGE_SHIFT; in setup_bootmem()
304 (start_pfn << PAGE_SHIFT), in setup_bootmem()
[all …]
/linux-4.1.27/arch/m32r/include/asm/
Dpage.h7 #define PAGE_SHIFT 12 macro
8 #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
73 #define PFN_BASE (CONFIG_MEMORY_START >> PAGE_SHIFT)
78 #define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
79 #define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
Dpgtable-2level.h58 #define pgd_page(pgd) (mem_map + ((pgd_val(pgd) >> PAGE_SHIFT) - PFN_BASE))
70 #define pte_pfn(x) (pte_val(x) >> PAGE_SHIFT)
71 #define pfn_pte(pfn, prot) __pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
72 #define pfn_pmd(pfn, prot) __pmd(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
/linux-4.1.27/arch/s390/include/asm/
Dpage.h14 #define PAGE_SHIFT 12 macro
15 #define PAGE_SIZE (_AC(1,UL) << PAGE_SHIFT)
23 #define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT)
146 #define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
147 #define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT)
148 #define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
Ddiag.h18 start_addr = start_pfn << PAGE_SHIFT; in diag10_range()
19 end_addr = (start_pfn + num_pfn - 1) << PAGE_SHIFT; in diag10_range()
/linux-4.1.27/arch/mips/mm/
Dtlb-r8k.c43 write_c0_vaddr(entry << PAGE_SHIFT); in local_flush_tlb_all()
44 write_c0_entryhi(CKSEG0 + (entry << (PAGE_SHIFT + 1))); in local_flush_tlb_all()
72 size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT; in local_flush_tlb_range()
101 write_c0_entryhi(CKSEG0 + (idx << (PAGE_SHIFT + 1))); in local_flush_tlb_range()
115 size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT; in local_flush_tlb_kernel_range()
141 write_c0_entryhi(CKSEG0 + (idx << (PAGE_SHIFT + 1))); in local_flush_tlb_kernel_range()
170 write_c0_entryhi(CKSEG0 + (idx << (PAGE_SHIFT + 1))); in local_flush_tlb_page()
Dmmap.c50 (((pgoff) << PAGE_SHIFT) & shm_align_mask))
77 ((addr - (pgoff << PAGE_SHIFT)) & shm_align_mask)) in arch_get_unmapped_area_common()
101 info.align_offset = pgoff << PAGE_SHIFT; in arch_get_unmapped_area_common()
150 rnd <<= PAGE_SHIFT; in arch_mmap_rnd()
179 rnd = rnd << PAGE_SHIFT; in brk_rnd()
Dgup.c86 page = head + ((addr & ~PMD_MASK) >> PAGE_SHIFT); in gup_huge_pmd()
152 page = head + ((addr & ~PUD_MASK) >> PAGE_SHIFT); in gup_huge_pud()
208 len = (unsigned long) nr_pages << PAGE_SHIFT; in __get_user_pages_fast()
274 len = (unsigned long) nr_pages << PAGE_SHIFT; in get_user_pages_fast()
294 VM_BUG_ON(nr != (end - start) >> PAGE_SHIFT); in get_user_pages_fast()
301 start += nr << PAGE_SHIFT; in get_user_pages_fast()
305 (end - start) >> PAGE_SHIFT, in get_user_pages_fast()
/linux-4.1.27/arch/c6x/mm/
Ddma-coherent.c54 return dma_base + (pos << PAGE_SHIFT); in __alloc_dma_pages()
60 u32 pos = (addr - dma_base) >> PAGE_SHIFT; in __free_dma_pages()
85 order = get_count_order(((size - 1) >> PAGE_SHIFT) + 1); in dma_alloc_coherent()
110 order = get_count_order(((size - 1) >> PAGE_SHIFT) + 1); in dma_free_coherent()
134 dma_pages = dma_size >> PAGE_SHIFT; in coherent_mem_init()
Dinit.c52 zones_size[ZONE_NORMAL] = (memory_end - PAGE_OFFSET) >> PAGE_SHIFT; in paging_init()
54 __pa(PAGE_OFFSET) >> PAGE_SHIFT; in paging_init()
/linux-4.1.27/arch/x86/include/asm/
Dpage_types.h8 #define PAGE_SHIFT 12 macro
9 #define PAGE_SIZE (_AC(1,UL) << PAGE_SHIFT)
26 #define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT)
58 return (phys_addr_t)max_pfn_mapped << PAGE_SHIFT; in get_max_mapped()
Dpgalloc.h65 paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT); in pmd_populate_kernel()
75 set_pmd(pmd, __pmd(((pteval_t)pfn << PAGE_SHIFT) | _PAGE_TABLE)); in pmd_populate()
114 paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT); in pud_populate()
122 paravirt_alloc_pud(mm, __pa(pud) >> PAGE_SHIFT); in pgd_populate()
Dpgtable.h140 return (pte_val(pte) & PTE_PFN_MASK) >> PAGE_SHIFT; in pte_pfn()
145 return (pmd_val(pmd) & PTE_PFN_MASK) >> PAGE_SHIFT; in pmd_pfn()
150 return (pud_val(pud) & PTE_PFN_MASK) >> PAGE_SHIFT; in pud_pfn()
339 return __pte(((phys_addr_t)page_nr << PAGE_SHIFT) | in pfn_pte()
345 return __pmd(((phys_addr_t)page_nr << PAGE_SHIFT) | in pfn_pmd()
506 #define pmd_page(pmd) pfn_to_page((pmd_val(pmd) & PTE_PFN_MASK) >> PAGE_SHIFT)
536 return (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1); in pte_index()
551 return npg >> (20 - PAGE_SHIFT); in pages_to_mb()
574 #define pud_page(pud) pfn_to_page(pud_val(pud) >> PAGE_SHIFT)
614 #define pgd_page(pgd) pfn_to_page(pgd_val(pgd) >> PAGE_SHIFT)
[all …]
Da.out-core.h31 dump->u_tsize = ((unsigned long)current->mm->end_code) >> PAGE_SHIFT; in aout_dump_thread()
33 >> PAGE_SHIFT; in aout_dump_thread()
40 >> PAGE_SHIFT; in aout_dump_thread()
Dhighmem.h57 #define PKMAP_NR(virt) ((virt-PKMAP_BASE) >> PAGE_SHIFT)
58 #define PKMAP_ADDR(nr) (PKMAP_BASE + ((nr) << PAGE_SHIFT))
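
The x86 pgtable.h hits above use PAGE_SHIFT to slice a virtual address into page-table indices (pte_index) and to convert page counts into megabytes (pages_to_mb). The sketch below (not kernel code) restates both; the 4 KiB page size and the 512-entry table are assumed x86-64-style values, not quoted from the file.

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT   12
#define PTRS_PER_PTE 512                   /* one 4 KiB page of 8-byte PTEs */

static unsigned long pte_index(uint64_t address)
{
	/* bits PAGE_SHIFT .. PAGE_SHIFT+8 select the PTE within its table */
	return (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
}

static unsigned long pages_to_mb(unsigned long npg)
{
	return npg >> (20 - PAGE_SHIFT);       /* pages -> MiB */
}

int main(void)
{
	uint64_t va = 0x00007f1234567000ULL;

	printf("pte_index    = %lu\n", pte_index(va));
	printf("262144 pages = %lu MiB\n", pages_to_mb(262144));  /* 1024 MiB */
	return 0;
}
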
/linux-4.1.27/tools/testing/selftests/vm/
Dtranshuge-stress.c19 #define PAGE_SHIFT 12 macro
22 #define PAGE_SIZE (1 << PAGE_SHIFT)
47 (uintptr_t)ptr >> (PAGE_SHIFT - 3)) != sizeof(ent)) in allocate_transhuge()
52 !(PAGEMAP_PFN(ent[0]) & ((1 << (HPAGE_SHIFT - PAGE_SHIFT)) - 1))) in allocate_transhuge()
82 len >> (20 + HPAGE_SHIFT - PAGE_SHIFT - 1)); in main()
117 size_t idx = pfn >> (HPAGE_SHIFT - PAGE_SHIFT); in main()
/linux-4.1.27/arch/ia64/mm/
Dinit.c335 # define mapped_space_bits (3*(PAGE_SHIFT - pte_bits) + PAGE_SHIFT) in ia64_mmu_init()
343 # define vmlpt_bits (impl_va_bits - PAGE_SHIFT + pte_bits) in ia64_mmu_init()
356 if ((mapped_space_bits - PAGE_SHIFT > vmlpt_bits - pte_bits) || in ia64_mmu_init()
447 map_start = vmem_map + (__pa(start) >> PAGE_SHIFT); in create_mem_map_page_table()
448 map_end = vmem_map + (__pa(end) >> PAGE_SHIFT); in create_mem_map_page_table()
469 set_pte(pte, pfn_pte(__pa(alloc_bootmem_pages_node(NODE_DATA(node), PAGE_SIZE)) >> PAGE_SHIFT, in create_mem_map_page_table()
489 map_start = vmem_map + (__pa(start) >> PAGE_SHIFT); in virtual_memmap_init()
490 map_end = vmem_map + (__pa(end) >> PAGE_SHIFT); in virtual_memmap_init()
582 pfn_start = (PAGE_ALIGN(__pa(start))) >> PAGE_SHIFT; in find_max_min_low_pfn()
583 pfn_end = (PAGE_ALIGN(__pa(end - 1))) >> PAGE_SHIFT; in find_max_min_low_pfn()
[all …]
Ddiscontig.c80 epfn = GRANULEROUNDUP(end) >> PAGE_SHIFT; in build_node_maps()
81 spfn = GRANULEROUNDDOWN(start) >> PAGE_SHIFT; in build_node_maps()
330 spfn = start >> PAGE_SHIFT; in find_pernode_space()
331 epfn = (start + len) >> PAGE_SHIFT; in find_pernode_space()
334 mapsize = bootmem_bootmap_pages(pages) << PAGE_SHIFT; in find_pernode_space()
403 size = bootmem_bootmap_pages(pages) << PAGE_SHIFT; in reserve_pernode_space()
572 map>>PAGE_SHIFT, in find_memory()
674 (min(end, __pa(MAX_DMA_ADDRESS)) - start) >>PAGE_SHIFT; in count_node_pages()
679 end >> PAGE_SHIFT); in count_node_pages()
681 start >> PAGE_SHIFT); in count_node_pages()
[all …]
/linux-4.1.27/arch/arm64/mm/
Dmmap.c36 #define MIN_GAP (SZ_128M + ((STACK_RND_MASK << PAGE_SHIFT) + 1))
56 return rnd << PAGE_SHIFT; in arch_mmap_rnd()
116 return !(((pfn << PAGE_SHIFT) + size) & ~PHYS_MASK); in valid_mmap_phys_addr_range()
131 if (iomem_is_exclusive(pfn << PAGE_SHIFT)) in devmem_is_allowed()
Ddma-mapping.c107 page = dma_alloc_from_contiguous(dev, size >> PAGE_SHIFT, in __dma_alloc_coherent()
135 size >> PAGE_SHIFT); in __dma_free_coherent()
312 PAGE_SHIFT; in __dma_common_mmap()
313 unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT; in __dma_common_mmap()
314 unsigned long pfn = dma_to_phys(dev, dma_addr) >> PAGE_SHIFT; in __dma_common_mmap()
359 unsigned long nr_pages = atomic_pool_size >> PAGE_SHIFT; in atomic_pool_init()
377 atomic_pool = gen_pool_create(PAGE_SHIFT, -1); in atomic_pool_init()
395 (void *)PAGE_SHIFT); in atomic_pool_init()
Dhugetlbpage.c59 hugetlb_add_hstate(PMD_SHIFT - PAGE_SHIFT); in setup_hugepagesz()
61 hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT); in setup_hugepagesz()
/linux-4.1.27/include/linux/
Dpfn.h9 #define PFN_UP(x) (((x) + PAGE_SIZE-1) >> PAGE_SHIFT)
10 #define PFN_DOWN(x) ((x) >> PAGE_SHIFT)
11 #define PFN_PHYS(x) ((phys_addr_t)(x) << PAGE_SHIFT)
Dslab.h174 #define KMALLOC_SHIFT_HIGH ((MAX_ORDER + PAGE_SHIFT - 1) <= 25 ? \
175 (MAX_ORDER + PAGE_SHIFT - 1) : 25)
187 #define KMALLOC_SHIFT_HIGH (PAGE_SHIFT + 1)
188 #define KMALLOC_SHIFT_MAX (MAX_ORDER + PAGE_SHIFT)
200 #define KMALLOC_SHIFT_HIGH PAGE_SHIFT
212 #define KMALLOC_MAX_ORDER (KMALLOC_SHIFT_MAX - PAGE_SHIFT)
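
The include/linux/pfn.h hits above are the generic rounding helpers used throughout the listing: PFN_DOWN rounds an address down to a frame number, PFN_UP rounds up, and PFN_PHYS converts a frame number back to a physical address. A standalone sketch (not kernel code), assuming 4 KiB pages and made-up addresses:

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

#define PFN_UP(x)   (((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)
#define PFN_DOWN(x) ((x) >> PAGE_SHIFT)
#define PFN_PHYS(x) ((uint64_t)(x) << PAGE_SHIFT)

int main(void)
{
	uint64_t start = 0x100001;   /* just past a page boundary */
	uint64_t end   = 0x103000;

	/* Whole page frames fully inside [start, end): */
	printf("first full pfn = %#llx\n", (unsigned long long)PFN_UP(start));   /* 0x101 */
	printf("end pfn        = %#llx\n", (unsigned long long)PFN_DOWN(end));   /* 0x103 */
	printf("back to phys   = %#llx\n", (unsigned long long)PFN_PHYS(PFN_DOWN(end)));
	return 0;
}
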
/linux-4.1.27/arch/m68k/include/asm/
Dpage_no.h25 #define virt_to_pfn(kaddr) (__pa(kaddr) >> PAGE_SHIFT)
26 #define pfn_to_virt(pfn) __va((pfn) << PAGE_SHIFT)
28 #define virt_to_page(addr) (mem_map + (((unsigned long)(addr)-PAGE_OFFSET) >> PAGE_SHIFT))
29 #define page_to_virt(page) __va(((((page) - mem_map) << PAGE_SHIFT) + PAGE_OFFSET))
Dpage.h10 #define PAGE_SHIFT 13 macro
12 #define PAGE_SHIFT 12 macro
14 #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
Dvirtconvert.h31 __pa(PAGE_OFFSET + (((page) - pg_data_map[0].node_mem_map) << PAGE_SHIFT))
33 #define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT)
Da.out-core.h31 dump->u_tsize = ((unsigned long) current->mm->end_code) >> PAGE_SHIFT; in aout_dump_thread()
33 (PAGE_SIZE-1))) >> PAGE_SHIFT; in aout_dump_thread()
38 dump->u_ssize = ((unsigned long) (TASK_SIZE - dump->start_stack)) >> PAGE_SHIFT; in aout_dump_thread()
/linux-4.1.27/arch/mips/loongson/loongson-3/
Dnuma.c142 start_pfn = ((node_id << 44) + mem_start) >> PAGE_SHIFT; in szmem()
143 node_psize = (mem_size << 20) >> PAGE_SHIFT; in szmem()
156 start_pfn = ((node_id << 44) + mem_start) >> PAGE_SHIFT; in szmem()
157 node_psize = (mem_size << 20) >> PAGE_SHIFT; in szmem()
211 reserve_bootmem_node(NODE_DATA(node), start_pfn << PAGE_SHIFT, in node_mem_init()
212 ((freepfn - start_pfn) << PAGE_SHIFT) + bootmap_size, in node_mem_init()
215 if (node == 0 && node_end_pfn(0) >= (0xffffffff >> PAGE_SHIFT)) { in node_mem_init()
278 high_memory = (void *) __va(get_num_physpages() << PAGE_SHIFT); in mem_init()
/linux-4.1.27/arch/s390/mm/
Dmmap.c43 return STACK_RND_MASK << PAGE_SHIFT; in stack_maxrandom_size()
66 return (get_random_int() & 0x7ff) << PAGE_SHIFT; in arch_mmap_rnd()
68 return (get_random_int() & mmap_rnd_mask) << PAGE_SHIFT; in arch_mmap_rnd()
119 info.align_mask = do_color_align ? (mmap_align_mask << PAGE_SHIFT) : 0; in arch_get_unmapped_area()
120 info.align_offset = pgoff << PAGE_SHIFT; in arch_get_unmapped_area()
159 info.align_mask = do_color_align ? (mmap_align_mask << PAGE_SHIFT) : 0; in arch_get_unmapped_area_topdown()
160 info.align_offset = pgoff << PAGE_SHIFT; in arch_get_unmapped_area_topdown()
/linux-4.1.27/arch/xtensa/mm/
Dtlb.c30 int e = w + (i << PAGE_SHIFT); in __flush_itlb_all()
43 int e = w + (i << PAGE_SHIFT); in __flush_dtlb_all()
104 if (end-start + (PAGE_SIZE-1) <= _TLB_ENTRIES << PAGE_SHIFT) { in local_flush_tlb_range()
155 end - start < _TLB_ENTRIES << PAGE_SHIFT) { in local_flush_tlb_kernel_range()
218 unsigned tlbidx = w | (e << PAGE_SHIFT); in check_tlb_entry()
221 unsigned vpn = (r0 & PAGE_MASK) | (e << PAGE_SHIFT); in check_tlb_entry()
242 struct page *p = pfn_to_page(r1 >> PAGE_SHIFT); in check_tlb_entry()
Dinit.c258 pfn = PAGE_ALIGN(sysmem.bank[i].start) >> PAGE_SHIFT; in bootmem_init()
261 pfn = PAGE_ALIGN(sysmem.bank[i].end - 1) >> PAGE_SHIFT; in bootmem_init()
269 max_low_pfn = max_pfn < MAX_MEM_PFN >> PAGE_SHIFT ? in bootmem_init()
270 max_pfn : MAX_MEM_PFN >> PAGE_SHIFT; in bootmem_init()
275 bootmap_size <<= PAGE_SHIFT; in bootmem_init()
291 bootmap_start >> PAGE_SHIFT, in bootmem_init()
298 if (sysmem.bank[i].start >> PAGE_SHIFT < max_low_pfn) { in bootmem_init()
299 unsigned long end = min(max_low_pfn << PAGE_SHIFT, in bootmem_init()
336 high_memory = (void *)__va(max_low_pfn << PAGE_SHIFT); in mem_init()
/linux-4.1.27/arch/m32r/mm/
Dinit.c68 max_dma = virt_to_phys((char *)MAX_DMA_ADDRESS) >> PAGE_SHIFT; in zone_sizes_init()
79 zones_size[ZONE_DMA] = 0 >> PAGE_SHIFT; in zone_sizes_init()
80 zones_size[ZONE_NORMAL] = __MEMORY_SIZE >> PAGE_SHIFT; in zone_sizes_init()
81 start_pfn = __MEMORY_START >> PAGE_SHIFT; in zone_sizes_init()
/linux-4.1.27/arch/unicore32/mm/
Dtlb-ucv2.S30 mov r0, r0 >> #PAGE_SHIFT @ align address
31 mov r0, r0 << #PAGE_SHIFT
69 mov r0, r0 >> #PAGE_SHIFT @ align address
70 mov r0, r0 << #PAGE_SHIFT
/linux-4.1.27/fs/romfs/
Dmmap-nommu.c37 lpages = (len + PAGE_SIZE - 1) >> PAGE_SHIFT; in romfs_get_unmapped_area()
39 offset = pgoff << PAGE_SHIFT; in romfs_get_unmapped_area()
41 maxpages = (isize + PAGE_SIZE - 1) >> PAGE_SHIFT; in romfs_get_unmapped_area()
48 if (len > mtd->size || pgoff >= (mtd->size >> PAGE_SHIFT)) in romfs_get_unmapped_area()
/linux-4.1.27/arch/mips/kvm/
Dtlb.c181 gfn = (KVM_GUEST_CPHYSADDR(gva) >> PAGE_SHIFT); in kvm_mips_translate_guest_kseg0_to_hpa()
192 return (kvm->arch.guest_pmap[gfn] << PAGE_SHIFT) + offset; in kvm_mips_translate_guest_kseg0_to_hpa()
248 (0x1 << PAGE_SHIFT)); in kvm_mips_host_tlb_write()
278 gfn = (KVM_GUEST_CPHYSADDR(badvaddr) >> PAGE_SHIFT); in kvm_mips_handle_kseg0_tlb_fault()
303 entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) | in kvm_mips_handle_kseg0_tlb_fault()
305 entrylo1 = mips3_paddr_to_tlbpfn(pfn1 << PAGE_SHIFT) | (0x3 << 3) | in kvm_mips_handle_kseg0_tlb_fault()
320 pfn0 = CPHYSADDR(vcpu->arch.kseg0_commpage) >> PAGE_SHIFT; in kvm_mips_handle_commpage_tlb_fault()
322 entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) | in kvm_mips_handle_commpage_tlb_fault()
370 >> PAGE_SHIFT) < 0) in kvm_mips_handle_mapped_seg_tlb_fault()
374 >> PAGE_SHIFT) < 0) in kvm_mips_handle_mapped_seg_tlb_fault()
[all …]
/linux-4.1.27/fs/logfs/
Ddev_mtd.c78 pgoff_t index = ofs >> PAGE_SHIFT; in logfs_mtd_erase_mapping()
80 for (index = ofs >> PAGE_SHIFT; index < (ofs + len) >> PAGE_SHIFT; index++) { in logfs_mtd_erase_mapping()
130 err = logfs_mtd_read(sb, page->index << PAGE_SHIFT, PAGE_SIZE, in logfs_mtd_readpage()
162 return read_cache_page(mapping, *ofs >> PAGE_SHIFT, filler, sb); in logfs_mtd_find_first_sb()
180 return read_cache_page(mapping, *ofs >> PAGE_SHIFT, filler, sb); in logfs_mtd_find_last_sb()
195 err = loffs_mtd_write(sb, page->index << PAGE_SHIFT, PAGE_SIZE, in __logfs_mtd_writeseg()
226 __logfs_mtd_writeseg(sb, ofs, ofs >> PAGE_SHIFT, len >> PAGE_SHIFT); in logfs_mtd_writeseg()
/linux-4.1.27/arch/tile/kernel/
Dvdso.c72 int data_pages = sizeof(vdso_data_store) >> PAGE_SHIFT; in vdso_init()
96 vdso_pages = (vdso_end - vdso_start) >> PAGE_SHIFT; in vdso_init()
101 vdso32_pages = (vdso32_end - vdso32_start) >> PAGE_SHIFT; in vdso_init()
154 (pages << PAGE_SHIFT) + in setup_vdso_pages()
183 pages << PAGE_SHIFT, in setup_vdso_pages()
Dsys.c81 #define PAGE_ADJUST (PAGE_SHIFT - 12) in SYSCALL_DEFINE6()
93 if (offset & ((1 << PAGE_SHIFT) - 1)) in SYSCALL_DEFINE6()
96 offset >> PAGE_SHIFT); in SYSCALL_DEFINE6()
Dsetup.c132 maxmem_pfn = (maxmem >> HPAGE_SHIFT) << (HPAGE_SHIFT - PAGE_SHIFT); in setup_maxmem()
134 maxmem_pfn >> (20 - PAGE_SHIFT)); in setup_maxmem()
151 (HPAGE_SHIFT - PAGE_SHIFT); in setup_maxnodemem()
153 node, maxnodemem_pfn[node] >> (20 - PAGE_SHIFT)); in setup_maxnodemem()
205 (HPAGE_SHIFT - PAGE_SHIFT); in setup_memmap()
321 << PAGE_SHIFT); in setup_pa_va_mapping()
323 vaddr_end = PAGE_OFFSET + (curr_pages << PAGE_SHIFT); in setup_pa_va_mapping()
354 HV_PhysAddr pa = ((HV_PhysAddr)node_start_pfn[i]) << PAGE_SHIFT; in store_permanent_mappings()
363 hv_store_mapping(addr, pages << PAGE_SHIFT, pa); in store_permanent_mappings()
464 start = range.start >> PAGE_SHIFT; in setup_memory()
[all …]
/linux-4.1.27/arch/microblaze/mm/
Dinit.c82 if (!memblock_is_reserved(pfn << PAGE_SHIFT)) in highmem_setup()
166 min_low_pfn = memory_start >> PAGE_SHIFT; /* minimum for allocation */ in setup_memory()
168 max_mapnr = memory_size >> PAGE_SHIFT; in setup_memory()
169 max_low_pfn = ((u64)memory_start + (u64)lowmem_size) >> PAGE_SHIFT; in setup_memory()
170 max_pfn = ((u64)memory_start + (u64)memory_size) >> PAGE_SHIFT; in setup_memory()
186 memblock_reserve(PFN_UP(TOPHYS((u32)klimit)) << PAGE_SHIFT, map_size); in setup_memory()
194 memblock_set_node(start_pfn << PAGE_SHIFT, in setup_memory()
195 (end_pfn - start_pfn) << PAGE_SHIFT, in setup_memory()
/linux-4.1.27/arch/mips/sgi-ip27/
Dip27-memory.c34 #define SLOT_PFNSHIFT (SLOT_SHIFT - PAGE_SHIFT)
35 #define PFN_NASIDSHFT (NASID_SHFT - PAGE_SHIFT)
290 return size >> PAGE_SHIFT; in slot_psize_compute()
296 return size >> PAGE_SHIFT; in slot_psize_compute()
375 if ((nodebytes >> PAGE_SHIFT) * (sizeof(struct page)) > in szmem()
376 (slot0sz << PAGE_SHIFT)) { in szmem()
400 __node_data[node] = __va(slot_freepfn << PAGE_SHIFT); in node_mem_init()
415 reserve_bootmem_node(NODE_DATA(node), slot_firstpfn << PAGE_SHIFT, in node_mem_init()
416 ((slot_freepfn - slot_firstpfn) << PAGE_SHIFT) + bootmap_size, in node_mem_init()
480 high_memory = (void *) __va(get_num_physpages() << PAGE_SHIFT); in mem_init()
/linux-4.1.27/arch/score/include/asm/
Dfixmap.h58 #define FIXADDR_SIZE (__end_of_fixed_addresses << PAGE_SHIFT)
61 #define __fix_to_virt(x) (FIXADDR_TOP - ((x) << PAGE_SHIFT))
63 ((FIXADDR_TOP - ((x) & PAGE_MASK)) >> PAGE_SHIFT)
Dpage.h8 #define PAGE_SHIFT (12) macro
9 #define PAGE_SIZE (_AC(1,UL) << PAGE_SHIFT)
79 #define ARCH_PFN_OFFSET (PAGE_OFFSET >> PAGE_SHIFT)
/linux-4.1.27/arch/x86/power/
Dhibernate_64.c68 mstart = pfn_mapped[i].start << PAGE_SHIFT; in set_up_temporary_mappings()
69 mend = pfn_mapped[i].end << PAGE_SHIFT; in set_up_temporary_mappings()
105 unsigned long nosave_begin_pfn = __pa_symbol(&__nosave_begin) >> PAGE_SHIFT; in pfn_is_nosave()
106 unsigned long nosave_end_pfn = PAGE_ALIGN(__pa_symbol(&__nosave_end)) >> PAGE_SHIFT; in pfn_is_nosave()
/linux-4.1.27/arch/m68k/coldfire/
Dm54xx.c94 num_pages = (_ramend - _rambase) >> PAGE_SHIFT; in mcf54xx_bootmem_alloc()
98 min_low_pfn = _rambase >> PAGE_SHIFT; in mcf54xx_bootmem_alloc()
99 start_pfn = memstart >> PAGE_SHIFT; in mcf54xx_bootmem_alloc()
100 max_low_pfn = _ramend >> PAGE_SHIFT; in mcf54xx_bootmem_alloc()
/linux-4.1.27/arch/nios2/include/asm/
Dpage.h24 #define PAGE_SHIFT 12 macro
25 #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
87 ((((page) - mem_map) << PAGE_SHIFT) + PAGE_OFFSET)
89 # define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT)
/linux-4.1.27/arch/parisc/kernel/
Dsys_parisc.c48 return (last_mmap & (SHM_COLOUR-1)) >> PAGE_SHIFT; in get_offset()
54 return (get_offset(last_mmap) + pgoff) << PAGE_SHIFT; in shared_align_offset()
62 (shared_align_offset(last_mmap, pgoff) << PAGE_SHIFT); in COLOR_ALIGN()
81 stack_base += (STACK_RND_MASK << PAGE_SHIFT); in mmap_upper_limit()
134 SET_LAST_MMAP(filp, addr - (pgoff << PAGE_SHIFT)); in arch_get_unmapped_area()
205 SET_LAST_MMAP(filp, addr - (pgoff << PAGE_SHIFT)); in arch_get_unmapped_area_topdown()
238 return rnd << PAGE_SHIFT; in mmap_rnd()
271 pgoff >> (PAGE_SHIFT - 12)); in sys_mmap2()
280 offset >> PAGE_SHIFT); in sys_mmap()
/linux-4.1.27/arch/m68k/mm/
Dmotorola.c252 min_low_pfn = availmem >> PAGE_SHIFT; in paging_init()
253 max_low_pfn = max_addr >> PAGE_SHIFT; in paging_init()
261 availmem >> PAGE_SHIFT, in paging_init()
262 addr >> PAGE_SHIFT, in paging_init()
263 end >> PAGE_SHIFT); in paging_init()
301 zones_size[ZONE_DMA] = m68k_memory[i].size >> PAGE_SHIFT; in paging_init()
303 m68k_memory[i].addr >> PAGE_SHIFT, NULL); in paging_init()
/linux-4.1.27/arch/frv/kernel/
Dsetup.c889 kend >> PAGE_SHIFT, /* map addr */ in setup_linux_memory()
890 memory_start >> PAGE_SHIFT, /* start of RAM */ in setup_linux_memory()
891 memory_end >> PAGE_SHIFT /* end of RAM */ in setup_linux_memory()
895 max_mapnr = physpages = (memory_end - memory_start) >> PAGE_SHIFT; in setup_linux_memory()
896 low_top_pfn = (KERNEL_LOWMEM_END - KERNEL_LOWMEM_START) >> PAGE_SHIFT; in setup_linux_memory()
910 min_low_pfn = memory_start >> PAGE_SHIFT; in setup_linux_memory()
912 max_pfn = memory_end >> PAGE_SHIFT; in setup_linux_memory()
916 printk(KERN_NOTICE "%ldMB LOWMEM available.\n", low_top_pfn >> (20 - PAGE_SHIFT)); in setup_linux_memory()
918 free_bootmem(memory_start, low_top_pfn << PAGE_SHIFT); in setup_linux_memory()
922 printk(KERN_NOTICE "%ldMB HIGHMEM available.\n", high_mem >> (20 - PAGE_SHIFT)); in setup_linux_memory()
[all …]
Dsys_frv.c39 if (pgoff & ((1 << (PAGE_SHIFT - 12)) - 1)) in sys_mmap2()
43 pgoff >> (PAGE_SHIFT - 12)); in sys_mmap2()
/linux-4.1.27/mm/
Dmincore.c87 unsigned long nr = (end - addr) >> PAGE_SHIFT; in __mincore_unmapped_range()
118 int nr = (end - addr) >> PAGE_SHIFT; in mincore_pte_range()
189 end = min(vma->vm_end, addr + (pages << PAGE_SHIFT)); in do_mincore()
193 return (end - addr) >> PAGE_SHIFT; in do_mincore()
236 pages = len >> PAGE_SHIFT; in SYSCALL_DEFINE3()
264 start += retval << PAGE_SHIFT; in SYSCALL_DEFINE3()
Dmremap.c269 new_pgoff = vma->vm_pgoff + ((old_addr - vma->vm_start) >> PAGE_SHIFT); in move_vma()
317 vm_stat_account(mm, vma->vm_flags, vma->vm_file, new_len>>PAGE_SHIFT); in move_vma()
321 vm_unacct_memory(excess >> PAGE_SHIFT); in move_vma()
334 mm->locked_vm += new_len >> PAGE_SHIFT; in move_vma()
363 pgoff = (addr - vma->vm_start) >> PAGE_SHIFT; in vma_to_resize()
365 if (pgoff + (new_len >> PAGE_SHIFT) < pgoff) in vma_to_resize()
371 locked = mm->locked_vm << PAGE_SHIFT; in vma_to_resize()
378 if (!may_expand_vm(mm, (new_len - old_len) >> PAGE_SHIFT)) in vma_to_resize()
382 unsigned long charged = (new_len - old_len) >> PAGE_SHIFT; in vma_to_resize()
437 ((addr - vma->vm_start) >> PAGE_SHIFT), in mremap_to()
[all …]
Dpercpu-km.c50 const int nr_pages = pcpu_group_sizes[0] >> PAGE_SHIFT; in pcpu_create_chunk()
80 const int nr_pages = pcpu_group_sizes[0] >> PAGE_SHIFT; in pcpu_destroy_chunk()
102 nr_pages = (ai->groups[0].nr_units * ai->unit_size) >> PAGE_SHIFT; in pcpu_verify_alloc_info()
Dmlock.c481 VM_BUG_ON((start >> PAGE_SHIFT) & page_mask); in munlock_vma_pages_range()
511 pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT); in mlock_fixup()
535 nr_pages = (end - start) >> PAGE_SHIFT; in mlock_fixup()
622 lock_limit >>= PAGE_SHIFT; in SYSCALL_DEFINE2()
623 locked = len >> PAGE_SHIFT; in SYSCALL_DEFINE2()
699 lock_limit >>= PAGE_SHIFT; in SYSCALL_DEFINE1()
735 locked = (size + PAGE_SIZE - 1) >> PAGE_SHIFT; in user_shm_lock()
739 lock_limit >>= PAGE_SHIFT; in user_shm_lock()
755 user->locked_shm -= (size + PAGE_SIZE - 1) >> PAGE_SHIFT; in user_shm_unlock()
Dmmap.c203 free -= sysctl_admin_reserve_kbytes >> (PAGE_SHIFT - 10); in __vm_enough_memory()
216 allowed -= sysctl_admin_reserve_kbytes >> (PAGE_SHIFT - 10); in __vm_enough_memory()
222 reserve = sysctl_user_reserve_kbytes >> (PAGE_SHIFT - 10); in __vm_enough_memory()
599 max(addr, vma->vm_start)) >> PAGE_SHIFT; in count_vma_pages_range()
609 nr_pages += overlap_len >> PAGE_SHIFT; in count_vma_pages_range()
757 adjust_next = (end - next->vm_start) >> PAGE_SHIFT; in vma_adjust()
766 adjust_next = -((vma->vm_end - end) >> PAGE_SHIFT); in vma_adjust()
837 next->vm_start += adjust_next << PAGE_SHIFT; in vma_adjust()
1038 pgoff_t pglen = (end - addr) >> PAGE_SHIFT; in vma_merge()
1126 b->vm_pgoff == a->vm_pgoff + ((b->vm_start - a->vm_start) >> PAGE_SHIFT); in anon_vma_compatible()
[all …]
Dnommu.c267 *pfn = address >> PAGE_SHIFT; in follow_pfn()
980 if ((pgoff + (rlen >> PAGE_SHIFT)) < pgoff) in validate_mmap_request()
1211 point = len >> PAGE_SHIFT; in do_mmap_private()
1219 base = alloc_pages_exact(total << PAGE_SHIFT, GFP_KERNEL); in do_mmap_private()
1228 region->vm_top = region->vm_start + (total << PAGE_SHIFT); in do_mmap_private()
1239 fpos <<= PAGE_SHIFT; in do_mmap_private()
1345 pglen = (len + PAGE_SIZE - 1) >> PAGE_SHIFT; in do_mmap_pgoff()
1363 rpglen = (rpglen + PAGE_SIZE - 1) >> PAGE_SHIFT; in do_mmap_pgoff()
1382 start += (pgoff - pregion->vm_pgoff) << PAGE_SHIFT; in do_mmap_pgoff()
1456 current->mm->total_vm += len >> PAGE_SHIFT; in do_mmap_pgoff()
[all …]
/linux-4.1.27/arch/frv/include/asm/
Dpage.h48 #define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT)
57 #define ARCH_PFN_OFFSET (PAGE_OFFSET >> PAGE_SHIFT)
62 #define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
63 #define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
Dmem-layout.h24 #define PAGE_SHIFT 14 macro
27 #define PAGE_SIZE (1UL << PAGE_SHIFT)
29 #define PAGE_SIZE (1 << PAGE_SHIFT)
Dpgtable.h268 #define pte_page(x) (mem_map + ((unsigned long)(((x).pte >> PAGE_SHIFT))))
270 #define pte_pfn(x) ((unsigned long)(((x).pte >> PAGE_SHIFT)))
271 #define pfn_pte(pfn, prot) __pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
272 #define pfn_pmd(pfn, prot) __pmd(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
366 #define pmd_page(pmd) (pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT))
369 #define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT))
422 #define mk_pte_phys(physpage, pgprot) pfn_pte((physpage) >> PAGE_SHIFT, pgprot)
436 #define __pte_index(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
445 (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
/linux-4.1.27/arch/powerpc/kvm/
Dbook3s_hv_builtin.c35 #define HPT_ALIGN_PAGES ((1 << 18) >> PAGE_SHIFT) /* 256k */
54 VM_BUG_ON(order_base_2(nr_pages) < KVM_CMA_CHUNK_ORDER - PAGE_SHIFT); in kvm_alloc_hpt()
93 selected_size = (selected_size * kvm_cma_resv_ratio / 100) << PAGE_SHIFT; in kvm_cma_reserve()
97 align_size = HPT_ALIGN_PAGES << PAGE_SHIFT; in kvm_cma_reserve()
99 KVM_CMA_CHUNK_ORDER - PAGE_SHIFT, false, &kvm_cma); in kvm_cma_reserve()
Dbook3s_32_mmu_host.c45 #if PAGE_SHIFT != 12
166 hpaddr <<= PAGE_SHIFT; in kvmppc_mmu_map_page()
211 mark_page_dirty(vcpu->kvm, orig_pte->raddr >> PAGE_SHIFT); in kvmppc_mmu_map_page()
217 kvmppc_mmu_flush_icache(hpaddr >> PAGE_SHIFT); in kvmppc_mmu_map_page()
246 kvm_release_pfn_clean(hpaddr >> PAGE_SHIFT); in kvmppc_mmu_map_page()
260 pte->pfn = hpaddr >> PAGE_SHIFT; in kvmppc_mmu_map_page()
264 kvm_release_pfn_clean(hpaddr >> PAGE_SHIFT); in kvmppc_mmu_map_page()
De500_mmu_host.c168 pfn = (pfn_t)virt_to_phys((void *)shared_page) >> PAGE_SHIFT; in kvmppc_map_magic()
177 magic.mas7_3 = ((u64)pfn << PAGE_SHIFT) | in kvmppc_map_magic()
320 stlbe->mas7_3 = ((u64)pfn << PAGE_SHIFT) | in kvmppc_e500_setup_stlbe()
379 ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT); in kvmppc_e500_shadow_map()
381 pfn = start + ((hva - vma->vm_start) >> PAGE_SHIFT); in kvmppc_e500_shadow_map()
422 gvaddr &= ~((tsize_pages << PAGE_SHIFT) - 1); in kvmppc_e500_shadow_map()
450 tsize_pages = 1 << (tsize + 10 - PAGE_SHIFT); in kvmppc_e500_shadow_map()
461 gvaddr &= ~((tsize_pages << PAGE_SHIFT) - 1); in kvmppc_e500_shadow_map()
526 get_tlb_raddr(gtlbe) >> PAGE_SHIFT, in kvmppc_e500_tlb0_map()
614 gfn_t gfn = gpaddr >> PAGE_SHIFT; in kvmppc_mmu_map()
[all …]
/linux-4.1.27/drivers/xen/
Dxen-balloon.c46 #define PAGES2KB(_p) ((_p)<<(PAGE_SHIFT-10))
70 balloon_set_new_target(new_target >> (PAGE_SHIFT - 10)); in watch_target()
157 balloon_set_new_target(target_bytes >> PAGE_SHIFT); in store_target_kb()
171 << PAGE_SHIFT); in show_target()
187 balloon_set_new_target(target_bytes >> PAGE_SHIFT); in store_target()
Dprivcmd.c210 if ((msg->npages > (LONG_MAX >> PAGE_SHIFT)) || in mmap_mfn_range()
211 ((unsigned long)(msg->npages << PAGE_SHIFT) >= -st->va)) in mmap_mfn_range()
216 ((msg->va+(msg->npages<<PAGE_SHIFT)) > vma->vm_end)) in mmap_mfn_range()
227 st->va += msg->npages << PAGE_SHIFT; in mmap_mfn_range()
450 if ((m.num <= 0) || (nr_pages > (LONG_MAX >> PAGE_SHIFT))) in privcmd_ioctl_mmap_batch()
492 m.addr + (nr_pages << PAGE_SHIFT) != vma->vm_end) { in privcmd_ioctl_mmap_batch()
504 m.addr + (nr_pages << PAGE_SHIFT) > vma->vm_end) { in privcmd_ioctl_mmap_batch()
584 int numpgs = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT; in privcmd_close()
641 return apply_to_page_range(vma->vm_mm, addr, nr_pages << PAGE_SHIFT, in privcmd_vma_range_is_mapped()
/linux-4.1.27/arch/sparc/kernel/
Dsys_sparc_32.c48 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1))) in arch_get_unmapped_area()
65 info.align_offset = pgoff << PAGE_SHIFT; in arch_get_unmapped_area()
105 pgoff >> (PAGE_SHIFT - 12)); in sys_mmap2()
113 return sys_mmap_pgoff(addr, len, prot, flags, fd, off >> PAGE_SHIFT); in sys_mmap()
124 (pgoff >> (PAGE_SHIFT - 12)), flags); in sparc_remap_file_pages()
Dsys_sparc_64.c81 unsigned long off = (pgoff<<PAGE_SHIFT) & (SHMLBA-1); in COLOR_ALIGN()
99 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1))) in arch_get_unmapped_area()
130 info.align_offset = pgoff << PAGE_SHIFT; in arch_get_unmapped_area()
163 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1))) in arch_get_unmapped_area_topdown()
193 info.align_offset = pgoff << PAGE_SHIFT; in arch_get_unmapped_area_topdown()
269 rnd = (val % (1UL << (23UL-PAGE_SHIFT))); in mmap_rnd()
271 rnd = (val % (1UL << (30UL-PAGE_SHIFT))); in mmap_rnd()
273 return rnd << PAGE_SHIFT; in mmap_rnd()
458 retval = sys_mmap_pgoff(addr, len, prot, flags, fd, off >> PAGE_SHIFT); in SYSCALL_DEFINE6()
/linux-4.1.27/arch/ia64/kernel/
Divt.S113 movl r18=PAGE_SHIFT
137 (p7) dep r17=r17,r19,(PAGE_SHIFT-3),3 // put region number bits in place
143 (p6) shr.u r21=r21,PGDIR_SHIFT+PAGE_SHIFT
144 (p7) shr.u r21=r21,PGDIR_SHIFT+PAGE_SHIFT-3
146 (p6) dep r17=r18,r19,3,(PAGE_SHIFT-3) // r17=pgd_offset for region 5
147 (p7) dep r17=r18,r17,3,(PAGE_SHIFT-6) // r17=pgd_offset for region[0-4]
159 dep r28=r28,r17,3,(PAGE_SHIFT-3) // r28=pud_offset(pgd,addr)
165 dep r17=r18,r29,3,(PAGE_SHIFT-3) // r17=pmd_offset(pud,addr)
167 dep r17=r18,r17,3,(PAGE_SHIFT-3) // r17=pmd_offset(pgd,addr)
171 shr.u r19=r22,PAGE_SHIFT // shift pte index into position
[all …]
/linux-4.1.27/drivers/infiniband/hw/usnic/
Dusnic_uiom.c129 npages = PAGE_ALIGN(size + (addr & ~PAGE_MASK)) >> PAGE_SHIFT; in usnic_uiom_get_pages()
134 lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT; in usnic_uiom_get_pages()
204 va = interval->start << PAGE_SHIFT; in usnic_uiom_unmap_sorted_intervals()
205 size = ((interval->last - interval->start) + 1) << PAGE_SHIFT; in usnic_uiom_unmap_sorted_intervals()
226 npages = PAGE_ALIGN(uiomr->length + uiomr->offset) >> PAGE_SHIFT; in __usnic_uiom_reg_release()
227 vpn_start = (uiomr->va & PAGE_MASK) >> PAGE_SHIFT; in __usnic_uiom_reg_release()
268 if ((va >> PAGE_SHIFT) < interval_node->start) in usnic_uiom_map_sorted_intervals()
271 if ((va >> PAGE_SHIFT) == interval_node->start) { in usnic_uiom_map_sorted_intervals()
298 if ((va >> PAGE_SHIFT) == interval_node->last) { in usnic_uiom_map_sorted_intervals()
357 npages = PAGE_ALIGN(size + offset) >> PAGE_SHIFT; in usnic_uiom_reg_get()
[all …]
/linux-4.1.27/arch/arc/mm/
Dmmap.c21 (((pgoff) << PAGE_SHIFT) & (SHMLBA - 1)))
51 (addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)) in arch_get_unmapped_area()
76 info.align_offset = pgoff << PAGE_SHIFT; in arch_get_unmapped_area()
/linux-4.1.27/arch/sh/kernel/
Dsys_sh.c37 return sys_mmap_pgoff(addr, len, prot, flags, fd, off>>PAGE_SHIFT); in old_mmap()
48 if (pgoff & ((1 << (PAGE_SHIFT - 12)) - 1)) in sys_mmap2()
51 pgoff >>= PAGE_SHIFT - 12; in sys_mmap2()
Dswsusp.c24 unsigned long begin_pfn = __pa(&__nosave_begin) >> PAGE_SHIFT; in pfn_is_nosave()
25 unsigned long end_pfn = PAGE_ALIGN(__pa(&__nosave_end)) >> PAGE_SHIFT; in pfn_is_nosave()
/linux-4.1.27/drivers/gpu/drm/gma500/
Dgtt.c45 BUG_ON(pfn & ~(0xFFFFFFFF >> PAGE_SHIFT)); in psb_gtt_mask_pte()
54 return (pfn << PAGE_SHIFT) | mask; in psb_gtt_mask_pte()
72 return dev_priv->gtt_map + (offset >> PAGE_SHIFT); in psb_gtt_entry()
457 >> PAGE_SHIFT; in psb_gtt_init()
467 >> PAGE_SHIFT; in psb_gtt_init()
477 pg->gatt_pages = (128 * 1024 * 1024) >> PAGE_SHIFT; in psb_gtt_init()
514 gtt_pages << PAGE_SHIFT); in psb_gtt_init()
535 pfn_base = dev_priv->stolen_base >> PAGE_SHIFT; in psb_gtt_init()
536 num_pages = vram_stolen_size >> PAGE_SHIFT; in psb_gtt_init()
538 num_pages, pfn_base << PAGE_SHIFT, 0); in psb_gtt_init()
Dmmu.c146 PSB_WSGX32(page_to_pfn(pd->p) << PAGE_SHIFT, offset); in psb_mmu_set_pd_context()
172 return (pfn << PAGE_SHIFT) | mask; in psb_mmu_mask_pte()
282 uint32_t clflush_add = pd->driver->clflush_add >> PAGE_SHIFT; in psb_mmu_alloc_pt()
435 return page_to_pfn(pd->p) << PAGE_SHIFT; in psb_get_default_pd_addr()
532 add = desired_tile_stride << PAGE_SHIFT; in psb_mmu_flush_ptes()
533 row_add = hw_tile_stride << PAGE_SHIFT; in psb_mmu_flush_ptes()
577 end = addr + (num_pages << PAGE_SHIFT); in psb_mmu_remove_pfn_sequence()
623 add = desired_tile_stride << PAGE_SHIFT; in psb_mmu_remove_pages()
624 row_add = hw_tile_stride << PAGE_SHIFT; in psb_mmu_remove_pages()
675 end = addr + (num_pages << PAGE_SHIFT); in psb_mmu_insert_pfn_sequence()
[all …]
/linux-4.1.27/arch/blackfin/kernel/
Ddma-mapping.c34 dma_pages = dma_size >> PAGE_SHIFT; in dma_alloc_init()
44 return ((size - 1) >> PAGE_SHIFT) + 1; in get_pages()
59 ret = dma_base + (start << PAGE_SHIFT); in __alloc_dma_pages()
68 unsigned long page = (addr - dma_base) >> PAGE_SHIFT; in __free_dma_pages()
/linux-4.1.27/arch/openrisc/mm/
Dioremap.c67 if ((fixmaps_used + (size >> PAGE_SHIFT)) > FIX_N_IOREMAPS) in __ioremap()
70 fixmaps_used += (size >> PAGE_SHIFT); in __ioremap()
77 fixmaps_used -= (size >> PAGE_SHIFT); in __ioremap()
/linux-4.1.27/arch/um/include/asm/
Dpage.h13 #define PAGE_SHIFT 12 macro
14 #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
112 #define phys_to_pfn(p) ((pfn_t) ((p) >> PAGE_SHIFT))
113 #define pfn_to_phys(pfn) ((phys_t) ((pfn) << PAGE_SHIFT))
/linux-4.1.27/drivers/parisc/
Dsba_iommu.c379 unsigned long o = 1 << get_order(bits_wanted << PAGE_SHIFT); in sba_search_bitmap()
578 pa |= (ci >> PAGE_SHIFT) & 0xff; /* move CI (8 bits) into lowest byte */ in sba_io_pdir_entry()
637 iovp |= get_order(byte_cnt) + PAGE_SHIFT; in sba_mark_invalid()
1045 …ages += ((sg_dma_address(sglist) & ~IOVP_MASK) + sg_dma_len(sglist) + IOVP_SIZE - 1) >> PAGE_SHIFT; in sba_unmap_sg()
1279 iov_order = get_order(iova_space_size >> (IOVP_SHIFT - PAGE_SHIFT)); in sba_ioc_init_pluto()
1284 iov_order + PAGE_SHIFT); in sba_ioc_init_pluto()
1297 ioc->hint_shift_pdir = iov_order + PAGE_SHIFT; in sba_ioc_init_pluto()
1298 ioc->hint_mask_pdir = ~(0x3 << (iov_order + PAGE_SHIFT)); in sba_ioc_init_pluto()
1309 iova_space_mask <<= (iov_order + PAGE_SHIFT); in sba_ioc_init_pluto()
1329 switch (PAGE_SHIFT) { in sba_ioc_init_pluto()
[all …]
/linux-4.1.27/arch/x86/kernel/
Damd_gart_64.c102 PAGE_SIZE) >> PAGE_SHIFT; in alloc_iommu()
104 PAGE_SIZE) >> PAGE_SHIFT; in alloc_iommu()
277 iommu_page = (dma_addr - iommu_bus_base)>>PAGE_SHIFT; in gart_unmap_page()
471 iommu_full(dev, pages << PAGE_SHIFT, dir); in gart_map_sg()
673 gatt_size = (aper_size >> PAGE_SHIFT) * sizeof(u32); in init_amd_gatt()
678 if (set_memory_uc((unsigned long)gatt, gatt_size >> PAGE_SHIFT)) in init_amd_gatt()
769 end_pfn = (aper_base>>PAGE_SHIFT) + (aper_size>>PAGE_SHIFT); in gart_iommu_init()
773 init_memory_mapping(start_pfn<<PAGE_SHIFT, end_pfn<<PAGE_SHIFT); in gart_iommu_init()
777 iommu_pages = iommu_size >> PAGE_SHIFT; in gart_iommu_init()
807 iommu_gatt_base = agp_gatt_table + (iommu_start>>PAGE_SHIFT); in gart_iommu_init()
[all …]
Despfix_64.c53 #define ESPFIX_PAGE_SPACE (1UL << (PGDIR_SHIFT-PAGE_SHIFT-16))
90 addr = (page << PAGE_SHIFT) + (slot * ESPFIX_STACK_SIZE); in espfix_base_addr()
175 paravirt_alloc_pmd(&init_mm, __pa(pmd_p) >> PAGE_SHIFT); in init_espfix_ap()
185 paravirt_alloc_pte(&init_mm, __pa(pte_p) >> PAGE_SHIFT); in init_espfix_ap()
/linux-4.1.27/arch/x86/kvm/
Diommu.c108 while ((gfn + (page_size >> PAGE_SHIFT)) > end_gfn) in kvm_iommu_map_pages()
112 while ((gfn << PAGE_SHIFT) & (page_size - 1)) in kvm_iommu_map_pages()
123 pfn = kvm_pin_pages(slot, gfn, page_size >> PAGE_SHIFT); in kvm_iommu_map_pages()
135 kvm_unpin_pages(kvm, pfn, page_size >> PAGE_SHIFT); in kvm_iommu_map_pages()
139 gfn += page_size >> PAGE_SHIFT; in kvm_iommu_map_pages()
299 pfn = phys >> PAGE_SHIFT; in kvm_iommu_put_pages()
/linux-4.1.27/arch/alpha/mm/
Dnuma.c70 node_pfn_start = (node_mem_start(nid)) >> PAGE_SHIFT; in setup_memory_node()
71 node_pfn_end = node_pfn_start + (node_mem_size(nid) >> PAGE_SHIFT); in setup_memory_node()
117 mem_size_limit << (PAGE_SHIFT - 10), in setup_memory_node()
118 node_max_pfn << (PAGE_SHIFT - 10)); in setup_memory_node()
134 node_data[nid] = (pg_data_t *)(__va(node_min_pfn << PAGE_SHIFT)); in setup_memory_node()
302 dma_local_pfn = virt_to_phys((char *)MAX_DMA_ADDRESS) >> PAGE_SHIFT; in paging_init()
Dinit.c109 newptbr = ((unsigned long) swapper_pg_dir - PAGE_OFFSET) >> PAGE_SHIFT; in switch_to_system_map()
202 console_remap_vm.size = nr_pages << PAGE_SHIFT; in callback_init()
210 unsigned long pfn = crb->map[i].pa >> PAGE_SHIFT; in callback_init()
244 dma_pfn = virt_to_phys((char *)MAX_DMA_ADDRESS) >> PAGE_SHIFT; in paging_init()
/linux-4.1.27/arch/powerpc/kernel/
Dsuspend.c20 unsigned long nosave_begin_pfn = __pa(&__nosave_begin) >> PAGE_SHIFT; in pfn_is_nosave()
21 unsigned long nosave_end_pfn = PAGE_ALIGN(__pa(&__nosave_end)) >> PAGE_SHIFT; in pfn_is_nosave()
Dhead_8xx.S329 rlwimi r11, r10, 32 - ((PAGE_SHIFT - 2) << 1), (PAGE_SHIFT - 2) << 1, 29
336 rlwinm r10, r10, 32 - (PAGE_SHIFT - 2), 32 - PAGE_SHIFT, 29
381 rlwimi r11, r10, 32 - ((PAGE_SHIFT - 2) << 1), (PAGE_SHIFT - 2) << 1, 29
387 rlwinm r10, r10, 32 - (PAGE_SHIFT - 2), 32 - PAGE_SHIFT, 29
388 rlwimi r10, r11, 0, 0, 32 - PAGE_SHIFT - 1 /* Add level 2 base */
511 3: rlwimi r11, r10, 32 - ((PAGE_SHIFT - 2) << 1), (PAGE_SHIFT - 2) << 1, 29
515 rlwimi r11, r10, 32 - (PAGE_SHIFT - 2), 32 - PAGE_SHIFT, 29
518 rlwimi r11, r10, 0, 32 - PAGE_SHIFT, 31
898 .align PAGE_SHIFT
/linux-4.1.27/arch/blackfin/include/asm/
Dpage.h10 #define ARCH_PFN_OFFSET (CONFIG_PHY_RAM_BASE_ADDRESS >> PAGE_SHIFT)
11 #define MAP_NR(addr) ((unsigned long)(addr) >> PAGE_SHIFT)
/linux-4.1.27/arch/arc/include/asm/
Dpage.h74 #define ARCH_PFN_OFFSET (CONFIG_LINUX_LINK_BASE >> PAGE_SHIFT)
95 (mem_map + ((__pa(kaddr) - CONFIG_LINUX_LINK_BASE) >> PAGE_SHIFT))
97 #define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
Dpgtable.h180 #define BITS_IN_PAGE PAGE_SHIFT
261 PAGE_SHIFT)))
270 #define pte_pfn(pte) (pte_val(pte) >> PAGE_SHIFT)
271 #define pfn_pte(pfn, prot) (__pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot)))
272 #define __pte_index(addr) (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
/linux-4.1.27/drivers/char/agp/
Di460-agp.c41 #define I460_KPAGES_PER_IOPAGE (1 << (I460_IO_PAGE_SHIFT - PAGE_SHIFT))
154 values[i].page_order = ilog2((sizeof(u32)*values[i].num_entries) >> PAGE_SHIFT); in i460_fetch_size()
195 if (I460_IO_PAGE_SHIFT > PAGE_SHIFT) in i460_cleanup()
233 if (I460_IO_PAGE_SHIFT > PAGE_SHIFT) { in i460_configure()
368 unsigned long order = I460_IO_PAGE_SHIFT - PAGE_SHIFT; in i460_alloc_large_page()
396 __free_pages(lp->page, I460_IO_PAGE_SHIFT - PAGE_SHIFT); in i460_free_large_page()
503 if (I460_IO_PAGE_SHIFT <= PAGE_SHIFT) in i460_insert_memory()
512 if (I460_IO_PAGE_SHIFT <= PAGE_SHIFT) in i460_remove_memory()
530 if (I460_IO_PAGE_SHIFT <= PAGE_SHIFT) { in i460_alloc_page()
541 if (I460_IO_PAGE_SHIFT <= PAGE_SHIFT) { in i460_destroy_page()
/linux-4.1.27/drivers/lguest/
Dpage_tables.c104 page = __va(pgd_pfn(spgd) << PAGE_SHIFT); in spmd_addr()
119 pte_t *page = __va(pmd_pfn(*pmd) << PAGE_SHIFT); in spte_addr()
124 pte_t *page = __va(pgd_pfn(spgd) << PAGE_SHIFT); in spte_addr()
146 unsigned long gpage = pgd_pfn(gpgd) << PAGE_SHIFT; in gpmd_addr()
155 unsigned long gpage = pmd_pfn(gpmd) << PAGE_SHIFT; in gpte_addr()
165 unsigned long gpage = pgd_pfn(gpgd) << PAGE_SHIFT; in gpte_addr()
193 if (get_user_pages_fast(virtpfn << PAGE_SHIFT, 1, write, &page) == 1) in get_pfn()
482 *iomem = (pte_pfn(gpte) << PAGE_SHIFT) | (vaddr & ~PAGE_MASK); in demand_page()
593 pte_t *ptepage = __va(pmd_pfn(*spmd) << PAGE_SHIFT); in release_pmd()
609 pmd_t *pmdpage = __va(pgd_pfn(*spgd) << PAGE_SHIFT); in release_pgd()
[all …]
/linux-4.1.27/drivers/edac/
Dpasemi_edac.c157 dimm->nr_pages = 128 << (20 - PAGE_SHIFT); in pasemi_edac_init_csrows()
160 dimm->nr_pages = 256 << (20 - PAGE_SHIFT); in pasemi_edac_init_csrows()
164 dimm->nr_pages = 512 << (20 - PAGE_SHIFT); in pasemi_edac_init_csrows()
167 dimm->nr_pages = 1024 << (20 - PAGE_SHIFT); in pasemi_edac_init_csrows()
170 dimm->nr_pages = 2048 << (20 - PAGE_SHIFT); in pasemi_edac_init_csrows()
Dedac_core.h41 #if PAGE_SHIFT < 20
42 #define PAGES_TO_MiB(pages) ((pages) >> (20 - PAGE_SHIFT))
43 #define MiB_TO_PAGES(mb) ((mb) << (20 - PAGE_SHIFT))
45 #define PAGES_TO_MiB(pages) ((pages) << (PAGE_SHIFT - 20))
46 #define MiB_TO_PAGES(mb) ((mb) >> (PAGE_SHIFT - 20))
/linux-4.1.27/arch/um/kernel/
Dphysmem.c31 phys_pages = physmem >> PAGE_SHIFT; in mem_total_pages()
32 iomem_pages = iomem >> PAGE_SHIFT; in mem_total_pages()
33 highmem_pages = highmem >> PAGE_SHIFT; in mem_total_pages()
85 unsigned long delta = (len - reserve) >> PAGE_SHIFT; in setup_physmem()
/linux-4.1.27/include/sound/
Dmemalloc.h100 return (size + PAGE_SIZE - 1) >> PAGE_SHIFT; in snd_sgbuf_aligned_pages()
110 dma_addr_t addr = sgbuf->table[offset >> PAGE_SHIFT].addr; in snd_sgbuf_get_addr()
122 return sgbuf->table[offset >> PAGE_SHIFT].buf + offset % PAGE_SIZE; in snd_sgbuf_get_ptr()
/linux-4.1.27/arch/microblaze/kernel/
Dsys_microblaze.c43 return sys_mmap_pgoff(addr, len, prot, flags, fd, pgoff >> PAGE_SHIFT); in SYSCALL_DEFINE6()
54 pgoff >> (PAGE_SHIFT - 12)); in SYSCALL_DEFINE6()
/linux-4.1.27/arch/sh/include/mach-dreamcast/mach/
Dmaple.h9 #define MAPLE_DMA_PAGES ((MAPLE_DMA_ORDER > PAGE_SHIFT) ? \
10 MAPLE_DMA_ORDER - PAGE_SHIFT : 0)
/linux-4.1.27/arch/powerpc/boot/
Dpage.h20 #define PAGE_SHIFT 12 macro
21 #define PAGE_SIZE (ASM_CONST(1) << PAGE_SHIFT)
/linux-4.1.27/arch/x86/realmode/
Dinit.c116 set_memory_nx((unsigned long) base, size >> PAGE_SHIFT); in set_real_mode_permissions()
117 set_memory_ro((unsigned long) base, ro_size >> PAGE_SHIFT); in set_real_mode_permissions()
118 set_memory_x((unsigned long) text_start, text_size >> PAGE_SHIFT); in set_real_mode_permissions()
/linux-4.1.27/arch/alpha/kernel/
Dpci-sysfs.c28 vma->vm_pgoff += base >> PAGE_SHIFT; in hose_mmap_page_range()
41 nr = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT; in __pci_mmap_fits()
43 size = ((pci_resource_len(pdev, num) - 1) >> (PAGE_SHIFT - shift)) + 1; in __pci_mmap_fits()
87 vma->vm_pgoff += bar.start >> (PAGE_SHIFT - (sparse ? 5 : 0)); in pci_mmap_resource()
258 nr = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT; in __legacy_mmap_fits()
260 size = ((res_size - 1) >> PAGE_SHIFT) + 1; in __legacy_mmap_fits()
/linux-4.1.27/arch/mips/ar7/
Dmemory.c63 pages = memsize() >> PAGE_SHIFT; in prom_meminit()
64 add_memory_region(PHYS_OFFSET, pages << PAGE_SHIFT, BOOT_MEM_RAM); in prom_meminit()
/linux-4.1.27/drivers/gpu/drm/radeon/
Dradeon_object.c54 u64 size = (u64)bo->tbo.num_pages << PAGE_SHIFT; in radeon_update_memory_usage()
109 rbo->rdev->mc.visible_vram_size >> PAGE_SHIFT; in radeon_ttm_placement_from_domain()
172 rbo->rdev->mc.visible_vram_size >> PAGE_SHIFT; in radeon_ttm_placement_from_domain()
186 unsigned long page_align = roundup(byte_align, PAGE_SIZE) >> PAGE_SHIFT; in radeon_bo_create()
355 bo->rdev->mc.visible_vram_size >> PAGE_SHIFT; in radeon_bo_pin_restricted()
357 bo->placements[i].lpfn = max_offset >> PAGE_SHIFT; in radeon_bo_pin_restricted()
636 bo->tbo.mem.start << PAGE_SHIFT, in radeon_bo_get_surface_reg()
637 bo->tbo.num_pages << PAGE_SHIFT); in radeon_bo_get_surface_reg()
792 size = bo->mem.num_pages << PAGE_SHIFT; in radeon_bo_fault_reserve_notify()
793 offset = bo->mem.start << PAGE_SHIFT; in radeon_bo_fault_reserve_notify()
[all …]
/linux-4.1.27/drivers/vfio/
Dvfio_iommu_type1.c247 *pfn = ((vaddr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff; in vaddr_get_pfn()
265 unsigned long limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT; in vfio_pin_pages()
282 limit << PAGE_SHIFT); in vfio_pin_pages()
310 __func__, limit << PAGE_SHIFT); in vfio_pin_pages()
385 unlocked += vfio_unpin_pages(phys >> PAGE_SHIFT, in vfio_unmap_unpin()
386 unmapped >> PAGE_SHIFT, in vfio_unmap_unpin()
509 (phys_addr_t)pfn << PAGE_SHIFT, in map_try_harder()
528 ret = iommu_map(d->domain, iova, (phys_addr_t)pfn << PAGE_SHIFT, in vfio_iommu_map()
529 npage << PAGE_SHIFT, prot | d->prot); in vfio_iommu_map()
543 iommu_unmap(d->domain, iova, npage << PAGE_SHIFT); in vfio_iommu_map()
[all …]
/linux-4.1.27/drivers/gpu/drm/omapdrm/
Domap_gem.c160 (entry->obj_pgoff << PAGE_SHIFT); in evict_entry()
230 int npages = obj->size >> PAGE_SHIFT; in omap_gem_attach_pages()
284 int i, npages = obj->size >> PAGE_SHIFT; in omap_gem_detach_pages()
375 vma->vm_start) >> PAGE_SHIFT; in fault_1d()
382 pfn = (omap_obj->paddr >> PAGE_SHIFT) + pgoff; in fault_1d()
386 pfn, pfn << PAGE_SHIFT); in fault_1d()
422 vma->vm_start) >> PAGE_SHIFT; in fault_2d()
433 vaddr = vmf->virtual_address - ((pgoff - base_pgoff) << PAGE_SHIFT); in fault_2d()
454 vaddr += off << PAGE_SHIFT; in fault_2d()
475 pfn = entry->paddr >> PAGE_SHIFT; in fault_2d()
[all …]
/linux-4.1.27/arch/hexagon/mm/
Dinit.c151 high_memory = (void *)((bootmem_lastpg + 1) << PAGE_SHIFT); in paging_init()
198 bootmem_lastpg = PFN_DOWN((bootmem_lastpg << PAGE_SHIFT) & in setup_arch_memory()
233 segtable += (bootmem_lastpg-ARCH_PFN_OFFSET)>>(22-PAGE_SHIFT); in setup_arch_memory()
/linux-4.1.27/drivers/sbus/char/
Dflash.c64 if ((vma->vm_pgoff << PAGE_SHIFT) > size) in flash_mmap()
66 addr = vma->vm_pgoff + (addr >> PAGE_SHIFT); in flash_mmap()
68 if (vma->vm_end - (vma->vm_start + (vma->vm_pgoff << PAGE_SHIFT)) > size) in flash_mmap()
69 size = vma->vm_end - (vma->vm_start + (vma->vm_pgoff << PAGE_SHIFT)); in flash_mmap()
/linux-4.1.27/arch/m68k/sun3/
Ddvma.c36 if(ptelist[(vaddr & 0xff000) >> PAGE_SHIFT] != pte) { in dvma_page()
38 ptelist[(vaddr & 0xff000) >> PAGE_SHIFT] = pte; in dvma_page()
