/linux-4.1.27/arch/unicore32/include/asm/ |
D | memory.h | 33 #define PAGE_OFFSET UL(0xC0000000) macro 34 #define TASK_SIZE (PAGE_OFFSET - UL(0x41000000)) 35 #define TASK_UNMAPPED_BASE (PAGE_OFFSET / 3) 41 #define MODULES_VADDR (PAGE_OFFSET - 16*1024*1024) 46 #define MODULES_END (PAGE_OFFSET) 59 #define __virt_to_phys(x) ((x) - PAGE_OFFSET + PHYS_OFFSET) 60 #define __phys_to_virt(x) ((x) - PHYS_OFFSET + PAGE_OFFSET) 110 #define virt_addr_valid(kaddr) ((unsigned long)(kaddr) >= PAGE_OFFSET && \
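The two macros __virt_to_phys()/__phys_to_virt() above express the unicore32 lowmem mapping as a fixed linear offset between kernel virtual and physical addresses. A minimal userspace sketch of that arithmetic follows; PAGE_OFFSET is taken from the listing, while PHYS_OFFSET is an assumed example value (it is not defined in this excerpt) and the *_demo helpers are illustrative names, not kernel symbols.

#include <assert.h>
#include <stdio.h>

#define PAGE_OFFSET 0xC0000000UL /* from the listing above */
#define PHYS_OFFSET 0x40000000UL /* assumed example value, not in this excerpt */

static unsigned long virt_to_phys_demo(unsigned long x)
{
	return x - PAGE_OFFSET + PHYS_OFFSET; /* mirrors __virt_to_phys() */
}

static unsigned long phys_to_virt_demo(unsigned long x)
{
	return x - PHYS_OFFSET + PAGE_OFFSET; /* mirrors __phys_to_virt() */
}

int main(void)
{
	unsigned long va = PAGE_OFFSET + 0x123000UL;

	/* The two helpers are exact inverses over the direct-mapped range. */
	assert(phys_to_virt_demo(virt_to_phys_demo(va)) == va);
	printf("va %#lx -> pa %#lx\n", va, virt_to_phys_demo(va));
	return 0;
}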
|
/linux-4.1.27/arch/alpha/include/uapi/asm/ |
D | setup.h | 22 #define KERNEL_START (PAGE_OFFSET+KERNEL_START_PHYS) 24 #define INIT_STACK (PAGE_OFFSET+KERNEL_START_PHYS+0x02000) 25 #define EMPTY_PGT (PAGE_OFFSET+KERNEL_START_PHYS+0x04000) 26 #define EMPTY_PGE (PAGE_OFFSET+KERNEL_START_PHYS+0x08000) 27 #define ZERO_PGE (PAGE_OFFSET+KERNEL_START_PHYS+0x0A000) 29 #define START_ADDR (PAGE_OFFSET+KERNEL_START_PHYS+0x10000)
|
/linux-4.1.27/arch/arm/include/asm/ |
D | memory.h | 34 #define PAGE_OFFSET UL(CONFIG_PAGE_OFFSET) macro 55 #define MODULES_VADDR (PAGE_OFFSET - SZ_16M) 58 #define MODULES_VADDR (PAGE_OFFSET - SZ_8M) 69 #define MODULES_END (PAGE_OFFSET - PMD_SIZE) 71 #define MODULES_END (PAGE_OFFSET) 108 #define MODULES_VADDR PAGE_OFFSET 191 ((((unsigned long)(kaddr) - PAGE_OFFSET) >> PAGE_SHIFT) + \ 257 return (phys_addr_t)x - PAGE_OFFSET + PHYS_OFFSET; in __virt_to_phys() 262 return x - PHYS_OFFSET + PAGE_OFFSET; in __phys_to_virt() 266 ((((unsigned long)(kaddr) - PAGE_OFFSET) >> PAGE_SHIFT) + \ [all …]
|
D | dma.h | 12 arm_dma_zone_size && arm_dma_zone_size < (0x10000000 - PAGE_OFFSET) ? \ 13 (PAGE_OFFSET + arm_dma_zone_size) : 0xffffffffUL; })
|
D | highmem.h | 6 #define PKMAP_BASE (PAGE_OFFSET - PMD_SIZE)
|
D | pgtable-3level-hwdef.h | 106 #define TTBR1_SIZE (((PAGE_OFFSET >> 30) - 1) << 16)
|
D | kvm_mmu.h | 30 #define HYP_PAGE_OFFSET PAGE_OFFSET
|
/linux-4.1.27/arch/x86/mm/ |
D | physaddr.c | 23 x = y + (__START_KERNEL_map - PAGE_OFFSET); in __phys_addr() 56 x = y + (__START_KERNEL_map - PAGE_OFFSET); in __virt_addr_valid() 72 unsigned long phys_addr = x - PAGE_OFFSET; in __phys_addr() 74 VIRTUAL_BUG_ON(x < PAGE_OFFSET); in __phys_addr() 88 if (x < PAGE_OFFSET) in __virt_addr_valid() 94 return pfn_valid((x - PAGE_OFFSET) >> PAGE_SHIFT); in __virt_addr_valid()
|
D | init_32.c | 293 pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET); in kernel_physical_mapping_init() 301 pmd_idx = pmd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET); in kernel_physical_mapping_init() 308 unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET; in kernel_physical_mapping_init() 327 PAGE_OFFSET + PAGE_SIZE-1; in kernel_physical_mapping_init() 344 pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET); in kernel_physical_mapping_init() 471 va = PAGE_OFFSET + (pfn<<PAGE_SHIFT); in native_pagetable_init()
|
D | kasan_init_64.c | 216 kasan_mem_to_shadow((void *)PAGE_OFFSET)); in kasan_init() 225 populate_zero_shadow(kasan_mem_to_shadow((void *)PAGE_OFFSET + MAXMEM), in kasan_init()
|
D | highmem_32.c | 97 BUG_ON(vaddr < PAGE_OFFSET); in __kunmap_atomic()
|
D | dump_pagetables.c | 73 { PAGE_OFFSET, "Low Kernel Mapping" }, 86 { PAGE_OFFSET, "Kernel Mapping" },
|
/linux-4.1.27/arch/openrisc/include/asm/ |
D | page.h | 33 #define PAGE_OFFSET 0xc0000000 macro 34 #define KERNELBASE PAGE_OFFSET 79 #define __va(x) ((void *)((unsigned long)(x) + PAGE_OFFSET)) 80 #define __pa(x) ((unsigned long) (x) - PAGE_OFFSET) 86 (mem_map + (((unsigned long)(addr)-PAGE_OFFSET) >> PAGE_SHIFT)) 88 ((((page) - mem_map) << PAGE_SHIFT) + PAGE_OFFSET)
|
D | pgtable.h | 96 #define VMALLOC_START (PAGE_OFFSET-0x04000000) 97 #define VMALLOC_END (PAGE_OFFSET) 354 #define pte_pagenr(pte) ((__pte_page(pte) - PAGE_OFFSET) >> PAGE_SHIFT) 358 #define __page_address(page) (PAGE_OFFSET + (((page) - mem_map) << PAGE_SHIFT))
|
/linux-4.1.27/arch/alpha/include/asm/ |
D | page.h | 68 #define PAGE_OFFSET 0xffff800000000000UL macro 70 #define PAGE_OFFSET 0xfffffc0000000000UL macro 76 #define PAGE_OFFSET 0xffff800000000000 macro 78 #define PAGE_OFFSET 0xfffffc0000000000 macro 83 #define __pa(x) ((unsigned long) (x) - PAGE_OFFSET) 84 #define __va(x) ((void *)((unsigned long) (x) + PAGE_OFFSET))
|
D | pgtable.h | 226 { pmd_val(*pmdp) = _PAGE_TABLE | ((((unsigned long) ptep) - PAGE_OFFSET) << (32-PAGE_SHIFT)); } in pmd_set() 229 { pgd_val(*pgdp) = _PAGE_TABLE | ((((unsigned long) pmdp) - PAGE_OFFSET) << (32-PAGE_SHIFT)); } in pgd_set() 235 return ((pmd_val(pmd) & _PFN_MASK) >> (32-PAGE_SHIFT)) + PAGE_OFFSET; in pmd_page_vaddr() 244 { return PAGE_OFFSET + ((pgd_val(pgd) & _PFN_MASK) >> (32-PAGE_SHIFT)); } in pgd_page_vaddr()
|
D | pgalloc.h | 16 pmd_set(pmd, (pte_t *)(page_to_pa(pte) + PAGE_OFFSET)); in pmd_populate()
|
/linux-4.1.27/arch/arm/mach-realview/include/mach/ |
D | memory.h | 47 #define PAGE_OFFSET1 (PAGE_OFFSET + 0x10000000) 48 #define PAGE_OFFSET2 (PAGE_OFFSET + 0x30000000) 55 (phys) + PAGE_OFFSET) 60 (virt) - PAGE_OFFSET)
|
/linux-4.1.27/arch/c6x/kernel/ |
D | setup.c | 256 if (start > PAGE_OFFSET || PAGE_OFFSET >= (start + size)) in c6x_add_memory() 312 if (mem_size && (PAGE_OFFSET + PAGE_ALIGN(mem_size)) < memory_end) in setup_arch() 313 memory_end = PAGE_OFFSET + PAGE_ALIGN(mem_size); in setup_arch() 316 memblock_add(PAGE_OFFSET, memory_end - PAGE_OFFSET); in setup_arch() 319 memblock_reserve(PAGE_OFFSET, in setup_arch() 320 PAGE_ALIGN((unsigned long)&_end - PAGE_OFFSET)); in setup_arch() 364 PAGE_OFFSET >> PAGE_SHIFT, in setup_arch() 405 free_bootmem(PAGE_OFFSET, memory_end - PAGE_OFFSET); in setup_arch()
|
D | vmlinux.lds.S | 20 . = PAGE_OFFSET;
|
/linux-4.1.27/arch/sh/include/asm/ |
D | page.h | 129 #define PAGE_OFFSET CONFIG_PAGE_OFFSET macro 141 #define ___pa(x) ((x)-PAGE_OFFSET+__MEMORY_START) 142 #define ___va(x) ((x)+PAGE_OFFSET-__MEMORY_START) 144 #define ___pa(x) ((x)-PAGE_OFFSET) 145 #define ___va(x) ((x)+PAGE_OFFSET) 158 #define UNCAC_ADDR(addr) ((addr) - PAGE_OFFSET + uncached_start) 159 #define CAC_ADDR(addr) ((addr) - uncached_start + PAGE_OFFSET)
|
D | barrier.h | 30 #define ctrl_barrier() __icbi(PAGE_OFFSET)
|
D | segment.h | 21 #define USER_DS MAKE_MM_SEG(PAGE_OFFSET)
|
/linux-4.1.27/arch/tile/kernel/ |
D | head_64.S | 74 moveli r4, hw1_last(swapper_pgprot - PAGE_OFFSET) 77 shl16insli r4, r4, hw0(swapper_pgprot - PAGE_OFFSET) 83 moveli r0, hw1_last(.Lsv_data_pmd - PAGE_OFFSET) 84 moveli r6, hw1_last(temp_data_pmd - PAGE_OFFSET) 93 shl16insli r0, r0, hw0(.Lsv_data_pmd - PAGE_OFFSET) 94 shl16insli r6, r6, hw0(temp_data_pmd - PAGE_OFFSET) 125 moveli r0, hw1_last(swapper_pg_dir - PAGE_OFFSET) 129 shl16insli r0, r0, hw0(swapper_pg_dir - PAGE_OFFSET) 229 .org swapper_pg_dir + PGD_INDEX(PAGE_OFFSET) * HV_PTE_SIZE
|
D | head_32.S | 51 moveli r6, lo16(swapper_pgprot - PAGE_OFFSET) 55 moveli r0, lo16(swapper_pg_dir - PAGE_OFFSET) 56 auli r6, r6, ha16(swapper_pgprot - PAGE_OFFSET) 64 auli r0, r0, ha16(swapper_pg_dir - PAGE_OFFSET) 159 .rept (MEM_USER_INTRPT - PAGE_OFFSET) >> PGDIR_SHIFT 160 PTE addr + PAGE_OFFSET, addr, (1 << (HV_PTE_INDEX_READABLE - 32)) | \
|
D | stack.c | 47 return sp >= PAGE_OFFSET && sp < (unsigned long)high_memory; in in_kernel_stack() 62 } else if (address >= PAGE_OFFSET) { in read_memory_func() 108 p->sp < PAGE_OFFSET && p->sp != 0) { in valid_fault_handler() 134 if (is_sigreturn(b->pc) && b->sp < PAGE_OFFSET && in valid_sigframe() 304 if (address >= PAGE_OFFSET) { in describe_addr() 402 if (address < PAGE_OFFSET && !have_mmap_sem && kbt->task->mm) in tile_show_stack() 496 if (i >= trace->max_entries || kbt.it.pc < PAGE_OFFSET) in save_stack_trace_tsk()
|
D | vmlinux.lds.S | 63 . = (. - TEXT_OFFSET + PAGE_OFFSET); 65 #define LOAD_OFFSET PAGE_OFFSET
|
D | setup.c | 286 unsigned long vaddr = PAGE_OFFSET; in setup_pa_va_mapping() 318 vaddr_end = PAGE_OFFSET + in setup_pa_va_mapping() 323 vaddr_end = PAGE_OFFSET + (curr_pages << PAGE_SHIFT); in setup_pa_va_mapping() 544 min_low_pfn = PFN_UP((unsigned long)_end - PAGE_OFFSET); in setup_memory() 702 initrd_start += PAGE_OFFSET; in setup_bootmem_allocator() 703 initrd_end += PAGE_OFFSET; in setup_bootmem_allocator() 1188 if (PAGE_OFFSET + HV_GLUE_START_CPA + glue_size > (unsigned long)_text) in validate_hv() 1696 enum { CODE_DELTA = MEM_SV_START - PAGE_OFFSET }; in request_standard_resources()
|
D | kprobes.c | 119 addr_wr = (unsigned long)p->addr - MEM_SV_START + PAGE_OFFSET; in arch_arm_kprobe() 134 addr_wr = (unsigned long)kp->addr - MEM_SV_START + PAGE_OFFSET; in arch_disarm_kprobe()
|
D | ftrace.c | 120 pc_wr = pc - MEM_SV_START + PAGE_OFFSET; in ftrace_modify_code()
|
D | machine_kexec.c | 228 #define QUASI_VA_IS_PA_ADDR_RANGE PAGE_OFFSET
|
/linux-4.1.27/arch/arm64/include/asm/ |
D | memory.h | 51 #define PAGE_OFFSET (UL(0xffffffffffffffff) << (VA_BITS - 1)) macro 52 #define MODULES_END (PAGE_OFFSET) 80 #define __virt_to_phys(x) (((phys_addr_t)(x) - PAGE_OFFSET + PHYS_OFFSET)) 81 #define __phys_to_virt(x) ((unsigned long)((x) - PHYS_OFFSET + PAGE_OFFSET)) 122 #define MAX_MEMBLOCK_ADDR ({ memstart_addr - PAGE_OFFSET - 1; })
|
D | kvm_mmu.h | 35 #define HYP_PAGE_OFFSET (PAGE_OFFSET & HYP_PAGE_OFFSET_MASK) 74 #define KERN_TO_HYP(kva) ((unsigned long)kva - PAGE_OFFSET + HYP_PAGE_OFFSET)
|
/linux-4.1.27/arch/tile/include/asm/ |
D | page.h | 177 #define PAGE_OFFSET (-(_AC(1, UL) << (MAX_VA_WIDTH - 1))) macro 230 #define PAGE_OFFSET _AC(CONFIG_PAGE_OFFSET, UL) macro 274 return ((unsigned long)kaddr >= PAGE_OFFSET && kaddr < high_memory); in virt_addr_valid() 281 return ((unsigned long)kaddr - PAGE_OFFSET) >> PAGE_SHIFT; in kaddr_to_pfn() 286 return (void *)((pfn << PAGE_SHIFT) + PAGE_OFFSET); in pfn_to_kaddr() 291 return (phys_addr_t)((unsigned long)kaddr - PAGE_OFFSET); in virt_to_phys() 296 return (void *)((unsigned long)paddr + PAGE_OFFSET); in phys_to_virt()
|
D | pgtable_32.h | 74 #define MAXMEM (_VMALLOC_START - PAGE_OFFSET)
|
D | processor.h | 163 #define TASK_SIZE_MAX PAGE_OFFSET
|
/linux-4.1.27/arch/m68k/mm/ |
D | sun3mmu.c | 52 address = PAGE_OFFSET; in paging_init() 64 pg_dir += PAGE_OFFSET >> PGDIR_SHIFT; in paging_init() 88 zones_size[ZONE_DMA] = ((unsigned long)high_memory - PAGE_OFFSET) >> PAGE_SHIFT; in paging_init() 93 (__pa(PAGE_OFFSET) >> PAGE_SHIFT) + 1, NULL); in paging_init()
|
D | init.c | 103 zones_size[ZONE_DMA] = (end_mem - PAGE_OFFSET) >> PAGE_SHIFT; in paging_init() 141 MLM(PAGE_OFFSET, (unsigned long)high_memory), in print_memmap()
|
D | mcfmmu.c | 56 pg_dir += PAGE_OFFSET >> PGDIR_SHIFT; in paging_init() 58 address = PAGE_OFFSET; in paging_init()
|
D | motorola.c | 244 m68k_memoffset = min_addr - PAGE_OFFSET; in paging_init()
|
/linux-4.1.27/arch/metag/include/asm/ |
D | cacheflush.h | 33 metag_code_cache_flush_all((void *) PAGE_OFFSET); in __flush_cache_all() 34 metag_data_cache_flush_all((void *) PAGE_OFFSET); in __flush_cache_all() 64 metag_data_cache_flush_all((void *) PAGE_OFFSET); in flush_dcache_page() 78 metag_data_cache_flush_all((void *) PAGE_OFFSET); in flush_cache_vmap() 83 metag_data_cache_flush_all((void *) PAGE_OFFSET); in flush_cache_vunmap() 111 metag_code_cache_flush_all((void *) PAGE_OFFSET); in flush_dcache_page()
|
D | mmu.h | 55 (MMCU_TXG_TABLE_PHYSX_OFFSET * is_global_space(PAGE_OFFSET)); in mmu_phys0_addr() 71 (MMCU_TXG_TABLE_PHYSX_OFFSET * is_global_space(PAGE_OFFSET)); in mmu_phys1_addr()
|
D | page.h | 90 #define PAGE_OFFSET (CONFIG_PAGE_OFFSET) macro 92 #if PAGE_OFFSET >= LINGLOBAL_BASE
|
D | uaccess.h | 23 #define USER_DS MAKE_MM_SEG(PAGE_OFFSET) 43 ((addr) > PAGE_OFFSET && \
|
D | pgtable.h | 12 #if PAGE_OFFSET >= LINGLOBAL_BASE 179 #if PAGE_OFFSET >= LINGLOBAL_BASE
|
D | processor.h | 21 #define TASK_SIZE PAGE_OFFSET
|
/linux-4.1.27/arch/frv/include/asm/ |
D | virtconvert.h | 24 #define phys_to_virt(vaddr) ((void *) ((unsigned long)(vaddr) + PAGE_OFFSET)) 25 #define virt_to_phys(vaddr) ((unsigned long) (vaddr) - PAGE_OFFSET) 37 #define __page_address(page) (PAGE_OFFSET + (((page) - mem_map) << PAGE_SHIFT))
|
D | mem-layout.h | 45 #define PAGE_OFFSET ((unsigned long) &__page_offset) macro 74 #define TASK_SIZE (PAGE_OFFSET)
|
D | dma.h | 22 #define MAX_DMA_ADDRESS PAGE_OFFSET
|
D | page.h | 57 #define ARCH_PFN_OFFSET (PAGE_OFFSET >> PAGE_SHIFT)
|
/linux-4.1.27/arch/cris/include/asm/ |
D | page.h | 42 #define ARCH_PFN_OFFSET (PAGE_OFFSET >> PAGE_SHIFT) 43 #define pfn_valid(pfn) (((pfn) - (PAGE_OFFSET >> PAGE_SHIFT)) < max_mapnr) 53 #define virt_to_page(kaddr) (mem_map + (((unsigned long)(kaddr) - PAGE_OFFSET) >> PAGE_SHIFT)) 60 #define page_to_phys(page) __pa((((page) - mem_map) << PAGE_SHIFT) + PAGE_OFFSET)
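The cris entry above turns a lowmem virtual address into a mem_map index (and hence a pfn) by subtracting PAGE_OFFSET and shifting by PAGE_SHIFT. A small sketch of that index arithmetic, with PAGE_OFFSET and PAGE_SHIFT chosen as assumed example values (a KSEG base and 8 KiB pages) rather than read from the header:

#include <assert.h>

#define PAGE_SHIFT  13            /* assumed: 8 KiB pages */
#define PAGE_OFFSET 0xc0000000UL  /* assumed example KSEG base */

int main(void)
{
	unsigned long kaddr = PAGE_OFFSET + (5UL << PAGE_SHIFT);
	unsigned long idx = (kaddr - PAGE_OFFSET) >> PAGE_SHIFT;

	/* The sixth lowmem page maps to mem_map[5], i.e. pfn ARCH_PFN_OFFSET + 5. */
	assert(idx == 5);
	return 0;
}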
|
D | dma.h | 11 #define MAX_DMA_ADDRESS PAGE_OFFSET
|
D | pgtable.h | 213 #define pte_pagenr(pte) ((__pte_page(pte) - PAGE_OFFSET) >> PAGE_SHIFT) 217 #define __page_address(page) (PAGE_OFFSET + (((page) - mem_map) << PAGE_SHIFT))
|
/linux-4.1.27/arch/nios2/include/asm/ |
D | page.h | 31 #define PAGE_OFFSET \ macro 82 ((unsigned long)(x) - PAGE_OFFSET + PHYS_OFFSET) 84 ((void *)((unsigned long)(x) + PAGE_OFFSET - PHYS_OFFSET)) 87 ((((page) - mem_map) << PAGE_SHIFT) + PAGE_OFFSET)
|
/linux-4.1.27/arch/hexagon/include/asm/ |
D | mem-layout.h | 32 #define PAGE_OFFSET _AC(0xc0000000, UL) macro 54 #define TASK_SIZE (PAGE_OFFSET) 67 #define MIN_KERNEL_SEG (PAGE_OFFSET >> PGDIR_SHIFT) /* L1 shift is 22 bits */
|
D | page.h | 99 #define __pa(x) ((unsigned long)(x) - PAGE_OFFSET + PHYS_OFFSET) 100 #define __va(x) ((void *)((unsigned long)(x) - PHYS_OFFSET + PAGE_OFFSET))
|
D | dma.h | 25 #define MAX_DMA_ADDRESS (PAGE_OFFSET)
|
/linux-4.1.27/include/asm-generic/ |
D | page.h | 69 #define PAGE_OFFSET (CONFIG_KERNEL_RAM_BASE_ADDRESS) macro 71 #define PAGE_OFFSET (0) macro 75 #define ARCH_PFN_OFFSET (PAGE_OFFSET >> PAGE_SHIFT) 95 #define virt_addr_valid(kaddr) (((void *)(kaddr) >= (void *)PAGE_OFFSET) && \
|
D | dma.h | 10 #define MAX_DMA_ADDRESS PAGE_OFFSET
|
/linux-4.1.27/arch/powerpc/include/asm/ |
D | page.h | 89 #define PAGE_OFFSET ASM_CONST(CONFIG_PAGE_OFFSET) macro 122 #define MEMORY_START (PHYSICAL_START + PAGE_OFFSET - KERNELBASE) 216 #define __va(x) ((void *)(unsigned long)((phys_addr_t)(x) | PAGE_OFFSET)) 220 #define __va(x) ((void *)(unsigned long)((phys_addr_t)(x) + PAGE_OFFSET - MEMORY_START)) 221 #define __pa(x) ((unsigned long)(x) - PAGE_OFFSET + MEMORY_START) 256 #define is_kernel_addr(x) ((x) >= PAGE_OFFSET)
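The 64-bit powerpc __va() above builds a virtual address with a bitwise OR rather than an addition; this only works because PAGE_OFFSET has no bits in common with any valid physical address. A sketch of that equivalence, using an assumed example PAGE_OFFSET of 0xc000000000000000:

#include <assert.h>
#include <stdint.h>

#define DEMO_PAGE_OFFSET UINT64_C(0xc000000000000000) /* assumed example value */

int main(void)
{
	uint64_t pa = UINT64_C(0x12345000); /* any RAM address well below the base */

	/* No bits overlap, so OR-ing in the base is the same as adding it. */
	assert((pa | DEMO_PAGE_OFFSET) == (pa + DEMO_PAGE_OFFSET));
	return 0;
}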
|
D | kdump.h | 25 #define KDUMP_TRAMPOLINE_START (0x0100 + PAGE_OFFSET) 26 #define KDUMP_TRAMPOLINE_END (0x3000 + PAGE_OFFSET)
|
/linux-4.1.27/arch/score/include/asm/ |
D | page.h | 31 #define PAGE_OFFSET (0xA0000000UL) macro 61 #define __pa(x) ((unsigned long)(x) - PAGE_OFFSET) 62 #define __va(x) ((void *)((unsigned long) (x) + PAGE_OFFSET)) 79 #define ARCH_PFN_OFFSET (PAGE_OFFSET >> PAGE_SHIFT)
|
/linux-4.1.27/arch/hexagon/mm/ |
D | init.c | 34 #define bootmem_startpg (PFN_UP(((unsigned long) _end) - PAGE_OFFSET + PHYS_OFFSET)) 223 segtable = segtable + (PAGE_OFFSET >> 22); in setup_arch_memory() 256 (unsigned long) _K_init_devicetable-PAGE_OFFSET); in setup_arch_memory() 257 *segtable = ((u32) (unsigned long) _K_init_devicetable-PAGE_OFFSET) | in setup_arch_memory()
|
/linux-4.1.27/arch/m68k/include/asm/ |
D | page_no.h | 28 #define virt_to_page(addr) (mem_map + (((unsigned long)(addr)-PAGE_OFFSET) >> PAGE_SHIFT)) 29 #define page_to_virt(page) __va(((((page) - mem_map) << PAGE_SHIFT) + PAGE_OFFSET)) 35 #define virt_addr_valid(kaddr) (((void *)(kaddr) >= (void *)PAGE_OFFSET) && \
|
D | page_mm.h | 102 if(x >= PAGE_OFFSET) in ___pa() 103 return (x-PAGE_OFFSET); in ___pa() 114 return (void *)(x+PAGE_OFFSET); in __va() 171 #define virt_addr_valid(kaddr) ((void *)(kaddr) >= (void *)PAGE_OFFSET && (void *)(kaddr) < high_me…
|
D | mmu_context.h | 115 if (mmuar >= PAGE_OFFSET) { in load_ksp_mmu() 133 pte = (mmuar >= PAGE_OFFSET) ? pte_offset_kernel(pmd, mmuar) in load_ksp_mmu() 140 if (!pte_dirty(*pte) && mmuar <= PAGE_OFFSET) in load_ksp_mmu()
|
D | virtconvert.h | 31 __pa(PAGE_OFFSET + (((page) - pg_data_map[0].node_mem_map) << PAGE_SHIFT))
|
D | page.h | 16 #define PAGE_OFFSET (PAGE_OFFSET_RAW) macro
|
D | sun3_pgalloc.h | 95 memset(new_pgd, 0, (PAGE_OFFSET >> PGDIR_SHIFT)); in pgd_alloc()
|
D | mcf_pgalloc.h | 100 memset(new_pgd, 0, PAGE_OFFSET >> PGDIR_SHIFT); in pgd_alloc()
|
/linux-4.1.27/arch/metag/mm/ |
D | mmu-meta2.c | 71 if (is_global_space(PAGE_OFFSET)) in mmu_get_base() 138 unsigned long mem_size = mem_end - PAGE_OFFSET; in mmu_init() 172 addr = PAGE_OFFSET; in mmu_init() 173 entry = pgd_index(PAGE_OFFSET); in mmu_init()
|
D | mmu-meta1.c | 56 if (is_global_space(PAGE_OFFSET)) in __get_mmu_base() 143 addr = PAGE_OFFSET; in mmu_init() 144 entry = pgd_index(PAGE_OFFSET); in mmu_init()
|
/linux-4.1.27/arch/sparc/include/asm/ |
D | page_32.h | 118 #define PAGE_OFFSET 0xf0000000 macro 123 #define __pa(x) ((unsigned long)(x) - PAGE_OFFSET + phys_base) 124 #define __va(x) ((void *)((unsigned long) (x) - phys_base + PAGE_OFFSET)) 133 #define virt_addr_valid(kaddr) ((((unsigned long)(kaddr)-PAGE_OFFSET)>>PAGE_SHIFT) < max_mapnr)
|
D | page_64.h | 120 extern unsigned long PAGE_OFFSET; 135 #define __pa(x) ((unsigned long)(x) - PAGE_OFFSET) 136 #define __va(x) ((void *)((unsigned long) (x) + PAGE_OFFSET))
|
D | processor_32.h | 31 #define TASK_SIZE PAGE_OFFSET 33 #define STACK_TOP (PAGE_OFFSET - PAGE_SIZE)
|
/linux-4.1.27/arch/arc/include/asm/ |
D | processor.h | 110 #define VMALLOC_START (PAGE_OFFSET - VMALLOC_SIZE) 111 #define VMALLOC_END (PAGE_OFFSET) 125 #define TASK_SIZE (PAGE_OFFSET - VMALLOC_SIZE - USER_KERNEL_GUTTER)
|
/linux-4.1.27/arch/xtensa/include/asm/ |
D | page.h | 37 #define PAGE_OFFSET XCHAL_KSEG_CACHED_VADDR macro 40 #define PAGE_OFFSET __XTENSA_UL_CONST(0) macro 172 #define __pa(x) ((unsigned long) (x) - PAGE_OFFSET) 173 #define __va(x) ((void *)((unsigned long) (x) + PAGE_OFFSET))
|
D | dma.h | 48 #define MAX_DMA_ADDRESS (PAGE_OFFSET + XCHAL_KIO_SIZE - 1)
|
/linux-4.1.27/arch/arm/mach-footbridge/ |
D | common.c | 240 WARN_ON(res < PAGE_OFFSET || res >= (unsigned long)high_memory); in __virt_to_bus() 242 return res + (fb_bus_sdram_offset() - PAGE_OFFSET); in __virt_to_bus() 248 res = res - (fb_bus_sdram_offset() - PAGE_OFFSET); in __bus_to_virt() 250 WARN_ON(res < PAGE_OFFSET || res >= (unsigned long)high_memory); in __bus_to_virt()
|
D | dc21285.c | 294 mem_size = (unsigned int)high_memory - PAGE_OFFSET; in dc21285_preinit() 357 *CSR_PCISDRAMBASE = __virt_to_bus(PAGE_OFFSET); in dc21285_preinit()
|
/linux-4.1.27/arch/powerpc/mm/ |
D | 44x_mmu.c | 191 ppc47x_pin_tlb(addr + PAGE_OFFSET, addr); in mmu_mapin_ram() 193 ppc44x_pin_tlb(addr + PAGE_OFFSET, addr); in mmu_mapin_ram() 249 ppc47x_pin_tlb(addr + PAGE_OFFSET, addr); in mmu_init_secondary() 251 ppc44x_pin_tlb(addr + PAGE_OFFSET, addr); in mmu_init_secondary()
|
D | ppc_mmu_32.c | 95 setbat(2, PAGE_OFFSET, 0, bl, PAGE_KERNEL_X); in mmu_mapin_ram() 96 done = (unsigned long)bat_addrs[2].limit - PAGE_OFFSET + 1; in mmu_mapin_ram() 103 setbat(3, PAGE_OFFSET+done, done, bl, PAGE_KERNEL_X); in mmu_mapin_ram() 104 done = (unsigned long)bat_addrs[3].limit - PAGE_OFFSET + 1; in mmu_mapin_ram()
|
D | fsl_booke_mmu.c | 204 unsigned long virt = PAGE_OFFSET; in map_mem_in_cams() 218 return tlbcam_addrs[tlbcam_index - 1].limit - PAGE_OFFSET + 1; in mmu_mapin_ram() 267 virt_phys_offset = PAGE_OFFSET - memstart_addr; in relocate_init() 308 map_mem_in_cams_addr(start, PAGE_OFFSET + offset, in relocate_init()
|
D | slb.c | 107 if ((ksp_esid_data & ~0xfffffffUL) <= PAGE_OFFSET) { in __slb_flush_and_rebolt() 316 create_shadowed_slbe(PAGE_OFFSET, mmu_kernel_ssize, lflags, 0); in slb_initialize() 327 (get_paca()->kstack & slb_esid_mask(mmu_kernel_ssize)) > PAGE_OFFSET) in slb_initialize()
|
D | init_32.c | 53 #if (CONFIG_LOWMEM_SIZE > (0xF0000000 - PAGE_OFFSET))
|
/linux-4.1.27/arch/arm/mach-zynq/ |
D | platsmp.c | 52 if (__pa(PAGE_OFFSET)) { in zynq_cpun_start() 59 zero = (__force u8 __iomem *)PAGE_OFFSET; in zynq_cpun_start() 76 if (__pa(PAGE_OFFSET)) in zynq_cpun_start()
|
D | common.c | 61 if (!__pa(PAGE_OFFSET)) in zynq_memory_init() 62 memblock_reserve(__pa(PAGE_OFFSET), __pa(swapper_pg_dir)); in zynq_memory_init()
|
/linux-4.1.27/arch/arc/include/uapi/asm/ |
D | page.h | 30 #define PAGE_OFFSET (0x80000000) macro 33 #define PAGE_OFFSET (0x80000000UL) /* Kernel starts at 2G onwards */ macro
|
/linux-4.1.27/arch/mips/include/asm/ |
D | page.h | 173 ((unsigned long)(x) - PAGE_OFFSET + PHYS_OFFSET) 175 #define __va(x) ((void *)((unsigned long)(x) + PAGE_OFFSET - PHYS_OFFSET)) 234 #define UNCAC_ADDR(addr) ((addr) - PAGE_OFFSET + UNCAC_BASE) 235 #define CAC_ADDR(addr) ((addr) - UNCAC_BASE + PAGE_OFFSET)
|
D | dma.h | 88 #define MAX_DMA_ADDRESS PAGE_OFFSET 90 #define MAX_DMA_ADDRESS (PAGE_OFFSET + 0x01000000)
|
D | io.h | 138 return (void *)(address + PAGE_OFFSET - PHYS_OFFSET); in phys_to_virt() 146 return (unsigned long)address - PAGE_OFFSET; in isa_virt_to_bus() 151 return (void *)(address + PAGE_OFFSET); in isa_bus_to_virt()
|
/linux-4.1.27/arch/m32r/include/asm/ |
D | page.h | 68 #define PAGE_OFFSET ((unsigned long)__PAGE_OFFSET) macro 69 #define __pa(x) ((unsigned long)(x) - PAGE_OFFSET) 70 #define __va(x) ((void *)((unsigned long)(x) + PAGE_OFFSET))
|
D | dma.h | 10 #define MAX_DMA_ADDRESS (PAGE_OFFSET+0x20000000)
|
/linux-4.1.27/arch/cris/include/arch-v10/arch/ |
D | page.h | 9 #define PAGE_OFFSET KSEG_6 /* kseg_6 is mapped to physical ram */ macro 11 #define PAGE_OFFSET KSEG_C /* kseg_c is mapped to physical ram */ macro
|
/linux-4.1.27/arch/arm/mach-footbridge/include/mach/ |
D | memory.h | 46 #define __virt_to_bus(x) ((x) + (BUS_OFFSET - PAGE_OFFSET)) 47 #define __bus_to_virt(x) ((x) - (BUS_OFFSET - PAGE_OFFSET))
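The footbridge macros above rebase addresses between the kernel's virtual view and the PCI bus view with a single constant offset. A round-trip sketch follows; BUS_OFFSET and PAGE_OFFSET are assumed example values here, not the board's real constants.

#include <assert.h>

#define PAGE_OFFSET 0xc0000000UL /* assumed example value */
#define BUS_OFFSET  0xe0000000UL /* assumed example value */

static unsigned long virt_to_bus_demo(unsigned long x)
{
	return x + (BUS_OFFSET - PAGE_OFFSET); /* mirrors __virt_to_bus() */
}

static unsigned long bus_to_virt_demo(unsigned long x)
{
	return x - (BUS_OFFSET - PAGE_OFFSET); /* mirrors __bus_to_virt() */
}

int main(void)
{
	unsigned long va = PAGE_OFFSET + 0x4000UL;

	/* Converting to a bus address and back is the identity. */
	assert(bus_to_virt_demo(virt_to_bus_demo(va)) == va);
	return 0;
}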
|
/linux-4.1.27/arch/hexagon/kernel/ |
D | head.S | 53 r1.h = #HI(PAGE_OFFSET); 54 r1.l = #LO(PAGE_OFFSET); 109 R1.H = #HI(PAGE_OFFSET >> (22 - 2)) 110 R1.L = #LO(PAGE_OFFSET >> (22 - 2))
|
D | vmlinux.lds.S | 39 . = PAGE_OFFSET;
|
/linux-4.1.27/arch/c6x/mm/ |
D | init.c | 52 zones_size[ZONE_NORMAL] = (memory_end - PAGE_OFFSET) >> PAGE_SHIFT; in paging_init() 54 __pa(PAGE_OFFSET) >> PAGE_SHIFT; in paging_init()
|
/linux-4.1.27/arch/openrisc/kernel/ |
D | vmlinux.lds.S | 26 # define LOAD_OFFSET PAGE_OFFSET 27 # define LOAD_BASE PAGE_OFFSET
|
/linux-4.1.27/arch/arm/mach-ks8695/include/mach/ |
D | memory.h | 23 #define __virt_to_bus(x) ((x) - PAGE_OFFSET + KS8695_PCIMEM_PA) 24 #define __bus_to_virt(x) ((x) - KS8695_PCIMEM_PA + PAGE_OFFSET)
|
/linux-4.1.27/arch/arm/mach-omap1/include/mach/ |
D | memory.h | 24 #define virt_to_lbus(x) ((x) - PAGE_OFFSET + OMAP1510_LB_OFFSET) 25 #define lbus_to_virt(x) ((x) - OMAP1510_LB_OFFSET + PAGE_OFFSET)
|
/linux-4.1.27/arch/x86/kernel/ |
D | dumpstack_32.c | 147 if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) { in show_regs() 153 if (ip < (u8 *)PAGE_OFFSET || in show_regs() 171 if (ip < PAGE_OFFSET) in is_valid_bugaddr()
|
D | doublefault.c | 17 #define ptr_ok(x) ((x) > PAGE_OFFSET && (x) < PAGE_OFFSET + MAXMEM)
|
D | dumpstack_64.c | 326 if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) { in show_regs() 332 if (ip < (u8 *)PAGE_OFFSET || in show_regs()
|
/linux-4.1.27/arch/mips/include/asm/mach-generic/ |
D | spaces.h | 92 #ifndef PAGE_OFFSET 93 #define PAGE_OFFSET (CAC_BASE + PHYS_OFFSET) macro
|
/linux-4.1.27/arch/parisc/include/asm/ |
D | page.h | 125 #define PAGE_OFFSET ((unsigned long)__PAGE_OFFSET) macro 140 #define __pa(x) ((unsigned long)(x)-PAGE_OFFSET) 141 #define __va(x) ((void *)((unsigned long)(x)+PAGE_OFFSET))
|
/linux-4.1.27/arch/sh/kernel/ |
D | vmlinux.lds.S | 6 #define LOAD_OFFSET PAGE_OFFSET 26 . = PAGE_OFFSET + MEMORY_OFFSET + PHYSICAL_OFFSET + CONFIG_ZERO_PAGE_OFFSET;
|
D | traps.c | 113 if (addr < PAGE_OFFSET) in is_valid_bugaddr()
|
/linux-4.1.27/arch/microblaze/include/asm/ |
D | page.h | 57 #define PAGE_OFFSET __page_offset macro 68 #define PAGE_OFFSET CONFIG_KERNEL_START macro 167 # define ARCH_PFN_OFFSET (PAGE_OFFSET >> PAGE_SHIFT)
|
/linux-4.1.27/arch/avr32/include/asm/ |
D | page.h | 66 #define PAGE_OFFSET (0x80000000UL) macro 79 #define MAP_NR(addr) (((unsigned long)(addr) - PAGE_OFFSET) >> PAGE_SHIFT)
|
/linux-4.1.27/arch/ia64/include/asm/ |
D | page.h | 59 # define __pa(x) ((x) - PAGE_OFFSET) 60 # define __va(x) ((x) + PAGE_OFFSET) 217 #define PAGE_OFFSET RGN_BASE(RGN_KERNEL) macro
|
D | pgtable.h | 268 #define pte_page(pte) virt_to_page(((pte_val(pte) & _PFN_MASK) + PAGE_OFFSET)) 275 #define pmd_page(pmd) virt_to_page((pmd_val(pmd) + PAGE_OFFSET)) 282 #define pud_page(pud) virt_to_page((pud_val(pud) + PAGE_OFFSET)) 290 #define pgd_page(pgd) virt_to_page((pgd_val(pgd) + PAGE_OFFSET))
|
D | io.h | 82 return (unsigned long) address - PAGE_OFFSET; in virt_to_phys() 88 return (void *) (address + PAGE_OFFSET); in phys_to_virt()
|
/linux-4.1.27/arch/powerpc/kernel/ |
D | head_44x.S | 189 lis r5,PAGE_OFFSET@h 190 ori r5,r5,PAGE_OFFSET@l 319 lis r11, PAGE_OFFSET@h 430 lis r11, PAGE_OFFSET@h 566 lis r11,PAGE_OFFSET@h 663 lis r11,PAGE_OFFSET@h 899 lis r3,PAGE_OFFSET@h 900 ori r3,r3,PAGE_OFFSET@l 1111 lis r3,PAGE_OFFSET@h 1112 ori r3,r3,PAGE_OFFSET@l [all …]
|
D | head_8xx.S | 325 lis r11, (swapper_pg_dir-PAGE_OFFSET)@ha 330 lwz r11, (swapper_pg_dir-PAGE_OFFSET)@l(r11) /* Get the level 1 entry */ 378 lis r11, (swapper_pg_dir-PAGE_OFFSET)@ha 382 lwz r11, (swapper_pg_dir-PAGE_OFFSET)@l(r11) /* Get the level 1 entry */ 509 lis r11, (swapper_pg_dir-PAGE_OFFSET)@ha 512 lwz r11, (swapper_pg_dir-PAGE_OFFSET)@l(r11) /* Get the level 1 entry */ 851 li r5, (swapper_pg_dir-PAGE_OFFSET)@l
|
D | fsl_booke_entry_mapping.S | 170 lis r6,MAS2_VAL(PAGE_OFFSET, BOOK3E_PAGESZ_64M, M_IF_SMP)@h 171 ori r6,r6,MAS2_VAL(PAGE_OFFSET, BOOK3E_PAGESZ_64M, M_IF_SMP)@l
|
D | head_32.S | 498 lis r1,PAGE_OFFSET@h /* check if kernel address */ 572 lis r1,PAGE_OFFSET@h /* check if kernel address */ 656 lis r1,PAGE_OFFSET@h /* check if kernel address */ 1134 lis r11,PAGE_OFFSET@h
|
/linux-4.1.27/Documentation/arm/ |
D | Porting | 10 phys = virt - PAGE_OFFSET + PHYS_OFFSET 60 PAGE_OFFSET 62 boot phase, virtual address PAGE_OFFSET will be mapped to physical 80 hence PAGE_OFFSET). 83 Virtual start address of kernel, normally PAGE_OFFSET + 0x8000.
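The Porting document quoted above states the core relation phys = virt - PAGE_OFFSET + PHYS_OFFSET and notes that the kernel normally starts at PAGE_OFFSET + 0x8000. A worked example of that arithmetic, with PAGE_OFFSET and PHYS_OFFSET chosen as assumed example values for a board with RAM at 2 GiB:

#include <stdio.h>

int main(void)
{
	unsigned long page_offset = 0xC0000000UL;        /* assumed */
	unsigned long phys_offset = 0x80000000UL;        /* assumed: RAM at 2 GiB */
	unsigned long virt = 0xC0000000UL + 0x8000UL;    /* "normally PAGE_OFFSET + 0x8000" */

	/* phys = virt - PAGE_OFFSET + PHYS_OFFSET */
	printf("virt %#lx -> phys %#lx\n", virt, virt - page_offset + phys_offset);
	return 0;
}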
|
D | memory.txt | 59 PAGE_OFFSET high_memory-1 Kernel direct-mapped RAM region. 63 PKMAP_BASE PAGE_OFFSET-1 Permanent kernel mappings
|
/linux-4.1.27/arch/arm64/kernel/ |
D | vmlinux.lds.S | 84 . = PAGE_OFFSET + TEXT_OFFSET; 182 ASSERT(_text == (PAGE_OFFSET + TEXT_OFFSET), "HEAD is misaligned")
|
D | head.S | 43 #elif (PAGE_OFFSET & 0x1fffff) != 0 44 #error PAGE_OFFSET must be at least 2MB aligned 435 mov x5, #PAGE_OFFSET 447 mov x6, #PAGE_OFFSET
|
/linux-4.1.27/arch/tile/mm/ |
D | fault.c | 79 if (address >= PAGE_OFFSET) in SYSCALL_DEFINE1() 401 if (regs->sp < PAGE_OFFSET) { in handle_page_fault() 614 if ((pc & 0x7) != 0 || pc < PAGE_OFFSET || in do_page_fault_ics() 896 static unsigned long start = PAGE_OFFSET; in vmalloc_sync_all() 899 BUILD_BUG_ON(PAGE_OFFSET & ~PGDIR_MASK); in vmalloc_sync_all() 900 for (address = start; address >= PAGE_OFFSET; address += PGDIR_SIZE) { in vmalloc_sync_all()
|
D | pgtable.c | 111 BUG_ON(addr < PAGE_OFFSET); /* only for kernel LOWMEM */ in shatter_huge_page() 178 #define KERNEL_PGD_INDEX_START pgd_index(PAGE_OFFSET) 366 BUG_ON(kaddr < PAGE_OFFSET); in virt_to_kpte()
|
D | highmem.c | 257 BUG_ON(vaddr < PAGE_OFFSET); in __kunmap_atomic()
|
/linux-4.1.27/arch/sparc/mm/ |
D | init_64.c | 424 if (kaddr >= PAGE_OFFSET) in flush_icache_range() 787 initrd_start += PAGE_OFFSET; in find_ramdisk() 788 initrd_end += PAGE_OFFSET; in find_ramdisk() 1508 unsigned long vstart = PAGE_OFFSET + pstart; in kernel_map_range() 1509 unsigned long vend = PAGE_OFFSET + pend; in kernel_map_range() 1633 flush_tsb_kernel_range(PAGE_OFFSET + phys_start, in __kernel_map_pages() 1634 PAGE_OFFSET + phys_end); in __kernel_map_pages() 1639 __flush_tlb_kernel_range(PAGE_OFFSET + phys_start, in __kernel_map_pages() 1640 PAGE_OFFSET + phys_end); in __kernel_map_pages() 1656 unsigned long PAGE_OFFSET; variable [all …]
|
D | viking.S | 37 sethi %hi(PAGE_OFFSET), %g2 42 sethi %hi(PAGE_OFFSET + PAGE_SIZE - 0x80000000), %o3 62 add %g4, %o3, %g2 ! (PAGE_OFFSET + PAGE_SIZE) | (set << 5) 90 sethi %hi(PAGE_OFFSET), %g2
|
D | init_32.c | 226 initrd_start = (initrd_start - phys_base) + PAGE_OFFSET; in bootmem_init() 227 initrd_end = (initrd_end - phys_base) + PAGE_OFFSET; in bootmem_init()
|
D | ultra.S | 156 sethi %hi(PAGE_OFFSET), %g1 159 ldx [%g1 + %lo(PAGE_OFFSET)], %g1 181 sethi %hi(PAGE_OFFSET), %g1 182 ldx [%g1 + %lo(PAGE_OFFSET)], %g1 290 sethi %hi(PAGE_OFFSET), %g1 291 ldx [%g1 + %lo(PAGE_OFFSET)], %g1
|
/linux-4.1.27/arch/sparc/lib/ |
D | clear_page.S | 40 sethi %hi(PAGE_OFFSET), %g2 43 ldx [%g2 + %lo(PAGE_OFFSET)], %g2
|
D | copy_page.S | 49 sethi %hi(PAGE_OFFSET), %g2 52 ldx [%g2 + %lo(PAGE_OFFSET)], %g2
|
/linux-4.1.27/arch/unicore32/mm/ |
D | mmu.c | 222 md->virtual >= PAGE_OFFSET && md->virtual < VMALLOC_END) { in create_mapping() 271 if (vmalloc_reserve > VMALLOC_END - (PAGE_OFFSET + SZ_32M)) { in early_vmalloc() 272 vmalloc_reserve = VMALLOC_END - (PAGE_OFFSET + SZ_32M); in early_vmalloc() 311 for ( ; addr < PAGE_OFFSET; addr += PGDIR_SIZE) in prepare_page_table()
|
D | init.c | 404 PAGE_OFFSET, (unsigned long)high_memory, in mem_init() 405 DIV_ROUND_UP(((unsigned long)high_memory - PAGE_OFFSET), SZ_1M), in mem_init()
|
D | proc-ucv2.S | 124 sub r2, r0, #PAGE_OFFSET
|
D | proc-macros.S | 102 add \pa, \pa, #PAGE_OFFSET @ pa <- virt addr of page table
|
/linux-4.1.27/arch/x86/pci/ |
D | pcbios.c | 45 set_memory_x(PAGE_OFFSET + BIOS_BEGIN, (BIOS_END - BIOS_BEGIN) >> PAGE_SHIFT); in set_bios_x() 134 pci_indirect.address = pcibios_entry + PAGE_OFFSET; in check_pcibios() 358 bios32_indirect.address = bios32_entry + PAGE_OFFSET; in pci_find_bios()
|
/linux-4.1.27/arch/mips/include/asm/mach-ip22/ |
D | spaces.h | 16 #define PAGE_OFFSET 0xffffffff80000000UL macro
|
/linux-4.1.27/arch/um/include/asm/ |
D | page.h | 97 #define PAGE_OFFSET (uml_physmem) macro 98 #define KERNELBASE PAGE_OFFSET
|
/linux-4.1.27/arch/mips/include/asm/mach-ar7/ |
D | spaces.h | 17 #define PAGE_OFFSET _AC(0x94000000, UL) macro
|
/linux-4.1.27/arch/mips/include/asm/mach-malta/ |
D | spaces.h | 36 #define PAGE_OFFSET _AC(0x0, UL) macro
|
/linux-4.1.27/arch/cris/include/arch-v32/arch/ |
D | page.h | 7 #define PAGE_OFFSET KSEG_C /* kseg_c is mapped to physical ram. */ macro
|
/linux-4.1.27/arch/unicore32/kernel/ |
D | hibernate_asm.S | 29 sub r0, r0, #PAGE_OFFSET 62 sub r0, r0, #PAGE_OFFSET
|
D | vmlinux.lds.S | 26 . = PAGE_OFFSET + KERNEL_IMAGE_START;
|
D | head.S | 27 #define KERNEL_RAM_VADDR (PAGE_OFFSET + KERNEL_IMAGE_START) 122 add r0, r4, #PAGE_OFFSET >> 20
|
D | hibernate.c | 81 pgd_idx = pgd_index(PAGE_OFFSET); in resume_physical_mapping_init()
|
D | traps.c | 54 if (sp < PAGE_OFFSET || in verify_stack()
|
/linux-4.1.27/arch/sparc/power/ |
D | hibernate_asm.S | 57 sethi %hi(PAGE_OFFSET), %g7 58 ldx [%g7 + %lo(PAGE_OFFSET)], %g7
|
/linux-4.1.27/arch/arm/mach-socfpga/ |
D | headsmp.S | 25 sub r2, r2, #PAGE_OFFSET
|
/linux-4.1.27/arch/ia64/mm/ |
D | contig.c | 54 if (start == PAGE_OFFSET) { in find_bootmap_location() 61 free_start = PAGE_OFFSET; in find_bootmap_location()
|
/linux-4.1.27/arch/cris/arch-v32/mm/ |
D | init.c | 148 zones_size[0] = ((unsigned long) high_memory - PAGE_OFFSET) >> PAGE_SHIFT; in paging_init() 159 free_area_init_node(0, zones_size, PAGE_OFFSET >> PAGE_SHIFT, 0); in paging_init()
|
/linux-4.1.27/arch/arm/mach-tegra/ |
D | pm.c | 198 cpu_suspend(PHYS_OFFSET - PAGE_OFFSET, &tegra_sleep_cpu); in tegra_idle_lp2_last() 343 cpu_suspend(PHYS_OFFSET - PAGE_OFFSET, tegra_sleep_func); in tegra_suspend_enter()
|
/linux-4.1.27/arch/x86/include/asm/ |
D | pgtable_32_types.h | 53 #define MAXMEM (VMALLOC_END - PAGE_OFFSET - __VMALLOC_RESERVE)
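MAXMEM above is what bounds the 32-bit direct mapping: the span from PAGE_OFFSET up to the vmalloc boundary, minus the vmalloc reserve. A back-of-the-envelope sketch with assumed round values (the real VMALLOC_END and reserve are configuration dependent):

#include <stdio.h>

#define DEMO_PAGE_OFFSET     0xC0000000UL   /* assumed i386 default */
#define DEMO_VMALLOC_END     0xF8000000UL   /* assumed round example value */
#define DEMO_VMALLOC_RESERVE (128UL << 20)  /* assumed 128 MiB reserve */

int main(void)
{
	unsigned long maxmem = DEMO_VMALLOC_END - DEMO_PAGE_OFFSET - DEMO_VMALLOC_RESERVE;

	/* With these example values only 768 MiB of RAM fits in the direct map. */
	printf("MAXMEM = %lu MiB\n", maxmem >> 20);
	return 0;
}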
|
D | page_64.h | 17 x = y + ((x > y) ? phys_base : (__START_KERNEL_map - PAGE_OFFSET)); in __phys_addr_nodebug()
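The page_64.h line above folds both x86_64 kernel virtual regions (the direct map at PAGE_OFFSET and the kernel text at __START_KERNEL_map) into a physical address with a single compare. A standalone sketch of that trick; all three constants below are assumed example values (the real ones depend on the boot-time phys_base):

#include <assert.h>
#include <stdint.h>

#define DEMO_PAGE_OFFSET      UINT64_C(0xffff880000000000) /* assumed direct-map base */
#define DEMO_START_KERNEL_MAP UINT64_C(0xffffffff80000000) /* assumed kernel-text base */

static uint64_t demo_phys_base = UINT64_C(0x1000000); /* assumed kernel load offset */

static uint64_t demo_phys_addr(uint64_t x)
{
	uint64_t y = x - DEMO_START_KERNEL_MAP;

	/*
	 * Kernel-text addresses do not wrap in the subtraction (x > y), so
	 * phys_base is added; any other address is re-based against
	 * PAGE_OFFSET, i.e. treated as part of the direct mapping.
	 */
	return y + ((x > y) ? demo_phys_base : (DEMO_START_KERNEL_MAP - DEMO_PAGE_OFFSET));
}

int main(void)
{
	assert(demo_phys_addr(DEMO_PAGE_OFFSET + 0x2000) == 0x2000);
	assert(demo_phys_addr(DEMO_START_KERNEL_MAP + 0x2000) == demo_phys_base + 0x2000);
	return 0;
}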
|
D | page_32.h | 8 #define __phys_addr_nodebug(x) ((x) - PAGE_OFFSET)
|
D | page_types.h | 30 #define PAGE_OFFSET ((unsigned long)__PAGE_OFFSET) macro
|
D | page.h | 54 #define __va(x) ((void *)((unsigned long)(x)+PAGE_OFFSET))
|
D | dma.h | 80 #define MAX_DMA_ADDRESS (PAGE_OFFSET + 0x1000000)
|
D | io.h | 233 #define __ISA_IO_base ((char __iomem *)(PAGE_OFFSET))
|
/linux-4.1.27/arch/arm/mm/ |
D | init.c | 527 MLM(PAGE_OFFSET, (unsigned long)high_memory), in mem_init() 555 BUILD_BUG_ON(PKMAP_BASE + LAST_PKMAP * PAGE_SIZE > PAGE_OFFSET); in mem_init() 556 BUG_ON(PKMAP_BASE + LAST_PKMAP * PAGE_SIZE > PAGE_OFFSET); in mem_init() 582 .start = PAGE_OFFSET,
|
D | proc-v7-3level.S | 132 cmp \ttbr1, \tmp @ PHYS_OFFSET > PAGE_OFFSET?
|
D | mmu.c | 858 md->virtual >= PAGE_OFFSET && in create_mapping() 1056 if (vmalloc_reserve > VMALLOC_END - (PAGE_OFFSET + SZ_32M)) { in early_vmalloc() 1057 vmalloc_reserve = VMALLOC_END - (PAGE_OFFSET + SZ_32M); in early_vmalloc() 1167 for ( ; addr < PAGE_OFFSET; addr += PMD_SIZE) in prepare_page_table()
|
/linux-4.1.27/arch/arm/kernel/ |
D | head.S | 37 #define KERNEL_RAM_VADDR (PAGE_OFFSET + TEXT_OFFSET) 111 sub r4, r3, r4 @ (PHYS_OFFSET - PAGE_OFFSET) 149 .long PAGE_OFFSET 230 add r0, r4, #PAGE_OFFSET >> (SECTION_SHIFT - PMD_ORDER) 265 addne r3, r3, #PAGE_OFFSET
|
D | vmlinux.lds.S | 90 . = PAGE_OFFSET + TEXT_OFFSET; 232 . = PAGE_OFFSET + TEXT_OFFSET;
|
D | atags_parse.c | 209 tags = (void *)(PAGE_OFFSET + mdesc->atag_offset); in setup_machine_tags()
|
/linux-4.1.27/arch/alpha/mm/ |
D | init.c | 109 newptbr = ((unsigned long) swapper_pg_dir - PAGE_OFFSET) >> PAGE_SHIFT; in switch_to_system_map() 134 if (original_pcb_ptr < PAGE_OFFSET) { in switch_to_system_map()
|
/linux-4.1.27/arch/cris/arch-v10/mm/ |
D | init.c | 174 zones_size[0] = ((unsigned long)high_memory - PAGE_OFFSET) >> PAGE_SHIFT; in paging_init() 185 free_area_init_node(0, zones_size, PAGE_OFFSET >> PAGE_SHIFT, 0); in paging_init()
|
/linux-4.1.27/arch/arm64/mm/ |
D | init.c | 317 MLM((unsigned long)virt_to_page(PAGE_OFFSET), in mem_init() 323 MLM(PAGE_OFFSET, (unsigned long)high_memory), in mem_init()
|
D | dump.c | 66 { PAGE_OFFSET, "Kernel Mapping" }, 333 (unsigned long)virt_to_page(PAGE_OFFSET); in ptdump_init()
|
/linux-4.1.27/drivers/video/fbdev/ |
D | clps711xfb.c | 185 unsigned long phys_base = PAGE_OFFSET; in clps711x_guess_lcd_params() 186 void *virt_base = (void *)PAGE_OFFSET; in clps711x_guess_lcd_params()
|
/linux-4.1.27/arch/ia64/include/asm/sn/ |
D | addrs.h | 129 #define CAC_BASE (PAGE_OFFSET | AS_CAC_SPACE) 132 #define GET_BASE (PAGE_OFFSET | AS_GET_SPACE)
|
/linux-4.1.27/arch/m68k/sun3/ |
D | mmu_emu.c | 190 for(seg = 0; seg < PAGE_OFFSET; seg += SUN3_PMEG_SIZE) in mmu_emu_init() 309 if(vaddr >= PAGE_OFFSET) { in mmu_emu_map_pmeg()
|
D | config.c | 154 memory_end = *(romvec->pv_sun3mem) + PAGE_OFFSET - 2*PAGE_SIZE; in config_sun3()
|
/linux-4.1.27/arch/m32r/kernel/ |
D | setup.c | 104 memory_start = (unsigned long)CONFIG_MEMORY_START+PAGE_OFFSET; in parse_mem_cmdline() 205 initrd_start = INITRD_START + PAGE_OFFSET; in setup_memory()
|
/linux-4.1.27/arch/arm/mach-keystone/ |
D | keystone.c | 95 __pv_offset = (offset - PAGE_OFFSET); in keystone_init_meminfo()
|
/linux-4.1.27/arch/mn10300/kernel/ |
D | traps.c | 322 if ((unsigned long) current >= PAGE_OFFSET && in show_registers_only() 366 if (regs->pc < PAGE_OFFSET) in show_registers() 615 return pc >= PAGE_OFFSET; in is_valid_bugaddr()
|
/linux-4.1.27/arch/x86/platform/efi/ |
D | efi_64.c | 137 num_pgds = pgd_index(MODULES_END - 1) - pgd_index(PAGE_OFFSET); in efi_sync_low_kernel_mappings() 139 memcpy(pgd + pgd_index(PAGE_OFFSET), in efi_sync_low_kernel_mappings() 140 init_mm.pgd + pgd_index(PAGE_OFFSET), in efi_sync_low_kernel_mappings()
|
/linux-4.1.27/arch/metag/kernel/ |
D | cachepart.c | 68 #if PAGE_OFFSET >= LINGLOBAL_BASE in get_thread_cache_size()
|
D | setup.c | 307 if (PAGE_OFFSET != text_start) in setup_arch() 309 PAGE_OFFSET, text_start); in setup_arch()
|
/linux-4.1.27/arch/cris/arch-v10/kernel/ |
D | traps.c | 65 if (regs->irp < PAGE_OFFSET) in show_registers()
|
/linux-4.1.27/arch/mn10300/include/asm/ |
D | page.h | 96 #define PAGE_OFFSET ((unsigned long) __PAGE_OFFSET) macro
|
/linux-4.1.27/arch/cris/kernel/ |
D | setup.c | 130 min_low_pfn = PAGE_OFFSET >> PAGE_SHIFT; in setup_arch()
|
/linux-4.1.27/arch/x86/power/ |
D | hibernate_32.c | 89 pgd_idx = pgd_index(PAGE_OFFSET); in resume_physical_mapping_init()
|
/linux-4.1.27/arch/cris/arch-v32/kernel/ |
D | traps.c | 73 if (regs->erp < PAGE_OFFSET) in show_registers()
|
/linux-4.1.27/arch/xtensa/mm/ |
D | init.c | 359 PAGE_OFFSET, PAGE_OFFSET + in mem_init()
|
D | tlb.c | 154 if (end > start && start >= TASK_SIZE && end <= PAGE_OFFSET && in local_flush_tlb_kernel_range()
|
/linux-4.1.27/arch/frv/kernel/ |
D | setup.c | 879 kstart = (unsigned long) &__kernel_image_start - PAGE_OFFSET; in setup_linux_memory() 880 kend = (unsigned long) &__kernel_image_end - PAGE_OFFSET; in setup_linux_memory() 935 initrd_start = INITRD_START + PAGE_OFFSET; in setup_linux_memory() 981 max_mapnr = ((unsigned long) high_memory - PAGE_OFFSET) >> PAGE_SHIFT; in setup_uclinux_memory()
|
/linux-4.1.27/arch/openrisc/mm/ |
D | init.c | 88 v = PAGE_OFFSET; in map_ram()
|
/linux-4.1.27/arch/x86/platform/olpc/ |
D | olpc-xo1-pm.c | 36 } ofw_bios_entry = { 0xF0000 + PAGE_OFFSET, __KERNEL_CS };
|
/linux-4.1.27/arch/s390/include/asm/ |
D | page.h | 143 #define PAGE_OFFSET 0x0UL macro
|
/linux-4.1.27/arch/m32r/mm/ |
D | discontig.c | 111 initrd_start = INITRD_START + PAGE_OFFSET; in setup_memory()
|
/linux-4.1.27/arch/powerpc/platforms/embedded6xx/ |
D | wii.c | 111 setbat(4, PAGE_OFFSET+delta, delta, bl, PAGE_KERNEL_X); in wii_mmu_mapin_mem2()
|
/linux-4.1.27/arch/ia64/kernel/ |
D | setup.c | 153 if (start == PAGE_OFFSET) { in filter_rsvd_memory() 162 prev_start = PAGE_OFFSET; in filter_rsvd_memory() 191 if (start == PAGE_OFFSET) { in filter_memory()
|
D | uncached.c | 111 uc_addr = c_addr - PAGE_OFFSET + __IA64_UNCACHED_OFFSET; in uncached_add_chunk()
|
D | vmlinux.lds.S | 33 v = PAGE_OFFSET; /* this symbol is here to make debugging easier... */
|
/linux-4.1.27/arch/m68k/kernel/ |
D | setup_no.c | 244 PAGE_OFFSET >> PAGE_SHIFT, /* 0 on coldfire */ in setup_arch()
|
/linux-4.1.27/arch/s390/pci/ |
D | pci_dma.c | 438 zdev->iommu_size = (unsigned long) high_memory - PAGE_OFFSET; in zpci_dma_init_device() 448 zdev->start_dma + PAGE_OFFSET, in zpci_dma_init_device()
|
/linux-4.1.27/mm/kasan/ |
D | report.c | 104 if ((addr >= (void *)PAGE_OFFSET) && in print_address_description()
|
/linux-4.1.27/arch/alpha/boot/ |
D | bootpz.c | 88 kaddr = (find_pa(vaddr) | PAGE_OFFSET); in check_range()
|
/linux-4.1.27/arch/avr32/kernel/ |
D | traps.c | 177 if (pc < PAGE_OFFSET) in is_valid_bugaddr()
|
/linux-4.1.27/fs/proc/ |
D | kcore.c | 44 #define kc_vaddr_to_offset(v) ((v) - PAGE_OFFSET) 47 #define kc_offset_to_vaddr(o) ((o) + PAGE_OFFSET)
|
/linux-4.1.27/include/linux/ |
D | kvm_host.h | 91 #define KVM_HVA_ERR_BAD (PAGE_OFFSET) 92 #define KVM_HVA_ERR_RO_BAD (PAGE_OFFSET + PAGE_SIZE) 96 return addr >= PAGE_OFFSET; in kvm_is_error_hva()
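kvm_host.h above reuses PAGE_OFFSET as an error sentinel, since no valid userspace host virtual address can sit at or above it. A sketch of the check, with PAGE_OFFSET and PAGE_SIZE as assumed 32-bit example values and kvm_is_error_hva_demo as an illustrative stand-in:

#include <assert.h>
#include <stdbool.h>

#define PAGE_OFFSET 0xc0000000UL /* assumed 32-bit example value */
#define PAGE_SIZE   0x1000UL     /* assumed */

#define KVM_HVA_ERR_BAD    (PAGE_OFFSET)
#define KVM_HVA_ERR_RO_BAD (PAGE_OFFSET + PAGE_SIZE)

static bool kvm_is_error_hva_demo(unsigned long addr)
{
	return addr >= PAGE_OFFSET; /* mirrors kvm_is_error_hva() */
}

int main(void)
{
	assert(kvm_is_error_hva_demo(KVM_HVA_ERR_BAD));
	assert(kvm_is_error_hva_demo(KVM_HVA_ERR_RO_BAD));
	assert(!kvm_is_error_hva_demo(0x08048000UL)); /* an ordinary user address */
	return 0;
}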
|