root/arch/arm/include/asm/pgtable.h


DEFINITIONS

This source file includes the following definitions:
  1. pmd_page_vaddr
  2. pte_access_permitted
  3. __sync_icache_dcache
  4. set_pte_at
  5. clear_pte_bit
  6. set_pte_bit
  7. pte_wrprotect
  8. pte_mkwrite
  9. pte_mkclean
  10. pte_mkdirty
  11. pte_mkold
  12. pte_mkyoung
  13. pte_mkexec
  14. pte_mknexec
  15. pte_modify

/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *  arch/arm/include/asm/pgtable.h
 *
 *  Copyright (C) 1995-2002 Russell King
 */
#ifndef _ASMARM_PGTABLE_H
#define _ASMARM_PGTABLE_H

#include <linux/const.h>
#include <asm/proc-fns.h>

#ifndef CONFIG_MMU

#include <asm-generic/4level-fixup.h>
#include <asm/pgtable-nommu.h>

#else

#define __ARCH_USE_5LEVEL_HACK
#include <asm-generic/pgtable-nopud.h>
#include <asm/memory.h>
#include <asm/pgtable-hwdef.h>

#include <asm/tlbflush.h>

#ifdef CONFIG_ARM_LPAE
#include <asm/pgtable-3level.h>
#else
#include <asm/pgtable-2level.h>
#endif

/*
 * Just any arbitrary offset to the start of the vmalloc VM area: the
 * current 8MB value just means that there will be an 8MB "hole" after the
 * physical memory until the kernel virtual memory starts.  That means that
 * any out-of-bounds memory accesses will hopefully be caught.
 * The vmalloc() routines leave a hole of 4kB between each vmalloced
 * area for the same reason. ;)
 */
#define VMALLOC_OFFSET          (8*1024*1024)
#define VMALLOC_START           (((unsigned long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1))
#define VMALLOC_END             0xff800000UL
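
As a worked illustration of the VMALLOC_START arithmetic above: adding
VMALLOC_OFFSET and masking rounds high_memory up to the next 8MB boundary,
leaving the guard hole the comment describes.  A minimal standalone sketch
(userspace C, hypothetical high_memory value):

#include <stdio.h>

#define VMALLOC_OFFSET (8 * 1024 * 1024)

int main(void)
{
        /* hypothetical: 256MB of RAM mapped at PAGE_OFFSET 0xc0000000 */
        unsigned long high_memory = 0xd0000000UL;
        unsigned long vmalloc_start =
                (high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET - 1UL);

        /* 0xd0000000 + 0x00800000 = 0xd0800000, already 8MB-aligned */
        printf("VMALLOC_START = %#lx\n", vmalloc_start);
        return 0;
}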

#define LIBRARY_TEXT_START      0x0c000000

#ifndef __ASSEMBLY__
extern void __pte_error(const char *file, int line, pte_t);
extern void __pmd_error(const char *file, int line, pmd_t);
extern void __pgd_error(const char *file, int line, pgd_t);

#define pte_ERROR(pte)          __pte_error(__FILE__, __LINE__, pte)
#define pmd_ERROR(pmd)          __pmd_error(__FILE__, __LINE__, pmd)
#define pgd_ERROR(pgd)          __pgd_error(__FILE__, __LINE__, pgd)

/*
 * This is the lowest virtual address we can permit any user space
 * mapping to be mapped at.  This is particularly important for
 * non-high vector CPUs.
 */
#define FIRST_USER_ADDRESS      (PAGE_SIZE * 2)

/*
 * Use TASK_SIZE as the ceiling argument for free_pgtables() and
 * free_pgd_range() to avoid freeing the modules pmd when LPAE is enabled (pmd
 * page shared between user and kernel).
 */
#ifdef CONFIG_ARM_LPAE
#define USER_PGTABLES_CEILING   TASK_SIZE
#endif

/*
 * The pgprot_* and protection_map entries will be fixed up at runtime
 * to include the cacheable and bufferable bits based on memory policy,
 * as well as any architecture dependent bits like global/ASID and SMP
 * shared mapping bits.
 */
#define _L_PTE_DEFAULT  L_PTE_PRESENT | L_PTE_YOUNG

extern pgprot_t         pgprot_user;
extern pgprot_t         pgprot_kernel;
extern pgprot_t         pgprot_hyp_device;
extern pgprot_t         pgprot_s2;
extern pgprot_t         pgprot_s2_device;

#define _MOD_PROT(p, b) __pgprot(pgprot_val(p) | (b))

#define PAGE_NONE               _MOD_PROT(pgprot_user, L_PTE_XN | L_PTE_RDONLY | L_PTE_NONE)
#define PAGE_SHARED             _MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_XN)
#define PAGE_SHARED_EXEC        _MOD_PROT(pgprot_user, L_PTE_USER)
#define PAGE_COPY               _MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_RDONLY | L_PTE_XN)
#define PAGE_COPY_EXEC          _MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_RDONLY)
#define PAGE_READONLY           _MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_RDONLY | L_PTE_XN)
#define PAGE_READONLY_EXEC      _MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_RDONLY)
#define PAGE_KERNEL             _MOD_PROT(pgprot_kernel, L_PTE_XN)
#define PAGE_KERNEL_EXEC        pgprot_kernel
#define PAGE_HYP                _MOD_PROT(pgprot_kernel, L_PTE_HYP | L_PTE_XN)
#define PAGE_HYP_EXEC           _MOD_PROT(pgprot_kernel, L_PTE_HYP | L_PTE_RDONLY)
#define PAGE_HYP_RO             _MOD_PROT(pgprot_kernel, L_PTE_HYP | L_PTE_RDONLY | L_PTE_XN)
#define PAGE_HYP_DEVICE         _MOD_PROT(pgprot_hyp_device, L_PTE_HYP)
#define PAGE_S2                 _MOD_PROT(pgprot_s2, L_PTE_S2_RDONLY | L_PTE_XN)
#define PAGE_S2_DEVICE          _MOD_PROT(pgprot_s2_device, L_PTE_S2_RDONLY | L_PTE_XN)

#define __PAGE_NONE             __pgprot(_L_PTE_DEFAULT | L_PTE_RDONLY | L_PTE_XN | L_PTE_NONE)
#define __PAGE_SHARED           __pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_XN)
#define __PAGE_SHARED_EXEC      __pgprot(_L_PTE_DEFAULT | L_PTE_USER)
#define __PAGE_COPY             __pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_RDONLY | L_PTE_XN)
#define __PAGE_COPY_EXEC        __pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_RDONLY)
#define __PAGE_READONLY         __pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_RDONLY | L_PTE_XN)
#define __PAGE_READONLY_EXEC    __pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_RDONLY)

#define __pgprot_modify(prot,mask,bits)         \
        __pgprot((pgprot_val(prot) & ~(mask)) | (bits))

#define pgprot_noncached(prot) \
        __pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_UNCACHED)

#define pgprot_writecombine(prot) \
        __pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_BUFFERABLE)

#define pgprot_stronglyordered(prot) \
        __pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_UNCACHED)

#define pgprot_device(prot) \
        __pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_DEV_SHARED | L_PTE_SHARED | L_PTE_DIRTY | L_PTE_XN)

#ifdef CONFIG_ARM_DMA_MEM_BUFFERABLE
#define pgprot_dmacoherent(prot) \
        __pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_BUFFERABLE | L_PTE_XN)
#define __HAVE_PHYS_MEM_ACCESS_PROT
struct file;
extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
                                     unsigned long size, pgprot_t vma_prot);
#else
#define pgprot_dmacoherent(prot) \
        __pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_UNCACHED | L_PTE_XN)
#endif
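
The pgprot modifiers above rewrite only the memory-type field (L_PTE_MT_MASK)
of an existing protection value.  A hedged sketch of typical use (kernel
context, hypothetical driver; "my_dev_phys" is an assumed device address, not
anything defined in this file):

static int mydrv_mmap(struct file *file, struct vm_area_struct *vma)
{
        unsigned long size = vma->vm_end - vma->vm_start;

        /* swap the memory-type bits for bufferable, i.e. write-combining */
        vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

        return remap_pfn_range(vma, vma->vm_start,
                               my_dev_phys >> PAGE_SHIFT, size,
                               vma->vm_page_prot);
}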

#endif /* __ASSEMBLY__ */

/*
 * The table below defines the page protection levels that we insert into our
 * Linux page table version.  These get translated into the best that the
 * architecture can perform.  Note that on most ARM hardware:
 *  1) We cannot do execute protection
 *  2) If we could do execute protection, then read is implied
 *  3) write implies read permissions
 */
#define __P000  __PAGE_NONE
#define __P001  __PAGE_READONLY
#define __P010  __PAGE_COPY
#define __P011  __PAGE_COPY
#define __P100  __PAGE_READONLY_EXEC
#define __P101  __PAGE_READONLY_EXEC
#define __P110  __PAGE_COPY_EXEC
#define __P111  __PAGE_COPY_EXEC

#define __S000  __PAGE_NONE
#define __S001  __PAGE_READONLY
#define __S010  __PAGE_SHARED
#define __S011  __PAGE_SHARED
#define __S100  __PAGE_READONLY_EXEC
#define __S101  __PAGE_READONLY_EXEC
#define __S110  __PAGE_SHARED_EXEC
#define __S111  __PAGE_SHARED_EXEC
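
These entries populate protection_map[] in mm/mmap.c, indexed by the low
VM_READ/VM_WRITE/VM_EXEC/VM_SHARED bits of vm_flags.  A standalone sketch
(userspace C, names mirror the kernel's but the table here is illustrative)
of how a mmap() protection request selects an entry:

#include <stdio.h>

#define VM_READ   0x1
#define VM_WRITE  0x2
#define VM_EXEC   0x4
#define VM_SHARED 0x8

static const char *protection_map[16] = {
        /* private: writable means copy-on-write */
        "__PAGE_NONE", "__PAGE_READONLY", "__PAGE_COPY", "__PAGE_COPY",
        "__PAGE_READONLY_EXEC", "__PAGE_READONLY_EXEC",
        "__PAGE_COPY_EXEC", "__PAGE_COPY_EXEC",
        /* shared: writable really is writable */
        "__PAGE_NONE", "__PAGE_READONLY", "__PAGE_SHARED", "__PAGE_SHARED",
        "__PAGE_READONLY_EXEC", "__PAGE_READONLY_EXEC",
        "__PAGE_SHARED_EXEC", "__PAGE_SHARED_EXEC",
};

int main(void)
{
        /* PROT_READ|PROT_WRITE, MAP_PRIVATE: write faults will COW */
        printf("%s\n", protection_map[VM_READ | VM_WRITE]);
        /* PROT_READ|PROT_WRITE, MAP_SHARED: genuinely writable */
        printf("%s\n", protection_map[VM_READ | VM_WRITE | VM_SHARED]);
        return 0;
}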

#ifndef __ASSEMBLY__
/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
extern struct page *empty_zero_page;
#define ZERO_PAGE(vaddr)        (empty_zero_page)

extern pgd_t swapper_pg_dir[PTRS_PER_PGD];

/* to find an entry in a page-table-directory */
#define pgd_index(addr)         ((addr) >> PGDIR_SHIFT)

#define pgd_offset(mm, addr)    ((mm)->pgd + pgd_index(addr))

/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(addr)      pgd_offset(&init_mm, addr)

#define pmd_none(pmd)           (!pmd_val(pmd))

static inline pte_t *pmd_page_vaddr(pmd_t pmd)
{
        return __va(pmd_val(pmd) & PHYS_MASK & (s32)PAGE_MASK);
}

#define pmd_page(pmd)           pfn_to_page(__phys_to_pfn(pmd_val(pmd) & PHYS_MASK))

#ifndef CONFIG_HIGHPTE
#define __pte_map(pmd)          pmd_page_vaddr(*(pmd))
#define __pte_unmap(pte)        do { } while (0)
#else
#define __pte_map(pmd)          (pte_t *)kmap_atomic(pmd_page(*(pmd)))
#define __pte_unmap(pte)        kunmap_atomic(pte)
#endif

#define pte_index(addr)         (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))

#define pte_offset_kernel(pmd,addr)     (pmd_page_vaddr(*(pmd)) + pte_index(addr))

#define pte_offset_map(pmd,addr)        (__pte_map(pmd) + pte_index(addr))
#define pte_unmap(pte)                  __pte_unmap(pte)

#define pte_pfn(pte)            ((pte_val(pte) & PHYS_MASK) >> PAGE_SHIFT)
#define pfn_pte(pfn,prot)       __pte(__pfn_to_phys(pfn) | pgprot_val(prot))

#define pte_page(pte)           pfn_to_page(pte_pfn(pte))
#define mk_pte(page,prot)       pfn_pte(page_to_pfn(page), prot)

#define pte_clear(mm,addr,ptep) set_pte_ext(ptep, __pte(0), 0)
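
Put together, these macros let code walk the page tables in software.  A
hedged sketch (kernel context, minimal error handling; the folded pud/pmd
levels pass through on non-LPAE, and this is illustrative rather than the
kernel's actual lookup path):

static pte_t lookup_kernel_pte(unsigned long addr)
{
        pgd_t *pgd = pgd_offset_k(addr);    /* entry in swapper_pg_dir */
        pud_t *pud = pud_offset(pgd, addr); /* folded: same entry on !LPAE */
        pmd_t *pmd = pmd_offset(pud, addr);
        pte_t *pte;

        if (pmd_none(*pmd))                 /* no PTE table here */
                return __pte(0);

        pte = pte_offset_kernel(pmd, addr); /* pmd_page_vaddr + pte_index */
        return *pte;
}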

#define pte_isset(pte, val)     ((u32)(val) == (val) ? pte_val(pte) & (val) \
                                                : !!(pte_val(pte) & (val)))
#define pte_isclear(pte, val)   (!(pte_val(pte) & (val)))

#define pte_none(pte)           (!pte_val(pte))
#define pte_present(pte)        (pte_isset((pte), L_PTE_PRESENT))
#define pte_valid(pte)          (pte_isset((pte), L_PTE_VALID))
#define pte_accessible(mm, pte) (mm_tlb_flush_pending(mm) ? pte_present(pte) : pte_valid(pte))
#define pte_write(pte)          (pte_isclear((pte), L_PTE_RDONLY))
#define pte_dirty(pte)          (pte_isset((pte), L_PTE_DIRTY))
#define pte_young(pte)          (pte_isset((pte), L_PTE_YOUNG))
#define pte_exec(pte)           (pte_isclear((pte), L_PTE_XN))

#define pte_valid_user(pte)     \
        (pte_valid(pte) && pte_isset((pte), L_PTE_USER) && pte_young(pte))

static inline bool pte_access_permitted(pte_t pte, bool write)
{
        pteval_t mask = L_PTE_PRESENT | L_PTE_USER;
        pteval_t needed = mask;

        if (write)
                mask |= L_PTE_RDONLY;

        return (pte_val(pte) & mask) == needed;
}
#define pte_access_permitted pte_access_permitted
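
The mask/needed trick in pte_access_permitted() is worth spelling out: for
writes, L_PTE_RDONLY is added to the mask but not to "needed", so the check
passes only when that bit is clear.  A standalone worked example (userspace C;
the bit positions match the 2-level layout but are illustrative here):

#include <stdio.h>

#define L_PTE_PRESENT (1UL << 0)
#define L_PTE_RDONLY  (1UL << 7)
#define L_PTE_USER    (1UL << 8)

static int access_permitted(unsigned long pteval, int write)
{
        unsigned long mask = L_PTE_PRESENT | L_PTE_USER;
        unsigned long needed = mask;

        if (write)
                mask |= L_PTE_RDONLY;   /* in mask, absent from needed */

        return (pteval & mask) == needed;
}

int main(void)
{
        unsigned long ro = L_PTE_PRESENT | L_PTE_USER | L_PTE_RDONLY;
        unsigned long rw = L_PTE_PRESENT | L_PTE_USER;

        printf("read-only pte, write: %d\n", access_permitted(ro, 1)); /* 0 */
        printf("writable pte, write:  %d\n", access_permitted(rw, 1)); /* 1 */
        return 0;
}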

#if __LINUX_ARM_ARCH__ < 6
static inline void __sync_icache_dcache(pte_t pteval)
{
}
#else
extern void __sync_icache_dcache(pte_t pteval);
#endif

static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
                              pte_t *ptep, pte_t pteval)
{
        unsigned long ext = 0;

        if (addr < TASK_SIZE && pte_valid_user(pteval)) {
                if (!pte_special(pteval))
                        __sync_icache_dcache(pteval);
                ext |= PTE_EXT_NG;
        }

        set_pte_ext(ptep, pteval, ext);
}

static inline pte_t clear_pte_bit(pte_t pte, pgprot_t prot)
{
        pte_val(pte) &= ~pgprot_val(prot);
        return pte;
}

static inline pte_t set_pte_bit(pte_t pte, pgprot_t prot)
{
        pte_val(pte) |= pgprot_val(prot);
        return pte;
}

static inline pte_t pte_wrprotect(pte_t pte)
{
        return set_pte_bit(pte, __pgprot(L_PTE_RDONLY));
}

static inline pte_t pte_mkwrite(pte_t pte)
{
        return clear_pte_bit(pte, __pgprot(L_PTE_RDONLY));
}

static inline pte_t pte_mkclean(pte_t pte)
{
        return clear_pte_bit(pte, __pgprot(L_PTE_DIRTY));
}

static inline pte_t pte_mkdirty(pte_t pte)
{
        return set_pte_bit(pte, __pgprot(L_PTE_DIRTY));
}

static inline pte_t pte_mkold(pte_t pte)
{
        return clear_pte_bit(pte, __pgprot(L_PTE_YOUNG));
}

static inline pte_t pte_mkyoung(pte_t pte)
{
        return set_pte_bit(pte, __pgprot(L_PTE_YOUNG));
}

static inline pte_t pte_mkexec(pte_t pte)
{
        return clear_pte_bit(pte, __pgprot(L_PTE_XN));
}

static inline pte_t pte_mknexec(pte_t pte)
{
        return set_pte_bit(pte, __pgprot(L_PTE_XN));
}

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
        const pteval_t mask = L_PTE_XN | L_PTE_RDONLY | L_PTE_USER |
                L_PTE_NONE | L_PTE_VALID;
        pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
        return pte;
}
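
Note how pte_modify() deliberately keeps everything outside its mask: the PFN
and the software dirty/young bits survive a protection change, while XN,
RDONLY, USER, NONE and VALID come from the new protection, which is how
mprotect()-style code reuses an existing PTE.  A short sketch of how generic
mm code composes the helpers above (kernel context, illustrative only):

static pte_t wrprotect_and_age(pte_t pte)
{
        pte = pte_wrprotect(pte);   /* sets L_PTE_RDONLY: writes now fault */
        pte = pte_mkold(pte);       /* clears L_PTE_YOUNG: next access ages */
        return pte;
}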

/*
 * Encode and decode a swap entry.  Swap entries are stored in the Linux
 * page tables as follows:
 *
 *   3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1
 *   1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
 *   <--------------- offset ------------------------> < type -> 0 0
 *
 * This gives us up to 31 swap files and 128GB per swap file.  Note that
 * the offset field is always non-zero.
 */
#define __SWP_TYPE_SHIFT        2
#define __SWP_TYPE_BITS         5
#define __SWP_TYPE_MASK         ((1 << __SWP_TYPE_BITS) - 1)
#define __SWP_OFFSET_SHIFT      (__SWP_TYPE_BITS + __SWP_TYPE_SHIFT)

#define __swp_type(x)           (((x).val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK)
#define __swp_offset(x)         ((x).val >> __SWP_OFFSET_SHIFT)
#define __swp_entry(type,offset) ((swp_entry_t) { ((type) << __SWP_TYPE_SHIFT) | ((offset) << __SWP_OFFSET_SHIFT) })

#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(swp) ((pte_t) { (swp).val })
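
A standalone round-trip of the layout above (userspace C): the type occupies
bits 2..6, the offset bits 7..31, and bits 0..1 stay zero so the hardware sees
the entry as not-present.

#include <assert.h>
#include <stdio.h>

#define __SWP_TYPE_SHIFT   2
#define __SWP_TYPE_BITS    5
#define __SWP_TYPE_MASK    ((1 << __SWP_TYPE_BITS) - 1)
#define __SWP_OFFSET_SHIFT (__SWP_TYPE_BITS + __SWP_TYPE_SHIFT)

int main(void)
{
        unsigned long type = 3, offset = 0x12345;
        unsigned long val = (type << __SWP_TYPE_SHIFT) |
                            (offset << __SWP_OFFSET_SHIFT);

        assert(((val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK) == type);
        assert((val >> __SWP_OFFSET_SHIFT) == offset);
        assert((val & 0x3) == 0);   /* present/valid bits clear */
        printf("swap pte value = %#lx\n", val);
        return 0;
}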

/*
 * It is an error for the kernel to have more swap files than we can
 * encode in the PTEs.  This ensures that we know when MAX_SWAPFILES
 * is increased beyond what we presently support.
 */
#define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > __SWP_TYPE_BITS)

/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
/* FIXME: this is not correct */
#define kern_addr_valid(addr)   (1)

#include <asm-generic/pgtable.h>

/*
 * We provide our own arch_get_unmapped_area to cope with VIPT caches.
 */
#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN

#endif /* !__ASSEMBLY__ */

#endif /* CONFIG_MMU */

#endif /* _ASMARM_PGTABLE_H */
