arch/powerpc/include/asm/nohash/32/pgtable.h

DEFINITIONS

This source file includes the following definitions.
  1. pte_mkwrite
  2. pte_mkdirty
  3. pte_mkyoung
  4. pte_wrprotect
  5. pte_mkexec
  6. pmd_clear
  7. pte_update
  8. pte_update
  9. __ptep_test_and_clear_young
  10. ptep_get_and_clear
  11. ptep_set_wrprotect
  12. __ptep_set_access_flags
  13. pte_young

   1 /* SPDX-License-Identifier: GPL-2.0 */
   2 #ifndef _ASM_POWERPC_NOHASH_32_PGTABLE_H
   3 #define _ASM_POWERPC_NOHASH_32_PGTABLE_H
   4 
   5 #define __ARCH_USE_5LEVEL_HACK
   6 #include <asm-generic/pgtable-nopmd.h>
   7 
   8 #ifndef __ASSEMBLY__
   9 #include <linux/sched.h>
  10 #include <linux/threads.h>
  11 #include <asm/mmu.h>                    /* For sub-arch specific PPC_PIN_SIZE */
  12 #include <asm/asm-405.h>
  13 
  14 #ifdef CONFIG_44x
  15 extern int icache_44x_need_flush;
  16 #endif
  17 
  18 #endif /* __ASSEMBLY__ */
  19 
  20 #define PTE_INDEX_SIZE  PTE_SHIFT
  21 #define PMD_INDEX_SIZE  0
  22 #define PUD_INDEX_SIZE  0
  23 #define PGD_INDEX_SIZE  (32 - PGDIR_SHIFT)
  24 
  25 #define PMD_CACHE_INDEX PMD_INDEX_SIZE
  26 #define PUD_CACHE_INDEX PUD_INDEX_SIZE
  27 
  28 #ifndef __ASSEMBLY__
  29 #define PTE_TABLE_SIZE  (sizeof(pte_t) << PTE_INDEX_SIZE)
  30 #define PMD_TABLE_SIZE  0
  31 #define PUD_TABLE_SIZE  0
  32 #define PGD_TABLE_SIZE  (sizeof(pgd_t) << PGD_INDEX_SIZE)
  33 #endif  /* __ASSEMBLY__ */
  34 
  35 #define PTRS_PER_PTE    (1 << PTE_INDEX_SIZE)
  36 #define PTRS_PER_PGD    (1 << PGD_INDEX_SIZE)
  37 
  38 /*
  39  * The normal case is that PTEs are 32-bits and we have a 1-page
  40  * 1024-entry pgdir pointing to 1-page 1024-entry PTE pages.  -- paulus
  41  *
  42  * For any >32-bit physical address platform, we can use the following
  43  * two level page table layout where the pgdir is 8KB and the MS 13 bits
  44  * are an index to the second level table.  The combined pgdir/pmd first
  45  * level has 2048 entries and the second level has 512 64-bit PTE entries.
  46  * -Matt
  47  */
  48 /* PGDIR_SHIFT determines what a top-level page table entry can map */
  49 #define PGDIR_SHIFT     (PAGE_SHIFT + PTE_INDEX_SIZE)
  50 #define PGDIR_SIZE      (1UL << PGDIR_SHIFT)
  51 #define PGDIR_MASK      (~(PGDIR_SIZE-1))
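     /*
      * Worked example: with 4k pages (PAGE_SHIFT = 12) and 32-bit PTEs,
      * PTE_INDEX_SIZE (PTE_SHIFT) is 10, so PGDIR_SHIFT = 22, each pgd
      * entry maps 4MB and the pgd has 1024 entries -- the "1-page
      * 1024-entry pgdir" case described in the comment above.
      */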
  52 
  53 /* Bits to mask out from a PGD to get to the PUD page */
  54 #define PGD_MASKED_BITS         0
  55 
  56 #define USER_PTRS_PER_PGD       (TASK_SIZE / PGDIR_SIZE)
  57 #define FIRST_USER_ADDRESS      0UL
  58 
  59 #define pte_ERROR(e) \
  60         pr_err("%s:%d: bad pte %llx.\n", __FILE__, __LINE__, \
  61                 (unsigned long long)pte_val(e))
  62 #define pgd_ERROR(e) \
  63         pr_err("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))
  64 
  65 #ifndef __ASSEMBLY__
  66 
  67 int map_kernel_page(unsigned long va, phys_addr_t pa, pgprot_t prot);
  68 
  69 #endif /* !__ASSEMBLY__ */
  70 
  71 
  72 /*
  73  * This is the bottom of the PKMAP area with HIGHMEM or an arbitrary
  74  * value (for now) on others, from where we can start laying out the
  75  * kernel virtual space that goes below PKMAP and FIXMAP
  76  */
  77 #include <asm/fixmap.h>
  78 
  79 /*
  80  * ioremap_bot starts at that address. Early ioremaps move down from there,
  81  * until mem_init() at which point this becomes the top of the vmalloc
  82  * and ioremap space
  83  */
  84 #ifdef CONFIG_HIGHMEM
  85 #define IOREMAP_TOP     PKMAP_BASE
  86 #else
  87 #define IOREMAP_TOP     FIXADDR_START
  88 #endif
  89 
  90 /* PPC32 shares vmalloc area with ioremap */
  91 #define IOREMAP_START   VMALLOC_START
  92 #define IOREMAP_END     VMALLOC_END
  93 
  94 /*
  95  * Just any arbitrary offset to the start of the vmalloc VM area: the
  96  * current 16MB value just means that there will be a 64MB "hole" after the
  97  * physical memory until the kernel virtual memory starts.  That means that
  98  * any out-of-bounds memory accesses will hopefully be caught.
  99  * The vmalloc() routines leave a hole of 4kB between each vmalloced
 100  * area for the same reason. ;)
 101  *
 102  * We no longer map larger than phys RAM with the BATs so we don't have
 103  * to worry about the VMALLOC_OFFSET causing problems.  We do have to worry
 104  * about clashes between our early calls to ioremap() that start growing down
 105  * from IOREMAP_TOP being run into the VM area allocations (growing upwards
 106  * from VMALLOC_START).  For this reason we have ioremap_bot to check when
 107  * we actually run into our mappings set up in early boot with the VM
 108  * system.  This really does become a problem for machines with good amounts
 109  * of RAM.  -- Cort
 110  */
 111 #define VMALLOC_OFFSET (0x1000000) /* 16M */
 112 #ifdef PPC_PIN_SIZE
 113 #define VMALLOC_START (((_ALIGN((long)high_memory, PPC_PIN_SIZE) + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1)))
 114 #else
 115 #define VMALLOC_START ((((long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1)))
 116 #endif
 117 #define VMALLOC_END     ioremap_bot
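     /*
      * Worked example of the VMALLOC_START arithmetic above, taking
      * high_memory == 0xd0800000 as an arbitrary illustration:
      *   0xd0800000 + 0x01000000 = 0xd1800000
      *   0xd1800000 & ~0x00ffffff = 0xd1000000
      * i.e. the end of lowmem is rounded up to the next VMALLOC_OFFSET
      * boundary; the gap below VMALLOC_START is a full 16MB only when
      * high_memory is already 16MB aligned.
      */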
 118 
 119 /*
 120  * Bits in a linux-style PTE.  These match the bits in the
 121  * (hardware-defined) PowerPC PTE as closely as possible.
 122  */
 123 
 124 #if defined(CONFIG_40x)
 125 #include <asm/nohash/32/pte-40x.h>
 126 #elif defined(CONFIG_44x)
 127 #include <asm/nohash/32/pte-44x.h>
 128 #elif defined(CONFIG_FSL_BOOKE) && defined(CONFIG_PTE_64BIT)
 129 #include <asm/nohash/pte-book3e.h>
 130 #elif defined(CONFIG_FSL_BOOKE)
 131 #include <asm/nohash/32/pte-fsl-booke.h>
 132 #elif defined(CONFIG_PPC_8xx)
 133 #include <asm/nohash/32/pte-8xx.h>
 134 #endif
 135 
 136 /*
 137  * Location of the PFN in the PTE. Most 32-bit platforms use the same
 138  * value as PAGE_SHIFT here (ie, naturally aligned).
 139  * Platforms that don't simply pre-define the value, so we don't override it here.
 140  */
 141 #ifndef PTE_RPN_SHIFT
 142 #define PTE_RPN_SHIFT   (PAGE_SHIFT)
 143 #endif
 144 
 145 /*
 146  * The mask covered by the RPN must be a ULL on 32-bit platforms with
 147  * 64-bit PTEs.
 148  */
 149 #if defined(CONFIG_PPC32) && defined(CONFIG_PTE_64BIT)
 150 #define PTE_RPN_MASK    (~((1ULL << PTE_RPN_SHIFT) - 1))
 151 #else
 152 #define PTE_RPN_MASK    (~((1UL << PTE_RPN_SHIFT) - 1))
 153 #endif
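     /*
      * A minimal sketch of how the RPN fields are used: the physical page
      * frame number is recovered from a PTE by masking and shifting, e.g.
      *
      *   pfn = (pte_val(pte) & PTE_RPN_MASK) >> PTE_RPN_SHIFT;
      *
      * The ULL mask on 32-bit builds with 64-bit PTEs keeps physical
      * address bits above bit 31 from being truncated.
      */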
 154 
 155 /*
 156  * _PAGE_CHG_MASK is the mask of bits that are to be preserved across
 157  * pgprot changes.
 158  */
 159 #define _PAGE_CHG_MASK  (PTE_RPN_MASK | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_SPECIAL)
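     /*
      * A minimal sketch of the intended use (the real pte_modify() lives in
      * the shared nohash pgtable header): a protection change keeps the RPN
      * plus the dirty/accessed/special bits and replaces everything else,
      * roughly:
      *
      *   new = __pte((pte_val(old) & _PAGE_CHG_MASK) | pgprot_val(newprot));
      */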
 160 
 161 #ifndef __ASSEMBLY__
 162 
 163 #define pte_clear(mm, addr, ptep) \
 164         do { pte_update(ptep, ~0, 0); } while (0)
 165 
 166 #ifndef pte_mkwrite
 167 static inline pte_t pte_mkwrite(pte_t pte)
 168 {
 169         return __pte(pte_val(pte) | _PAGE_RW);
 170 }
 171 #endif
 172 
 173 static inline pte_t pte_mkdirty(pte_t pte)
 174 {
 175         return __pte(pte_val(pte) | _PAGE_DIRTY);
 176 }
 177 
 178 static inline pte_t pte_mkyoung(pte_t pte)
 179 {
 180         return __pte(pte_val(pte) | _PAGE_ACCESSED);
 181 }
 182 
 183 #ifndef pte_wrprotect
 184 static inline pte_t pte_wrprotect(pte_t pte)
 185 {
 186         return __pte(pte_val(pte) & ~_PAGE_RW);
 187 }
 188 #endif
 189 
 190 static inline pte_t pte_mkexec(pte_t pte)
 191 {
 192         return __pte(pte_val(pte) | _PAGE_EXEC);
 193 }
 194 
 195 #define pmd_none(pmd)           (!pmd_val(pmd))
 196 #define pmd_bad(pmd)            (pmd_val(pmd) & _PMD_BAD)
 197 #define pmd_present(pmd)        (pmd_val(pmd) & _PMD_PRESENT_MASK)
 198 static inline void pmd_clear(pmd_t *pmdp)
 199 {
 200         *pmdp = __pmd(0);
 201 }
 202 
 203 
 204 
 205 /*
 206  * PTE updates. This function is called whenever an existing
 207  * valid PTE is updated. This does -not- include set_pte_at()
 208  * which nowadays only sets a new PTE.
 209  *
 210  * Depending on the type of MMU, we may need to use atomic updates
 211  * and the PTE may be either 32 or 64 bits wide. In the latter case,
 212  * when using atomic updates, only the low part of the PTE is
 213  * accessed atomically.
 214  *
 215  * In addition, on 44x, we also maintain a global flag indicating
 216  * that an executable user mapping was modified, which is needed
 217  * to properly flush the virtually tagged instruction cache of
 218  * those implementations.
 219  */
 220 #ifndef CONFIG_PTE_64BIT
 221 static inline unsigned long pte_update(pte_t *p,
 222                                        unsigned long clr,
 223                                        unsigned long set)
 224 {
 225 #ifdef PTE_ATOMIC_UPDATES
 226         unsigned long old, tmp;
 227 
 228         __asm__ __volatile__("\
 229 1:      lwarx   %0,0,%3\n\
 230         andc    %1,%0,%4\n\
 231         or      %1,%1,%5\n"
 232         PPC405_ERR77(0,%3)
 233 "       stwcx.  %1,0,%3\n\
 234         bne-    1b"
 235         : "=&r" (old), "=&r" (tmp), "=m" (*p)
 236         : "r" (p), "r" (clr), "r" (set), "m" (*p)
 237         : "cc" );
 238 #else /* PTE_ATOMIC_UPDATES */
 239         unsigned long old = pte_val(*p);
 240         unsigned long new = (old & ~clr) | set;
 241 
 242 #if defined(CONFIG_PPC_8xx) && defined(CONFIG_PPC_16K_PAGES)
 243         p->pte = p->pte1 = p->pte2 = p->pte3 = new;
 244 #else
 245         *p = __pte(new);
 246 #endif
 247 #endif /* !PTE_ATOMIC_UPDATES */
 248 
 249 #ifdef CONFIG_44x
 250         if ((old & _PAGE_USER) && (old & _PAGE_EXEC))
 251                 icache_44x_need_flush = 1;
 252 #endif
 253         return old;
 254 }
 255 #else /* CONFIG_PTE_64BIT */
 256 static inline unsigned long long pte_update(pte_t *p,
 257                                             unsigned long clr,
 258                                             unsigned long set)
 259 {
 260 #ifdef PTE_ATOMIC_UPDATES
 261         unsigned long long old;
 262         unsigned long tmp;
 263 
 264         __asm__ __volatile__("\
 265 1:      lwarx   %L0,0,%4\n\
 266         lwzx    %0,0,%3\n\
 267         andc    %1,%L0,%5\n\
 268         or      %1,%1,%6\n"
 269         PPC405_ERR77(0,%3)
 270 "       stwcx.  %1,0,%4\n\
 271         bne-    1b"
 272         : "=&r" (old), "=&r" (tmp), "=m" (*p)
 273         : "r" (p), "r" ((unsigned long)(p) + 4), "r" (clr), "r" (set), "m" (*p)
 274         : "cc" );
 275 #else /* PTE_ATOMIC_UPDATES */
 276         unsigned long long old = pte_val(*p);
 277         *p = __pte((old & ~(unsigned long long)clr) | set);
 278 #endif /* !PTE_ATOMIC_UPDATES */
 279 
 280 #ifdef CONFIG_44x
 281         if ((old & _PAGE_USER) && (old & _PAGE_EXEC))
 282                 icache_44x_need_flush = 1;
 283 #endif
 284         return old;
 285 }
 286 #endif /* CONFIG_PTE_64BIT */
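     /*
      * A minimal usage sketch (hypothetical helper, not part of this
      * header): callers express read-modify-write operations on a PTE as a
      * (clear-mask, set-mask) pair, exactly like the accessors below do.
      */
     static inline int example_test_and_clear_dirty(pte_t *ptep)
     {
             /* Clear _PAGE_DIRTY (atomically when PTE_ATOMIC_UPDATES)... */
             unsigned long old = pte_update(ptep, _PAGE_DIRTY, 0);

             /* ...and report whether it was set beforehand. */
             return (old & _PAGE_DIRTY) != 0;
     }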
 287 
 288 #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
 289 static inline int __ptep_test_and_clear_young(unsigned int context, unsigned long addr, pte_t *ptep)
 290 {
 291         unsigned long old;
 292         old = pte_update(ptep, _PAGE_ACCESSED, 0);
 293         return (old & _PAGE_ACCESSED) != 0;
 294 }
 295 #define ptep_test_and_clear_young(__vma, __addr, __ptep) \
 296         __ptep_test_and_clear_young((__vma)->vm_mm->context.id, __addr, __ptep)
 297 
 298 #define __HAVE_ARCH_PTEP_GET_AND_CLEAR
 299 static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
 300                                        pte_t *ptep)
 301 {
 302         return __pte(pte_update(ptep, ~0, 0));
 303 }
 304 
 305 #define __HAVE_ARCH_PTEP_SET_WRPROTECT
 306 static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
 307                                       pte_t *ptep)
 308 {
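             /*
              * Derive the masks from pte_wrprotect() itself: 'clr' is what
              * it removes from an all-ones PTE, 'set' is what it adds to an
              * all-zeroes one, so this stays correct even when a sub-arch
              * overrides pte_wrprotect().
              */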
 309         unsigned long clr = ~pte_val(pte_wrprotect(__pte(~0)));
 310         unsigned long set = pte_val(pte_wrprotect(__pte(0)));
 311 
 312         pte_update(ptep, clr, set);
 313 }
 314 
 315 static inline void __ptep_set_access_flags(struct vm_area_struct *vma,
 316                                            pte_t *ptep, pte_t entry,
 317                                            unsigned long address,
 318                                            int psize)
 319 {
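             /*
              * pte_set is what the mkwrite/mkdirty/mkyoung/mkexec helpers
              * turn on in an empty PTE, pte_clr is what survives them in an
              * all-ones PTE.  'set' is then the requested subset of those
              * bits to turn on, 'clr' the ones to turn off, applied in a
              * single pte_update().
              */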
 320         pte_t pte_set = pte_mkyoung(pte_mkdirty(pte_mkwrite(pte_mkexec(__pte(0)))));
 321         pte_t pte_clr = pte_mkyoung(pte_mkdirty(pte_mkwrite(pte_mkexec(__pte(~0)))));
 322         unsigned long set = pte_val(entry) & pte_val(pte_set);
 323         unsigned long clr = ~pte_val(entry) & ~pte_val(pte_clr);
 324 
 325         pte_update(ptep, clr, set);
 326 
 327         flush_tlb_page(vma, address);
 328 }
 329 
 330 static inline int pte_young(pte_t pte)
 331 {
 332         return pte_val(pte) & _PAGE_ACCESSED;
 333 }
 334 
 335 #define __HAVE_ARCH_PTE_SAME
 336 #define pte_same(A,B)   ((pte_val(A) ^ pte_val(B)) == 0)
 337 
 338 /*
 339  * Note that on Book E processors, the pmd contains the kernel virtual
 340  * (lowmem) address of the pte page.  The physical address is less useful
 341  * because everything runs with translation enabled (even the TLB miss
 342  * handler).  On everything else the pmd contains the physical address
 343  * of the pte page.  -- paulus
 344  */
 345 #ifndef CONFIG_BOOKE
 346 #define pmd_page_vaddr(pmd)     \
 347         ((unsigned long)__va(pmd_val(pmd) & ~(PTE_TABLE_SIZE - 1)))
 348 #define pmd_page(pmd)           \
 349         pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT)
 350 #else
 351 #define pmd_page_vaddr(pmd)     \
 352         ((unsigned long)(pmd_val(pmd) & ~(PTE_TABLE_SIZE - 1)))
 353 #define pmd_page(pmd)           \
 354         pfn_to_page((__pa(pmd_val(pmd)) >> PAGE_SHIFT))
 355 #endif
 356 
 357 /* to find an entry in a kernel page-table-directory */
 358 #define pgd_offset_k(address) pgd_offset(&init_mm, address)
 359 
 360 /* to find an entry in a page-table-directory */
 361 #define pgd_index(address)       ((address) >> PGDIR_SHIFT)
 362 #define pgd_offset(mm, address)  ((mm)->pgd + pgd_index(address))
 363 
 364 /* Find an entry in the third-level page table. */
 365 #define pte_index(address)              \
 366         (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
 367 #define pte_offset_kernel(dir, addr)    \
 368         (pmd_bad(*(dir)) ? NULL : (pte_t *)pmd_page_vaddr(*(dir)) + \
 369                                   pte_index(addr))
 370 #define pte_offset_map(dir, addr)               \
 371         ((pte_t *)(kmap_atomic(pmd_page(*(dir))) + \
 372                    (pmd_page_vaddr(*(dir)) & ~PAGE_MASK)) + pte_index(addr))
 373 #define pte_unmap(pte)          kunmap_atomic(pte)
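     /*
      * A minimal lookup sketch (hypothetical helper, not part of this
      * header), assuming the generic folded pud/pmd accessors that come
      * with the __ARCH_USE_5LEVEL_HACK / pgtable-nopmd setup above:
      */
     static inline pte_t *example_lookup_kernel_pte(unsigned long addr)
     {
             pgd_t *pgd = pgd_offset_k(addr);
             pud_t *pud = pud_offset(pgd, addr);     /* folded level */
             pmd_t *pmd = pmd_offset(pud, addr);     /* folded level */

             if (pmd_none(*pmd) || pmd_bad(*pmd))
                     return NULL;
             return pte_offset_kernel(pmd, addr);
     }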
 374 
 375 /*
 376  * Encode and decode a swap entry.
 377  * Note that the bits we use in a PTE for representing a swap entry
 378  * must not include the _PAGE_PRESENT bit.
 379  *   -- paulus
 380  */
 381 #define __swp_type(entry)               ((entry).val & 0x1f)
 382 #define __swp_offset(entry)             ((entry).val >> 5)
 383 #define __swp_entry(type, offset)       ((swp_entry_t) { (type) | ((offset) << 5) })
 384 #define __pte_to_swp_entry(pte)         ((swp_entry_t) { pte_val(pte) >> 3 })
 385 #define __swp_entry_to_pte(x)           ((pte_t) { (x).val << 3 })
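     /*
      * Worked example: __swp_entry(2, 100) gives val = 2 | (100 << 5) = 0xc82,
      * which __swp_entry_to_pte() stores as a PTE value of 0xc82 << 3 = 0x6410.
      * Decoding reverses it: 0x6410 >> 3 = 0xc82, type = 0xc82 & 0x1f = 2,
      * offset = 0xc82 >> 5 = 100.  The << 3 keeps the low-order PTE bits
      * clear, which is how _PAGE_PRESENT stays out of the encoding.
      */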
 386 
 387 #endif /* !__ASSEMBLY__ */
 388 
 389 #endif /* __ASM_POWERPC_NOHASH_32_PGTABLE_H */
