root/arch/powerpc/include/asm/book3s/32/pgtable.h


DEFINITIONS

This source file includes the following definitions.
  1. pte_user
  2. pmd_clear
  3. pte_update
  4. pte_update
  5. __ptep_test_and_clear_young
  6. ptep_get_and_clear
  7. ptep_set_wrprotect
  8. __ptep_set_access_flags
  9. pte_write
  10. pte_read
  11. pte_dirty
  12. pte_young
  13. pte_special
  14. pte_none
  15. pte_exec
  16. pte_present
  17. pte_hw_valid
  18. pte_hashpte
  19. pte_ci
  20. pte_access_permitted
  21. pfn_pte
  22. pte_pfn
  23. pte_wrprotect
  24. pte_exprotect
  25. pte_mkclean
  26. pte_mkold
  27. pte_mkexec
  28. pte_mkpte
  29. pte_mkwrite
  30. pte_mkdirty
  31. pte_mkyoung
  32. pte_mkspecial
  33. pte_mkhuge
  34. pte_mkprivileged
  35. pte_mkuser
  36. pte_modify
  37. __set_pte_at
  38. pgprot_noncached
  39. pgprot_noncached_wc
  40. pgprot_cached
  41. pgprot_cached_wthru
  42. pgprot_cached_noncoherent
  43. pgprot_writecombine

   1 /* SPDX-License-Identifier: GPL-2.0 */
   2 #ifndef _ASM_POWERPC_BOOK3S_32_PGTABLE_H
   3 #define _ASM_POWERPC_BOOK3S_32_PGTABLE_H
   4 
   5 #define __ARCH_USE_5LEVEL_HACK
   6 #include <asm-generic/pgtable-nopmd.h>
   7 
   8 #include <asm/book3s/32/hash.h>
   9 
  10 /* And here we include common definitions */
  11 
  12 #define _PAGE_KERNEL_RO         0
  13 #define _PAGE_KERNEL_ROX        (_PAGE_EXEC)
  14 #define _PAGE_KERNEL_RW         (_PAGE_DIRTY | _PAGE_RW)
  15 #define _PAGE_KERNEL_RWX        (_PAGE_DIRTY | _PAGE_RW | _PAGE_EXEC)
  16 
  17 #define _PAGE_HPTEFLAGS _PAGE_HASHPTE
  18 
  19 #ifndef __ASSEMBLY__
  20 
  21 static inline bool pte_user(pte_t pte)
  22 {
  23         return pte_val(pte) & _PAGE_USER;
  24 }
  25 #endif /* __ASSEMBLY__ */
  26 
  27 /*
   28  * Location of the PFN in the PTE. Most 32-bit platforms use the same
   29  * value as PAGE_SHIFT here (ie, naturally aligned).
   30  * Platforms that differ pre-define the value themselves, so we don't override it here.
  31  */
  32 #define PTE_RPN_SHIFT   (PAGE_SHIFT)
  33 
  34 /*
  35  * The mask covered by the RPN must be a ULL on 32-bit platforms with
  36  * 64-bit PTEs.
  37  */
  38 #ifdef CONFIG_PTE_64BIT
  39 #define PTE_RPN_MASK    (~((1ULL << PTE_RPN_SHIFT) - 1))
  40 #else
  41 #define PTE_RPN_MASK    (~((1UL << PTE_RPN_SHIFT) - 1))
  42 #endif
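/*
 * Worked example (illustrative, assuming the usual 4 KB page size, so
 * PAGE_SHIFT = 12 and PTE_RPN_SHIFT = 12): with 32-bit PTEs the mask is
 * ~0xfff = 0xfffff000, while with CONFIG_PTE_64BIT the ULL literal keeps
 * the upper 32 bits set (0xfffffffffffff000ULL) so PFNs of physical
 * addresses above 4 GB are not truncated.
 */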
  43 
  44 /*
   45  * _PAGE_CHG_MASK is the set of bits that are to be preserved across
   46  * pgprot changes.
  47  */
  48 #define _PAGE_CHG_MASK  (PTE_RPN_MASK | _PAGE_HASHPTE | _PAGE_DIRTY | \
  49                          _PAGE_ACCESSED | _PAGE_SPECIAL)
  50 
  51 /*
  52  * We define 2 sets of base prot bits, one for basic pages (ie,
   53  * cacheable kernel and user pages) and one for non-cacheable
  54  * pages. We always set _PAGE_COHERENT when SMP is enabled or
  55  * the processor might need it for DMA coherency.
  56  */
  57 #define _PAGE_BASE_NC   (_PAGE_PRESENT | _PAGE_ACCESSED)
  58 #define _PAGE_BASE      (_PAGE_BASE_NC | _PAGE_COHERENT)
  59 
  60 /*
  61  * Permission masks used to generate the __P and __S table.
  62  *
   63  * Note: __pgprot is defined in arch/powerpc/include/asm/page.h
  64  *
  65  * Write permissions imply read permissions for now.
  66  */
  67 #define PAGE_NONE       __pgprot(_PAGE_BASE)
  68 #define PAGE_SHARED     __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW)
  69 #define PAGE_SHARED_X   __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW | _PAGE_EXEC)
  70 #define PAGE_COPY       __pgprot(_PAGE_BASE | _PAGE_USER)
  71 #define PAGE_COPY_X     __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
  72 #define PAGE_READONLY   __pgprot(_PAGE_BASE | _PAGE_USER)
  73 #define PAGE_READONLY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
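/*
 * Worked expansion (illustrative): PAGE_SHARED above resolves to
 * _PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_COHERENT | _PAGE_USER | _PAGE_RW,
 * i.e. the cacheable base bits plus user access and write permission.
 * Note that PAGE_COPY carries the same bits as PAGE_READONLY: private
 * writable mappings start out read-only and are made writable on the
 * first write fault (copy-on-write).
 */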
  74 
  75 /* Permission masks used for kernel mappings */
  76 #define PAGE_KERNEL     __pgprot(_PAGE_BASE | _PAGE_KERNEL_RW)
  77 #define PAGE_KERNEL_NC  __pgprot(_PAGE_BASE_NC | _PAGE_KERNEL_RW | _PAGE_NO_CACHE)
  78 #define PAGE_KERNEL_NCG __pgprot(_PAGE_BASE_NC | _PAGE_KERNEL_RW | \
  79                                  _PAGE_NO_CACHE | _PAGE_GUARDED)
  80 #define PAGE_KERNEL_X   __pgprot(_PAGE_BASE | _PAGE_KERNEL_RWX)
  81 #define PAGE_KERNEL_RO  __pgprot(_PAGE_BASE | _PAGE_KERNEL_RO)
  82 #define PAGE_KERNEL_ROX __pgprot(_PAGE_BASE | _PAGE_KERNEL_ROX)
  83 
  84 /*
  85  * Protection used for kernel text. We want the debuggers to be able to
  86  * set breakpoints anywhere, so don't write protect the kernel text
  87  * on platforms where such control is possible.
  88  */
  89 #if defined(CONFIG_KGDB) || defined(CONFIG_XMON) || defined(CONFIG_BDI_SWITCH) ||\
  90         defined(CONFIG_KPROBES) || defined(CONFIG_DYNAMIC_FTRACE)
  91 #define PAGE_KERNEL_TEXT        PAGE_KERNEL_X
  92 #else
  93 #define PAGE_KERNEL_TEXT        PAGE_KERNEL_ROX
  94 #endif
  95 
   96 /* Make module code happy. We don't set RO yet */
  97 #define PAGE_KERNEL_EXEC        PAGE_KERNEL_X
  98 
  99 /* Advertise special mapping type for AGP */
 100 #define PAGE_AGP                (PAGE_KERNEL_NC)
 101 #define HAVE_PAGE_AGP
 102 
 103 #define PTE_INDEX_SIZE  PTE_SHIFT
 104 #define PMD_INDEX_SIZE  0
 105 #define PUD_INDEX_SIZE  0
 106 #define PGD_INDEX_SIZE  (32 - PGDIR_SHIFT)
 107 
 108 #define PMD_CACHE_INDEX PMD_INDEX_SIZE
 109 #define PUD_CACHE_INDEX PUD_INDEX_SIZE
 110 
 111 #ifndef __ASSEMBLY__
 112 #define PTE_TABLE_SIZE  (sizeof(pte_t) << PTE_INDEX_SIZE)
 113 #define PMD_TABLE_SIZE  0
 114 #define PUD_TABLE_SIZE  0
 115 #define PGD_TABLE_SIZE  (sizeof(pgd_t) << PGD_INDEX_SIZE)
 116 #endif  /* __ASSEMBLY__ */
 117 
 118 #define PTRS_PER_PTE    (1 << PTE_INDEX_SIZE)
 119 #define PTRS_PER_PGD    (1 << PGD_INDEX_SIZE)
 120 
 121 /*
 122  * The normal case is that PTEs are 32-bits and we have a 1-page
 123  * 1024-entry pgdir pointing to 1-page 1024-entry PTE pages.  -- paulus
 124  *
 125  * For any >32-bit physical address platform, we can use the following
 126  * two level page table layout where the pgdir is 8KB and the MS 13 bits
 127  * are an index to the second level table.  The combined pgdir/pmd first
 128  * level has 2048 entries and the second level has 512 64-bit PTE entries.
 129  * -Matt
 130  */
 131 /* PGDIR_SHIFT determines what a top-level page table entry can map */
 132 #define PGDIR_SHIFT     (PAGE_SHIFT + PTE_INDEX_SIZE)
 133 #define PGDIR_SIZE      (1UL << PGDIR_SHIFT)
 134 #define PGDIR_MASK      (~(PGDIR_SIZE-1))
 135 
 136 #define USER_PTRS_PER_PGD       (TASK_SIZE / PGDIR_SIZE)
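/*
 * Worked example (illustrative, assuming 4 KB pages and a typical
 * TASK_SIZE of 0xc0000000): with 32-bit PTEs PTE_INDEX_SIZE is 10, so
 * PGDIR_SHIFT = 22, PGDIR_SIZE = 4 MB, PTRS_PER_PTE = PTRS_PER_PGD = 1024
 * and USER_PTRS_PER_PGD = 3 GB / 4 MB = 768.  With CONFIG_PTE_64BIT,
 * PTE_INDEX_SIZE drops to 9, giving the 2048-entry pgdir and 512-entry
 * PTE pages described in the comment above.
 */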
 137 
 138 #ifndef __ASSEMBLY__
 139 
 140 int map_kernel_page(unsigned long va, phys_addr_t pa, pgprot_t prot);
 141 
 142 #endif /* !__ASSEMBLY__ */
 143 
 144 /*
 145  * This is the bottom of the PKMAP area with HIGHMEM or an arbitrary
  146  * value (for now) on others, from where we can start laying out the
  147  * kernel virtual space that goes below PKMAP and FIXMAP.
 148  */
 149 #include <asm/fixmap.h>
 150 
 151 /*
 152  * ioremap_bot starts at that address. Early ioremaps move down from there,
 153  * until mem_init() at which point this becomes the top of the vmalloc
 154  * and ioremap space
 155  */
 156 #ifdef CONFIG_HIGHMEM
 157 #define IOREMAP_TOP     PKMAP_BASE
 158 #else
 159 #define IOREMAP_TOP     FIXADDR_START
 160 #endif
 161 
 162 /* PPC32 shares vmalloc area with ioremap */
 163 #define IOREMAP_START   VMALLOC_START
 164 #define IOREMAP_END     VMALLOC_END
 165 
 166 /*
 167  * Just any arbitrary offset to the start of the vmalloc VM area: the
  168  * current 16MB value just means that there will be a 16MB "hole" after the
 169  * physical memory until the kernel virtual memory starts.  That means that
 170  * any out-of-bounds memory accesses will hopefully be caught.
  171  * The vmalloc() routines leave a hole of 4kB between each vmalloced
 172  * area for the same reason. ;)
 173  *
 174  * We no longer map larger than phys RAM with the BATs so we don't have
 175  * to worry about the VMALLOC_OFFSET causing problems.  We do have to worry
 176  * about clashes between our early calls to ioremap() that start growing down
  177  * from IOREMAP_TOP being run into the VM area allocations (growing upwards
 178  * from VMALLOC_START).  For this reason we have ioremap_bot to check when
 179  * we actually run into our mappings setup in the early boot with the VM
 180  * system.  This really does become a problem for machines with good amounts
 181  * of RAM.  -- Cort
 182  */
 183 #define VMALLOC_OFFSET (0x1000000) /* 16M */
 184 
 185 /*
 186  * With CONFIG_STRICT_KERNEL_RWX, kernel segments are set NX. But when modules
 187  * are used, NX cannot be set on VMALLOC space. So vmalloc VM space and linear
 188  * memory shall not share segments.
 189  */
 190 #if defined(CONFIG_STRICT_KERNEL_RWX) && defined(CONFIG_MODULES)
 191 #define VMALLOC_START ((_ALIGN((long)high_memory, 256L << 20) + VMALLOC_OFFSET) & \
 192                        ~(VMALLOC_OFFSET - 1))
 193 #else
 194 #define VMALLOC_START ((((long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1)))
 195 #endif
 196 #define VMALLOC_END     ioremap_bot
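/*
 * Worked example (illustrative, assuming PAGE_OFFSET = 0xc0000000 and
 * 432 MB of lowmem, i.e. high_memory = 0xdb000000): the plain formula
 * gives VMALLOC_START = (0xdb000000 + 0x01000000) & ~0x00ffffff
 * = 0xdc000000, while the STRICT_KERNEL_RWX + MODULES variant first
 * aligns high_memory up to a 256 MB segment boundary (0xe0000000) and
 * yields 0xe1000000, keeping vmalloc out of the linear-map segments.
 */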
 197 
 198 #ifndef __ASSEMBLY__
 199 #include <linux/sched.h>
 200 #include <linux/threads.h>
 201 
 202 /* Bits to mask out from a PGD to get to the PUD page */
 203 #define PGD_MASKED_BITS         0
 204 
 205 #define pte_ERROR(e) \
 206         pr_err("%s:%d: bad pte %llx.\n", __FILE__, __LINE__, \
 207                 (unsigned long long)pte_val(e))
 208 #define pgd_ERROR(e) \
 209         pr_err("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))
 210 /*
 211  * Bits in a linux-style PTE.  These match the bits in the
 212  * (hardware-defined) PowerPC PTE as closely as possible.
 213  */
 214 
 215 #define pte_clear(mm, addr, ptep) \
 216         do { pte_update(ptep, ~_PAGE_HASHPTE, 0); } while (0)
 217 
 218 #define pmd_none(pmd)           (!pmd_val(pmd))
 219 #define pmd_bad(pmd)            (pmd_val(pmd) & _PMD_BAD)
 220 #define pmd_present(pmd)        (pmd_val(pmd) & _PMD_PRESENT_MASK)
 221 static inline void pmd_clear(pmd_t *pmdp)
 222 {
 223         *pmdp = __pmd(0);
 224 }
 225 
 226 
 227 /*
 228  * When flushing the tlb entry for a page, we also need to flush the hash
 229  * table entry.  flush_hash_pages is assembler (for speed) in hashtable.S.
 230  */
 231 extern int flush_hash_pages(unsigned context, unsigned long va,
 232                             unsigned long pmdval, int count);
 233 
 234 /* Add an HPTE to the hash table */
 235 extern void add_hash_page(unsigned context, unsigned long va,
 236                           unsigned long pmdval);
 237 
 238 /* Flush an entry from the TLB/hash table */
 239 extern void flush_hash_entry(struct mm_struct *mm, pte_t *ptep,
 240                              unsigned long address);
 241 
 242 /*
 243  * PTE updates. This function is called whenever an existing
 244  * valid PTE is updated. This does -not- include set_pte_at()
 245  * which nowadays only sets a new PTE.
 246  *
 247  * Depending on the type of MMU, we may need to use atomic updates
  248  * and the PTE may be either 32 or 64 bit wide. In the latter case,
 249  * when using atomic updates, only the low part of the PTE is
 250  * accessed atomically.
 251  *
 252  * In addition, on 44x, we also maintain a global flag indicating
 253  * that an executable user mapping was modified, which is needed
 254  * to properly flush the virtually tagged instruction cache of
 255  * those implementations.
 256  */
 257 #ifndef CONFIG_PTE_64BIT
 258 static inline unsigned long pte_update(pte_t *p,
 259                                        unsigned long clr,
 260                                        unsigned long set)
 261 {
 262         unsigned long old, tmp;
 263 
 264         __asm__ __volatile__("\
 265 1:      lwarx   %0,0,%3\n\
 266         andc    %1,%0,%4\n\
 267         or      %1,%1,%5\n"
 268 "       stwcx.  %1,0,%3\n\
 269         bne-    1b"
 270         : "=&r" (old), "=&r" (tmp), "=m" (*p)
 271         : "r" (p), "r" (clr), "r" (set), "m" (*p)
 272         : "cc" );
 273 
 274         return old;
 275 }
 276 #else /* CONFIG_PTE_64BIT */
 277 static inline unsigned long long pte_update(pte_t *p,
 278                                             unsigned long clr,
 279                                             unsigned long set)
 280 {
 281         unsigned long long old;
 282         unsigned long tmp;
 283 
 284         __asm__ __volatile__("\
 285 1:      lwarx   %L0,0,%4\n\
 286         lwzx    %0,0,%3\n\
 287         andc    %1,%L0,%5\n\
 288         or      %1,%1,%6\n"
 289 "       stwcx.  %1,0,%4\n\
 290         bne-    1b"
 291         : "=&r" (old), "=&r" (tmp), "=m" (*p)
 292         : "r" (p), "r" ((unsigned long)(p) + 4), "r" (clr), "r" (set), "m" (*p)
 293         : "cc" );
 294 
 295         return old;
 296 }
 297 #endif /* CONFIG_PTE_64BIT */
 298 
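/*
 * Illustrative sketch, not part of the original header: pte_update()
 * atomically clears the 'clr' bits, sets the 'set' bits and returns the
 * old PTE value, so a caller can modify and test flags in one atomic
 * step.  The helper below is a hypothetical example, not a kernel API.
 */
#if 0	/* example only */
static inline int example_test_and_clear_dirty(pte_t *ptep)
{
	return (pte_update(ptep, _PAGE_DIRTY, 0) & _PAGE_DIRTY) != 0;
}
#endif
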
 299 /*
 300  * 2.6 calls this without flushing the TLB entry; this is wrong
  301  * for our hash-based implementation, so we fix that up here.
 302  */
 303 #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
 304 static inline int __ptep_test_and_clear_young(unsigned int context, unsigned long addr, pte_t *ptep)
 305 {
 306         unsigned long old;
 307         old = pte_update(ptep, _PAGE_ACCESSED, 0);
 308         if (old & _PAGE_HASHPTE) {
 309                 unsigned long ptephys = __pa(ptep) & PAGE_MASK;
 310                 flush_hash_pages(context, addr, ptephys, 1);
 311         }
 312         return (old & _PAGE_ACCESSED) != 0;
 313 }
 314 #define ptep_test_and_clear_young(__vma, __addr, __ptep) \
 315         __ptep_test_and_clear_young((__vma)->vm_mm->context.id, __addr, __ptep)
 316 
 317 #define __HAVE_ARCH_PTEP_GET_AND_CLEAR
 318 static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
 319                                        pte_t *ptep)
 320 {
 321         return __pte(pte_update(ptep, ~_PAGE_HASHPTE, 0));
 322 }
 323 
 324 #define __HAVE_ARCH_PTEP_SET_WRPROTECT
 325 static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
 326                                       pte_t *ptep)
 327 {
 328         pte_update(ptep, _PAGE_RW, 0);
 329 }
 330 
 331 static inline void __ptep_set_access_flags(struct vm_area_struct *vma,
 332                                            pte_t *ptep, pte_t entry,
 333                                            unsigned long address,
 334                                            int psize)
 335 {
 336         unsigned long set = pte_val(entry) &
 337                 (_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW | _PAGE_EXEC);
 338 
 339         pte_update(ptep, 0, set);
 340 
 341         flush_tlb_page(vma, address);
 342 }
 343 
 344 #define __HAVE_ARCH_PTE_SAME
 345 #define pte_same(A,B)   (((pte_val(A) ^ pte_val(B)) & ~_PAGE_HASHPTE) == 0)
 346 
 347 #define pmd_page_vaddr(pmd)     \
 348         ((unsigned long)__va(pmd_val(pmd) & ~(PTE_TABLE_SIZE - 1)))
 349 #define pmd_page(pmd)           \
 350         pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT)
 351 
 352 /* to find an entry in a kernel page-table-directory */
 353 #define pgd_offset_k(address) pgd_offset(&init_mm, address)
 354 
 355 /* to find an entry in a page-table-directory */
 356 #define pgd_index(address)       ((address) >> PGDIR_SHIFT)
 357 #define pgd_offset(mm, address)  ((mm)->pgd + pgd_index(address))
 358 
 359 /* Find an entry in the third-level page table.. */
 360 #define pte_index(address)              \
 361         (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
 362 #define pte_offset_kernel(dir, addr)    \
 363         ((pte_t *) pmd_page_vaddr(*(dir)) + pte_index(addr))
 364 #define pte_offset_map(dir, addr)               \
 365         ((pte_t *)(kmap_atomic(pmd_page(*(dir))) + \
 366                    (pmd_page_vaddr(*(dir)) & ~PAGE_MASK)) + pte_index(addr))
 367 #define pte_unmap(pte)          kunmap_atomic(pte)
 368 
 369 /*
 370  * Encode and decode a swap entry.
 371  * Note that the bits we use in a PTE for representing a swap entry
 372  * must not include the _PAGE_PRESENT bit or the _PAGE_HASHPTE bit (if used).
 373  *   -- paulus
 374  */
 375 #define __swp_type(entry)               ((entry).val & 0x1f)
 376 #define __swp_offset(entry)             ((entry).val >> 5)
 377 #define __swp_entry(type, offset)       ((swp_entry_t) { (type) | ((offset) << 5) })
 378 #define __pte_to_swp_entry(pte)         ((swp_entry_t) { pte_val(pte) >> 3 })
 379 #define __swp_entry_to_pte(x)           ((pte_t) { (x).val << 3 })
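/*
 * Worked example (illustrative, assuming the usual book3s/32 layout in
 * which _PAGE_PRESENT and _PAGE_HASHPTE occupy the low-order PTE bits):
 * __swp_entry(2, 0x123).val = 2 | (0x123 << 5) = 0x2462, and
 * __swp_entry_to_pte() stores it as 0x2462 << 3 = 0x12310; the three low
 * bits stay clear, so a swap entry can never appear present or hashed.
 */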
 380 
 381 /* Generic accessors to PTE bits */
 382 static inline int pte_write(pte_t pte)          { return !!(pte_val(pte) & _PAGE_RW);}
 383 static inline int pte_read(pte_t pte)           { return 1; }
 384 static inline int pte_dirty(pte_t pte)          { return !!(pte_val(pte) & _PAGE_DIRTY); }
 385 static inline int pte_young(pte_t pte)          { return !!(pte_val(pte) & _PAGE_ACCESSED); }
 386 static inline int pte_special(pte_t pte)        { return !!(pte_val(pte) & _PAGE_SPECIAL); }
 387 static inline int pte_none(pte_t pte)           { return (pte_val(pte) & ~_PTE_NONE_MASK) == 0; }
 388 static inline bool pte_exec(pte_t pte)          { return pte_val(pte) & _PAGE_EXEC; }
 389 
 390 static inline int pte_present(pte_t pte)
 391 {
 392         return pte_val(pte) & _PAGE_PRESENT;
 393 }
 394 
 395 static inline bool pte_hw_valid(pte_t pte)
 396 {
 397         return pte_val(pte) & _PAGE_PRESENT;
 398 }
 399 
 400 static inline bool pte_hashpte(pte_t pte)
 401 {
 402         return !!(pte_val(pte) & _PAGE_HASHPTE);
 403 }
 404 
 405 static inline bool pte_ci(pte_t pte)
 406 {
 407         return !!(pte_val(pte) & _PAGE_NO_CACHE);
 408 }
 409 
 410 /*
  411  * We only find page table entries in the last level,
  412  * hence there is no need for other accessors.
 413  */
 414 #define pte_access_permitted pte_access_permitted
 415 static inline bool pte_access_permitted(pte_t pte, bool write)
 416 {
 417         /*
  418          * A read-only access is controlled by the _PAGE_USER bit.
  419          * There is no separate read bit: write and execute imply read.
 420          */
 421         if (!pte_present(pte) || !pte_user(pte) || !pte_read(pte))
 422                 return false;
 423 
 424         if (write && !pte_write(pte))
 425                 return false;
 426 
 427         return true;
 428 }
 429 
 430 /* Conversion functions: convert a page and protection to a page entry,
 431  * and a page entry and page directory to the page they refer to.
 432  *
 433  * Even if PTEs can be unsigned long long, a PFN is always an unsigned
 434  * long for now.
 435  */
 436 static inline pte_t pfn_pte(unsigned long pfn, pgprot_t pgprot)
 437 {
 438         return __pte(((pte_basic_t)(pfn) << PTE_RPN_SHIFT) |
 439                      pgprot_val(pgprot));
 440 }
 441 
 442 static inline unsigned long pte_pfn(pte_t pte)
 443 {
 444         return pte_val(pte) >> PTE_RPN_SHIFT;
 445 }
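
/*
 * Illustrative sketch, not part of the original header: pfn_pte() and
 * pte_pfn() are inverses with respect to the PFN field, since all
 * protection bits live below PTE_RPN_SHIFT.  Hypothetical helper,
 * example only.
 */
#if 0	/* example only */
static inline pte_t example_kernel_pte(unsigned long pfn)
{
	/* pte_pfn(example_kernel_pte(pfn)) == pfn */
	return pfn_pte(pfn, PAGE_KERNEL);
}
#endif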
 446 
 447 /* Generic modifiers for PTE bits */
 448 static inline pte_t pte_wrprotect(pte_t pte)
 449 {
 450         return __pte(pte_val(pte) & ~_PAGE_RW);
 451 }
 452 
 453 static inline pte_t pte_exprotect(pte_t pte)
 454 {
 455         return __pte(pte_val(pte) & ~_PAGE_EXEC);
 456 }
 457 
 458 static inline pte_t pte_mkclean(pte_t pte)
 459 {
 460         return __pte(pte_val(pte) & ~_PAGE_DIRTY);
 461 }
 462 
 463 static inline pte_t pte_mkold(pte_t pte)
 464 {
 465         return __pte(pte_val(pte) & ~_PAGE_ACCESSED);
 466 }
 467 
 468 static inline pte_t pte_mkexec(pte_t pte)
 469 {
 470         return __pte(pte_val(pte) | _PAGE_EXEC);
 471 }
 472 
 473 static inline pte_t pte_mkpte(pte_t pte)
 474 {
 475         return pte;
 476 }
 477 
 478 static inline pte_t pte_mkwrite(pte_t pte)
 479 {
 480         return __pte(pte_val(pte) | _PAGE_RW);
 481 }
 482 
 483 static inline pte_t pte_mkdirty(pte_t pte)
 484 {
 485         return __pte(pte_val(pte) | _PAGE_DIRTY);
 486 }
 487 
 488 static inline pte_t pte_mkyoung(pte_t pte)
 489 {
 490         return __pte(pte_val(pte) | _PAGE_ACCESSED);
 491 }
 492 
 493 static inline pte_t pte_mkspecial(pte_t pte)
 494 {
 495         return __pte(pte_val(pte) | _PAGE_SPECIAL);
 496 }
 497 
 498 static inline pte_t pte_mkhuge(pte_t pte)
 499 {
 500         return pte;
 501 }
 502 
 503 static inline pte_t pte_mkprivileged(pte_t pte)
 504 {
 505         return __pte(pte_val(pte) & ~_PAGE_USER);
 506 }
 507 
 508 static inline pte_t pte_mkuser(pte_t pte)
 509 {
 510         return __pte(pte_val(pte) | _PAGE_USER);
 511 }
 512 
 513 static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 514 {
 515         return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot));
 516 }
 517 
 518 
 519 
  520 /* This low-level function performs the actual PTE insertion.
  521  * Setting the PTE depends on the MMU type and other factors. It's
  522  * a horrible mess that I'm not going to try to clean up now but
 523  * I'm keeping it in one place rather than spread around
 524  */
 525 static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
 526                                 pte_t *ptep, pte_t pte, int percpu)
 527 {
 528 #if defined(CONFIG_SMP) && !defined(CONFIG_PTE_64BIT)
 529         /* First case is 32-bit Hash MMU in SMP mode with 32-bit PTEs. We use the
 530          * helper pte_update() which does an atomic update. We need to do that
 531          * because a concurrent invalidation can clear _PAGE_HASHPTE. If it's a
 532          * per-CPU PTE such as a kmap_atomic, we do a simple update preserving
 533          * the hash bits instead (ie, same as the non-SMP case)
 534          */
 535         if (percpu)
 536                 *ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE)
 537                               | (pte_val(pte) & ~_PAGE_HASHPTE));
 538         else
 539                 pte_update(ptep, ~_PAGE_HASHPTE, pte_val(pte));
 540 
 541 #elif defined(CONFIG_PTE_64BIT)
 542         /* Second case is 32-bit with 64-bit PTE.  In this case, we
 543          * can just store as long as we do the two halves in the right order
 544          * with a barrier in between. This is possible because we take care,
 545          * in the hash code, to pre-invalidate if the PTE was already hashed,
 546          * which synchronizes us with any concurrent invalidation.
  547          * In the percpu case, we also fall back to the simple update preserving
 548          * the hash bits
 549          */
 550         if (percpu) {
 551                 *ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE)
 552                               | (pte_val(pte) & ~_PAGE_HASHPTE));
 553                 return;
 554         }
 555         if (pte_val(*ptep) & _PAGE_HASHPTE)
 556                 flush_hash_entry(mm, ptep, addr);
 557         __asm__ __volatile__("\
 558                 stw%U0%X0 %2,%0\n\
 559                 eieio\n\
 560                 stw%U0%X0 %L2,%1"
 561         : "=m" (*ptep), "=m" (*((unsigned char *)ptep+4))
 562         : "r" (pte) : "memory");
 563 
 564 #else
 565         /* Third case is 32-bit hash table in UP mode, we need to preserve
 566          * the _PAGE_HASHPTE bit since we may not have invalidated the previous
 567          * translation in the hash yet (done in a subsequent flush_tlb_xxx())
  568          * and so we need to keep track that this PTE needs invalidating
 569          */
 570         *ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE)
 571                       | (pte_val(pte) & ~_PAGE_HASHPTE));
 572 #endif
 573 }
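
/*
 * Illustrative sketch, not part of the original header: new translations
 * normally reach __set_pte_at() via the generic powerpc set_pte_at(),
 * which passes percpu = 0; percpu = 1 is reserved for per-CPU mappings
 * such as kmap_atomic slots.  The helper below is hypothetical.
 */
#if 0	/* example only */
static inline void example_install_pte(struct mm_struct *mm, unsigned long addr,
				       pte_t *ptep, pte_t pte)
{
	__set_pte_at(mm, addr, ptep, pte, 0);	/* regular, non-percpu insertion */
}
#endif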
 574 
 575 /*
 576  * Macro to mark a page protection value as "uncacheable".
 577  */
 578 
 579 #define _PAGE_CACHE_CTL (_PAGE_COHERENT | _PAGE_GUARDED | _PAGE_NO_CACHE | \
 580                          _PAGE_WRITETHRU)
 581 
 582 #define pgprot_noncached pgprot_noncached
 583 static inline pgprot_t pgprot_noncached(pgprot_t prot)
 584 {
 585         return __pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) |
 586                         _PAGE_NO_CACHE | _PAGE_GUARDED);
 587 }
 588 
 589 #define pgprot_noncached_wc pgprot_noncached_wc
 590 static inline pgprot_t pgprot_noncached_wc(pgprot_t prot)
 591 {
 592         return __pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) |
 593                         _PAGE_NO_CACHE);
 594 }
 595 
 596 #define pgprot_cached pgprot_cached
 597 static inline pgprot_t pgprot_cached(pgprot_t prot)
 598 {
 599         return __pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) |
 600                         _PAGE_COHERENT);
 601 }
 602 
 603 #define pgprot_cached_wthru pgprot_cached_wthru
 604 static inline pgprot_t pgprot_cached_wthru(pgprot_t prot)
 605 {
 606         return __pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) |
 607                         _PAGE_COHERENT | _PAGE_WRITETHRU);
 608 }
 609 
 610 #define pgprot_cached_noncoherent pgprot_cached_noncoherent
 611 static inline pgprot_t pgprot_cached_noncoherent(pgprot_t prot)
 612 {
 613         return __pgprot(pgprot_val(prot) & ~_PAGE_CACHE_CTL);
 614 }
 615 
 616 #define pgprot_writecombine pgprot_writecombine
 617 static inline pgprot_t pgprot_writecombine(pgprot_t prot)
 618 {
 619         return pgprot_noncached_wc(prot);
 620 }
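
/*
 * Illustrative sketch, not part of the original header: a driver mapping
 * MMIO into userspace would typically apply one of the helpers above to
 * the vma protection before remap_pfn_range().  Hypothetical example.
 */
#if 0	/* example only */
static inline int example_mmap_mmio(struct vm_area_struct *vma, unsigned long pfn)
{
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	return remap_pfn_range(vma, vma->vm_start, pfn,
			       vma->vm_end - vma->vm_start, vma->vm_page_prot);
}
#endif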
 621 
 622 #endif /* !__ASSEMBLY__ */
 623 
 624 #endif /*  _ASM_POWERPC_BOOK3S_32_PGTABLE_H */
