arch/powerpc/include/asm/nohash/pgtable.h

DEFINITIONS

This source file includes the following definitions:
  1. pte_write
  2. pte_read
  3. pte_dirty
  4. pte_special
  5. pte_none
  6. pte_hashpte
  7. pte_ci
  8. pte_exec
  9. pte_protnone
  10. pmd_protnone
  11. pte_present
  12. pte_hw_valid
  13. pte_user
  14. pte_access_permitted
  15. pfn_pte
  16. pte_pfn
  17. pte_exprotect
  18. pte_mkclean
  19. pte_mkold
  20. pte_mkpte
  21. pte_mkspecial
  22. pte_mkhuge
  23. pte_mkprivileged
  24. pte_mkuser
  25. pte_modify
  26. __set_pte_at
  27. hugepd_ok
  28. pmd_huge
  29. pud_huge
  30. pgd_huge
  31. update_mmu_cache

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_NOHASH_PGTABLE_H
#define _ASM_POWERPC_NOHASH_PGTABLE_H

#if defined(CONFIG_PPC64)
#include <asm/nohash/64/pgtable.h>
#else
#include <asm/nohash/32/pgtable.h>
#endif

/* Permission masks used for kernel mappings */
#define PAGE_KERNEL     __pgprot(_PAGE_BASE | _PAGE_KERNEL_RW)
#define PAGE_KERNEL_NC  __pgprot(_PAGE_BASE_NC | _PAGE_KERNEL_RW | _PAGE_NO_CACHE)
#define PAGE_KERNEL_NCG __pgprot(_PAGE_BASE_NC | _PAGE_KERNEL_RW | \
                                 _PAGE_NO_CACHE | _PAGE_GUARDED)
#define PAGE_KERNEL_X   __pgprot(_PAGE_BASE | _PAGE_KERNEL_RWX)
#define PAGE_KERNEL_RO  __pgprot(_PAGE_BASE | _PAGE_KERNEL_RO)
#define PAGE_KERNEL_ROX __pgprot(_PAGE_BASE | _PAGE_KERNEL_ROX)
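
/*
 * Illustrative note: each mask above pairs the base PTE bits with a kernel
 * permission set.  For instance, an uncached, guarded mapping of device
 * registers would typically use PAGE_KERNEL_NCG, read-only kernel data uses
 * PAGE_KERNEL_RO, and executable text uses PAGE_KERNEL_X or PAGE_KERNEL_ROX.
 */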

/*
 * Protection used for kernel text. We want the debuggers to be able to
 * set breakpoints anywhere, so don't write-protect the kernel text
 * on platforms where such control is possible.
 */
#if defined(CONFIG_KGDB) || defined(CONFIG_XMON) || defined(CONFIG_BDI_SWITCH) ||\
        defined(CONFIG_KPROBES) || defined(CONFIG_DYNAMIC_FTRACE)
#define PAGE_KERNEL_TEXT        PAGE_KERNEL_X
#else
#define PAGE_KERNEL_TEXT        PAGE_KERNEL_ROX
#endif

/* Make module code happy. We don't set RO yet. */
#define PAGE_KERNEL_EXEC        PAGE_KERNEL_X

/* Advertise special mapping type for AGP */
#define PAGE_AGP                (PAGE_KERNEL_NC)
#define HAVE_PAGE_AGP

#ifndef __ASSEMBLY__

/* Generic accessors to PTE bits */
#ifndef pte_write
static inline int pte_write(pte_t pte)
{
        return pte_val(pte) & _PAGE_RW;
}
#endif
static inline int pte_read(pte_t pte)           { return 1; }
static inline int pte_dirty(pte_t pte)          { return pte_val(pte) & _PAGE_DIRTY; }
static inline int pte_special(pte_t pte)        { return pte_val(pte) & _PAGE_SPECIAL; }
static inline int pte_none(pte_t pte)           { return (pte_val(pte) & ~_PTE_NONE_MASK) == 0; }
static inline bool pte_hashpte(pte_t pte)       { return false; }
static inline bool pte_ci(pte_t pte)            { return pte_val(pte) & _PAGE_NO_CACHE; }
static inline bool pte_exec(pte_t pte)          { return pte_val(pte) & _PAGE_EXEC; }

#ifdef CONFIG_NUMA_BALANCING
/*
 * These work without NUMA balancing but the kernel does not care. See the
 * comment in include/asm-generic/pgtable.h. On powerpc, this only works
 * for user pages and always returns true for kernel pages.
 */
static inline int pte_protnone(pte_t pte)
{
        return pte_present(pte) && !pte_user(pte);
}

static inline int pmd_protnone(pmd_t pmd)
{
        return pte_protnone(pmd_pte(pmd));
}
#endif /* CONFIG_NUMA_BALANCING */

static inline int pte_present(pte_t pte)
{
        return pte_val(pte) & _PAGE_PRESENT;
}

static inline bool pte_hw_valid(pte_t pte)
{
        return pte_val(pte) & _PAGE_PRESENT;
}

/*
 * Don't just check for any non-zero bits in __PAGE_USER, since for book3e
 * and PTE_64BIT, PAGE_KERNEL_X contains _PAGE_BAP_SR which is also in
 * _PAGE_USER.  Need to explicitly match the _PAGE_BAP_UR bit in that case too.
 */
#ifndef pte_user
static inline bool pte_user(pte_t pte)
{
        return (pte_val(pte) & _PAGE_USER) == _PAGE_USER;
}
#endif

/*
 * We only find the page table entry at the last level,
 * hence there is no need for other accessors.
 */
#define pte_access_permitted pte_access_permitted
static inline bool pte_access_permitted(pte_t pte, bool write)
{
        /*
         * A read-only access is controlled by the _PAGE_USER bit.
         * We have _PAGE_READ set for WRITE and EXECUTE.
         */
        if (!pte_present(pte) || !pte_user(pte) || !pte_read(pte))
                return false;

        if (write && !pte_write(pte))
                return false;

        return true;
}
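
/*
 * For illustration: a get_user_pages()-style fast walker typically checks
 * something along the lines of
 *
 *	if (!pte_access_permitted(pte, flags & FOLL_WRITE))
 *		return 0;	/* fall back to the slow path */
 *
 * i.e. it bails out unless the PTE is present, user-accessible and, when a
 * write is requested, writable.
 */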

/* Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 *
 * Even if PTEs can be unsigned long long, a PFN is always an unsigned
 * long for now.
 */
static inline pte_t pfn_pte(unsigned long pfn, pgprot_t pgprot)
{
        return __pte(((pte_basic_t)(pfn) << PTE_RPN_SHIFT) |
                     pgprot_val(pgprot));
}

static inline unsigned long pte_pfn(pte_t pte)
{
        return pte_val(pte) >> PTE_RPN_SHIFT;
}
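
/*
 * Taken together these are inverses for the frame number: pfn_pte() packs
 * the PFN above the flag bits and ORs in the protection, and pte_pfn()
 * shifts the flag bits back out, so (illustrative)
 *
 *	pte_pfn(pfn_pte(pfn, prot)) == pfn
 *
 * as long as all protection bits live below PTE_RPN_SHIFT.
 */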

/* Generic modifiers for PTE bits */
static inline pte_t pte_exprotect(pte_t pte)
{
        return __pte(pte_val(pte) & ~_PAGE_EXEC);
}

#ifndef pte_mkclean
static inline pte_t pte_mkclean(pte_t pte)
{
        return __pte(pte_val(pte) & ~_PAGE_DIRTY);
}
#endif

static inline pte_t pte_mkold(pte_t pte)
{
        return __pte(pte_val(pte) & ~_PAGE_ACCESSED);
}

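/*
 * pte_mkpte() is a no-op for these MMUs: there is no extra software bit
 * that has to be set when a value becomes a real PTE, so the value is
 * returned unchanged.
 */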
static inline pte_t pte_mkpte(pte_t pte)
{
        return pte;
}

static inline pte_t pte_mkspecial(pte_t pte)
{
        return __pte(pte_val(pte) | _PAGE_SPECIAL);
}

#ifndef pte_mkhuge
static inline pte_t pte_mkhuge(pte_t pte)
{
        return __pte(pte_val(pte));
}
#endif

#ifndef pte_mkprivileged
static inline pte_t pte_mkprivileged(pte_t pte)
{
        return __pte(pte_val(pte) & ~_PAGE_USER);
}
#endif

#ifndef pte_mkuser
static inline pte_t pte_mkuser(pte_t pte)
{
        return __pte(pte_val(pte) | _PAGE_USER);
}
#endif

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
        return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot));
}
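
/*
 * Illustrative use: protection changes (e.g. mprotect()) rewrite the
 * permission bits of an existing translation without touching the frame
 * number, along the lines of
 *
 *	pte = pte_modify(pte, vm_get_page_prot(vma->vm_flags));
 *
 * _PAGE_CHG_MASK selects the bits (PFN, dirty, accessed, ...) that must
 * survive such a change.
 */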

/* Insert a PTE: the top-level function is out of line. It uses an inline
 * low-level function in the respective pgtable-* files.
 */
extern void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep,
                       pte_t pte);

/* This low-level function performs the actual PTE insertion.
 * Setting the PTE depends on the MMU type and other factors. It's
 * a horrible mess that I'm not going to try to clean up now but
 * I'm keeping it in one place rather than spread around.
 */
static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
                                pte_t *ptep, pte_t pte, int percpu)
{
        /* The case of 32-bit with 64-bit PTEs: here we can just store
         * as long as we do the two halves in the right order
         * with a barrier in between.
         * In the percpu case, we also fall back to the simple update.
         */
        if (IS_ENABLED(CONFIG_PPC32) && IS_ENABLED(CONFIG_PTE_64BIT) && !percpu) {
                __asm__ __volatile__("\
                        stw%U0%X0 %2,%0\n\
                        eieio\n\
                        stw%U0%X0 %L2,%1"
                : "=m" (*ptep), "=m" (*((unsigned char *)ptep+4))
                : "r" (pte) : "memory");
                return;
        }
        /* Anything else just stores the PTE normally. That covers all 64-bit
         * cases, and 32-bit non-hash with 32-bit PTEs.
         */
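        /*
         * On the 8xx with 16k pages, each Linux page is covered by four 4k
         * hardware PTE slots, so the same value is replicated into all four.
         */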
#if defined(CONFIG_PPC_8xx) && defined(CONFIG_PPC_16K_PAGES)
        ptep->pte = ptep->pte1 = ptep->pte2 = ptep->pte3 = pte_val(pte);
#else
        *ptep = pte;
#endif

        /*
         * With hardware tablewalk, a sync is needed to ensure that
         * subsequent accesses see the PTE we just wrote.  Unlike userspace
         * mappings, we can't tolerate spurious faults, so make sure
         * the new PTE will be seen the first time.
         */
        if (IS_ENABLED(CONFIG_PPC_BOOK3E_64) && is_kernel_addr(addr))
                mb();
}

#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
extern int ptep_set_access_flags(struct vm_area_struct *vma, unsigned long address,
                                 pte_t *ptep, pte_t entry, int dirty);

/*
 * Macros to set the cache attributes of a page protection value:
 * "uncacheable", write-combining, write-through, and so on.
 */

#define _PAGE_CACHE_CTL (_PAGE_COHERENT | _PAGE_GUARDED | _PAGE_NO_CACHE | \
                         _PAGE_WRITETHRU)

#define pgprot_noncached(prot)    (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
                                            _PAGE_NO_CACHE | _PAGE_GUARDED))

#define pgprot_noncached_wc(prot) (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
                                            _PAGE_NO_CACHE))

#define pgprot_cached(prot)       (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
                                            _PAGE_COHERENT))

#if _PAGE_WRITETHRU != 0
#define pgprot_cached_wthru(prot) (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
                                            _PAGE_COHERENT | _PAGE_WRITETHRU))
#else
#define pgprot_cached_wthru(prot)       pgprot_noncached(prot)
#endif

#define pgprot_cached_noncoherent(prot) \
                (__pgprot(pgprot_val(prot) & ~_PAGE_CACHE_CTL))

#define pgprot_writecombine pgprot_noncached_wc
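
/*
 * Illustrative use: a driver mapping MMIO or framebuffer memory into
 * userspace typically rewrites the vma protection first, e.g.
 *
 *	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 *
 * before calling remap_pfn_range(), so the CPU side of the mapping is
 * non-cached and guarded.
 */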

struct file;
extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
                                     unsigned long size, pgprot_t vma_prot);
#define __HAVE_PHYS_MEM_ACCESS_PROT

#ifdef CONFIG_HUGETLB_PAGE
static inline int hugepd_ok(hugepd_t hpd)
{
#ifdef CONFIG_PPC_8xx
        return ((hpd_val(hpd) & 0x4) != 0);
#else
        /* We clear the top bit to indicate hugepd */
        return (hpd_val(hpd) && (hpd_val(hpd) & PD_HUGE) == 0);
#endif
}

static inline int pmd_huge(pmd_t pmd)
{
        return 0;
}

static inline int pud_huge(pud_t pud)
{
        return 0;
}

static inline int pgd_huge(pgd_t pgd)
{
        return 0;
}
#define pgd_huge                pgd_huge

#define is_hugepd(hpd)          (hugepd_ok(hpd))
#endif

/*
 * This gets called at the end of handling a page fault, when
 * the kernel has put a new PTE into the page table for the process.
 * We use it to ensure coherency between the i-cache and d-cache
 * for the page which has just been mapped in.
 */
#if defined(CONFIG_PPC_FSL_BOOK3E) && defined(CONFIG_HUGETLB_PAGE)
void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep);
#else
static inline
void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep) {}
#endif

#endif /* __ASSEMBLY__ */
#endif /* _ASM_POWERPC_NOHASH_PGTABLE_H */
