root/include/asm-generic/pgtable.h


DEFINITIONS

This source file includes the following definitions.
  1. pmdp_set_access_flags
  2. pudp_set_access_flags
  3. ptep_test_and_clear_young
  4. pmdp_test_and_clear_young
  5. pmdp_test_and_clear_young
  6. pmdp_clear_flush_young
  7. ptep_get_and_clear
  8. pmdp_huge_get_and_clear
  9. pudp_huge_get_and_clear
  10. pmdp_huge_get_and_clear_full
  11. pudp_huge_get_and_clear_full
  12. ptep_get_and_clear_full
  13. pte_clear_not_present_full
  14. ptep_set_wrprotect
  15. pmdp_set_wrprotect
  16. pmdp_set_wrprotect
  17. pudp_set_wrprotect
  18. pudp_set_wrprotect
  19. pmdp_collapse_flush
  20. generic_pmdp_establish
  21. pte_same
  22. pte_unused
  23. pmd_same
  24. pud_same
  25. p4d_same
  26. pgd_same
  27. arch_do_swap_page
  28. arch_unmap_one
  29. pgprot_modify
  30. pgd_none_or_clear_bad
  31. p4d_none_or_clear_bad
  32. pud_none_or_clear_bad
  33. pmd_none_or_clear_bad
  34. __ptep_modify_prot_start
  35. __ptep_modify_prot_commit
  36. ptep_modify_prot_start
  37. ptep_modify_prot_commit
  38. pmd_swp_mksoft_dirty
  39. pmd_swp_soft_dirty
  40. pmd_swp_clear_soft_dirty
  41. pte_soft_dirty
  42. pmd_soft_dirty
  43. pte_mksoft_dirty
  44. pmd_mksoft_dirty
  45. pte_clear_soft_dirty
  46. pmd_clear_soft_dirty
  47. pte_swp_mksoft_dirty
  48. pte_swp_soft_dirty
  49. pte_swp_clear_soft_dirty
  50. pmd_swp_mksoft_dirty
  51. pmd_swp_soft_dirty
  52. pmd_swp_clear_soft_dirty
  53. track_pfn_remap
  54. track_pfn_insert
  55. track_pfn_copy
  56. untrack_pfn
  57. untrack_pfn_moved
  58. is_zero_pfn
  59. is_zero_pfn
  60. my_zero_pfn
  61. pmd_trans_huge
  62. pmd_write
  63. pud_write
  64. pud_trans_huge
  65. pmd_read_atomic
  66. pmd_none_or_trans_huge_or_clear_bad
  67. pmd_trans_unstable
  68. pte_protnone
  69. pmd_protnone
  70. p4d_set_huge
  71. p4d_clear_huge
  72. p4d_set_huge
  73. pud_set_huge
  74. pmd_set_huge
  75. p4d_clear_huge
  76. pud_clear_huge
  77. pmd_clear_huge
  78. p4d_free_pud_page
  79. pud_free_pmd_page
  80. pmd_free_pte_page
  81. init_espfix_bsp
  82. pfn_modify_allowed
  83. arch_has_pfn_modify_check

   1 /* SPDX-License-Identifier: GPL-2.0 */
   2 #ifndef _ASM_GENERIC_PGTABLE_H
   3 #define _ASM_GENERIC_PGTABLE_H
   4 
   5 #include <linux/pfn.h>
   6 
   7 #ifndef __ASSEMBLY__
   8 #ifdef CONFIG_MMU
   9 
  10 #include <linux/mm_types.h>
  11 #include <linux/bug.h>
  12 #include <linux/errno.h>
  13 
  14 #if 5 - defined(__PAGETABLE_P4D_FOLDED) - defined(__PAGETABLE_PUD_FOLDED) - \
  15         defined(__PAGETABLE_PMD_FOLDED) != CONFIG_PGTABLE_LEVELS
  16 #error CONFIG_PGTABLE_LEVELS is not consistent with __PAGETABLE_{P4D,PUD,PMD}_FOLDED
  17 #endif
  18 
  19 /*
  20  * On almost all architectures and configurations, 0 can be used as the
  21  * upper ceiling to free_pgtables(): on many architectures it has the same
  22  * effect as using TASK_SIZE.  However, there is one configuration which
  23  * must impose a more careful limit, to avoid freeing kernel pgtables.
  24  */
  25 #ifndef USER_PGTABLES_CEILING
  26 #define USER_PGTABLES_CEILING   0UL
  27 #endif
  28 
  29 #ifndef __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
  30 extern int ptep_set_access_flags(struct vm_area_struct *vma,
  31                                  unsigned long address, pte_t *ptep,
  32                                  pte_t entry, int dirty);
  33 #endif
  34 
  35 #ifndef __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
  36 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
  37 extern int pmdp_set_access_flags(struct vm_area_struct *vma,
  38                                  unsigned long address, pmd_t *pmdp,
  39                                  pmd_t entry, int dirty);
  40 extern int pudp_set_access_flags(struct vm_area_struct *vma,
  41                                  unsigned long address, pud_t *pudp,
  42                                  pud_t entry, int dirty);
  43 #else
  44 static inline int pmdp_set_access_flags(struct vm_area_struct *vma,
  45                                         unsigned long address, pmd_t *pmdp,
  46                                         pmd_t entry, int dirty)
  47 {
  48         BUILD_BUG();
  49         return 0;
  50 }
  51 static inline int pudp_set_access_flags(struct vm_area_struct *vma,
  52                                         unsigned long address, pud_t *pudp,
  53                                         pud_t entry, int dirty)
  54 {
  55         BUILD_BUG();
  56         return 0;
  57 }
  58 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
  59 #endif
  60 
  61 #ifndef __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
  62 static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
  63                                             unsigned long address,
  64                                             pte_t *ptep)
  65 {
  66         pte_t pte = *ptep;
  67         int r = 1;
  68         if (!pte_young(pte))
  69                 r = 0;
  70         else
  71                 set_pte_at(vma->vm_mm, address, ptep, pte_mkold(pte));
  72         return r;
  73 }
  74 #endif
  75 
  76 #ifndef __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
  77 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
  78 static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
  79                                             unsigned long address,
  80                                             pmd_t *pmdp)
  81 {
  82         pmd_t pmd = *pmdp;
  83         int r = 1;
  84         if (!pmd_young(pmd))
  85                 r = 0;
  86         else
  87                 set_pmd_at(vma->vm_mm, address, pmdp, pmd_mkold(pmd));
  88         return r;
  89 }
  90 #else
  91 static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
  92                                             unsigned long address,
  93                                             pmd_t *pmdp)
  94 {
  95         BUILD_BUG();
  96         return 0;
  97 }
  98 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
  99 #endif
 100 
 101 #ifndef __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
 102 int ptep_clear_flush_young(struct vm_area_struct *vma,
 103                            unsigned long address, pte_t *ptep);
 104 #endif
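
/*
 * Illustrative sketch (not part of the original header): the declaration
 * above is implemented generically in mm/pgtable-generic.c roughly along
 * these lines, pairing ptep_test_and_clear_young() with a per-page TLB flush:
 *
 *	int ptep_clear_flush_young(struct vm_area_struct *vma,
 *				   unsigned long address, pte_t *ptep)
 *	{
 *		int young = ptep_test_and_clear_young(vma, address, ptep);
 *
 *		if (young)
 *			flush_tlb_page(vma, address);
 *		return young;
 *	}
 */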
 105 
 106 #ifndef __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
 107 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 108 extern int pmdp_clear_flush_young(struct vm_area_struct *vma,
 109                                   unsigned long address, pmd_t *pmdp);
 110 #else
 111 /*
  112  * Although this API is relevant to THP only, it is called from generic rmap
  113  * code under PageTransHuge(), hence it needs a dummy implementation for !THP.
  114  */
 115 static inline int pmdp_clear_flush_young(struct vm_area_struct *vma,
 116                                          unsigned long address, pmd_t *pmdp)
 117 {
 118         BUILD_BUG();
 119         return 0;
 120 }
 121 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 122 #endif
 123 
 124 #ifndef __HAVE_ARCH_PTEP_GET_AND_CLEAR
 125 static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
 126                                        unsigned long address,
 127                                        pte_t *ptep)
 128 {
 129         pte_t pte = *ptep;
 130         pte_clear(mm, address, ptep);
 131         return pte;
 132 }
 133 #endif
 134 
 135 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 136 #ifndef __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
 137 static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
 138                                             unsigned long address,
 139                                             pmd_t *pmdp)
 140 {
 141         pmd_t pmd = *pmdp;
 142         pmd_clear(pmdp);
 143         return pmd;
 144 }
 145 #endif /* __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR */
 146 #ifndef __HAVE_ARCH_PUDP_HUGE_GET_AND_CLEAR
 147 static inline pud_t pudp_huge_get_and_clear(struct mm_struct *mm,
 148                                             unsigned long address,
 149                                             pud_t *pudp)
 150 {
 151         pud_t pud = *pudp;
 152 
 153         pud_clear(pudp);
 154         return pud;
 155 }
 156 #endif /* __HAVE_ARCH_PUDP_HUGE_GET_AND_CLEAR */
 157 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 158 
 159 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 160 #ifndef __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR_FULL
 161 static inline pmd_t pmdp_huge_get_and_clear_full(struct mm_struct *mm,
 162                                             unsigned long address, pmd_t *pmdp,
 163                                             int full)
 164 {
 165         return pmdp_huge_get_and_clear(mm, address, pmdp);
 166 }
 167 #endif
 168 
 169 #ifndef __HAVE_ARCH_PUDP_HUGE_GET_AND_CLEAR_FULL
 170 static inline pud_t pudp_huge_get_and_clear_full(struct mm_struct *mm,
 171                                             unsigned long address, pud_t *pudp,
 172                                             int full)
 173 {
 174         return pudp_huge_get_and_clear(mm, address, pudp);
 175 }
 176 #endif
 177 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 178 
 179 #ifndef __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
 180 static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
 181                                             unsigned long address, pte_t *ptep,
 182                                             int full)
 183 {
 184         pte_t pte;
 185         pte = ptep_get_and_clear(mm, address, ptep);
 186         return pte;
 187 }
 188 #endif
 189 
 190 /*
 191  * Some architectures may be able to avoid expensive synchronization
  192  * primitives when modifications are made to PTEs which are already
  193  * not present, or during the destruction of an address space.
 194  */
 195 #ifndef __HAVE_ARCH_PTE_CLEAR_NOT_PRESENT_FULL
 196 static inline void pte_clear_not_present_full(struct mm_struct *mm,
 197                                               unsigned long address,
 198                                               pte_t *ptep,
 199                                               int full)
 200 {
 201         pte_clear(mm, address, ptep);
 202 }
 203 #endif
 204 
 205 #ifndef __HAVE_ARCH_PTEP_CLEAR_FLUSH
 206 extern pte_t ptep_clear_flush(struct vm_area_struct *vma,
 207                               unsigned long address,
 208                               pte_t *ptep);
 209 #endif
 210 
 211 #ifndef __HAVE_ARCH_PMDP_HUGE_CLEAR_FLUSH
 212 extern pmd_t pmdp_huge_clear_flush(struct vm_area_struct *vma,
 213                               unsigned long address,
 214                               pmd_t *pmdp);
 215 extern pud_t pudp_huge_clear_flush(struct vm_area_struct *vma,
 216                               unsigned long address,
 217                               pud_t *pudp);
 218 #endif
 219 
 220 #ifndef __HAVE_ARCH_PTEP_SET_WRPROTECT
 221 struct mm_struct;
 222 static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long address, pte_t *ptep)
 223 {
 224         pte_t old_pte = *ptep;
 225         set_pte_at(mm, address, ptep, pte_wrprotect(old_pte));
 226 }
 227 #endif
 228 
 229 #ifndef pte_savedwrite
 230 #define pte_savedwrite pte_write
 231 #endif
 232 
 233 #ifndef pte_mk_savedwrite
 234 #define pte_mk_savedwrite pte_mkwrite
 235 #endif
 236 
 237 #ifndef pte_clear_savedwrite
 238 #define pte_clear_savedwrite pte_wrprotect
 239 #endif
 240 
 241 #ifndef pmd_savedwrite
 242 #define pmd_savedwrite pmd_write
 243 #endif
 244 
 245 #ifndef pmd_mk_savedwrite
 246 #define pmd_mk_savedwrite pmd_mkwrite
 247 #endif
 248 
 249 #ifndef pmd_clear_savedwrite
 250 #define pmd_clear_savedwrite pmd_wrprotect
 251 #endif
 252 
 253 #ifndef __HAVE_ARCH_PMDP_SET_WRPROTECT
 254 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 255 static inline void pmdp_set_wrprotect(struct mm_struct *mm,
 256                                       unsigned long address, pmd_t *pmdp)
 257 {
 258         pmd_t old_pmd = *pmdp;
 259         set_pmd_at(mm, address, pmdp, pmd_wrprotect(old_pmd));
 260 }
 261 #else
 262 static inline void pmdp_set_wrprotect(struct mm_struct *mm,
 263                                       unsigned long address, pmd_t *pmdp)
 264 {
 265         BUILD_BUG();
 266 }
 267 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 268 #endif
 269 #ifndef __HAVE_ARCH_PUDP_SET_WRPROTECT
 270 #ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
 271 static inline void pudp_set_wrprotect(struct mm_struct *mm,
 272                                       unsigned long address, pud_t *pudp)
 273 {
 274         pud_t old_pud = *pudp;
 275 
 276         set_pud_at(mm, address, pudp, pud_wrprotect(old_pud));
 277 }
 278 #else
 279 static inline void pudp_set_wrprotect(struct mm_struct *mm,
 280                                       unsigned long address, pud_t *pudp)
 281 {
 282         BUILD_BUG();
 283 }
 284 #endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
 285 #endif
 286 
 287 #ifndef pmdp_collapse_flush
 288 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 289 extern pmd_t pmdp_collapse_flush(struct vm_area_struct *vma,
 290                                  unsigned long address, pmd_t *pmdp);
 291 #else
 292 static inline pmd_t pmdp_collapse_flush(struct vm_area_struct *vma,
 293                                         unsigned long address,
 294                                         pmd_t *pmdp)
 295 {
 296         BUILD_BUG();
 297         return *pmdp;
 298 }
 299 #define pmdp_collapse_flush pmdp_collapse_flush
 300 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 301 #endif
 302 
 303 #ifndef __HAVE_ARCH_PGTABLE_DEPOSIT
 304 extern void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
 305                                        pgtable_t pgtable);
 306 #endif
 307 
 308 #ifndef __HAVE_ARCH_PGTABLE_WITHDRAW
 309 extern pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp);
 310 #endif
 311 
 312 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 313 /*
 314  * This is an implementation of pmdp_establish() that is only suitable for an
 315  * architecture that doesn't have hardware dirty/accessed bits. In this case we
  316  * can't race with a CPU that sets these bits, so a non-atomic approach is fine.
 317  */
 318 static inline pmd_t generic_pmdp_establish(struct vm_area_struct *vma,
 319                 unsigned long address, pmd_t *pmdp, pmd_t pmd)
 320 {
 321         pmd_t old_pmd = *pmdp;
 322         set_pmd_at(vma->vm_mm, address, pmdp, pmd);
 323         return old_pmd;
 324 }
 325 #endif
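
/*
 * Illustrative sketch (an assumption about arch usage, not mandated by this
 * header): an architecture without hardware dirty/accessed bits can adopt the
 * generic helper above simply by aliasing it in its own asm/pgtable.h:
 *
 *	#define pmdp_establish generic_pmdp_establish
 */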
 326 
 327 #ifndef __HAVE_ARCH_PMDP_INVALIDATE
 328 extern pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
 329                             pmd_t *pmdp);
 330 #endif
 331 
 332 #ifndef __HAVE_ARCH_PTE_SAME
 333 static inline int pte_same(pte_t pte_a, pte_t pte_b)
 334 {
 335         return pte_val(pte_a) == pte_val(pte_b);
 336 }
 337 #endif
 338 
 339 #ifndef __HAVE_ARCH_PTE_UNUSED
 340 /*
 341  * Some architectures provide facilities to virtualization guests
 342  * so that they can flag allocated pages as unused. This allows the
 343  * host to transparently reclaim unused pages. This function returns
 344  * whether the pte's page is unused.
 345  */
 346 static inline int pte_unused(pte_t pte)
 347 {
 348         return 0;
 349 }
 350 #endif
 351 
 352 #ifndef pte_access_permitted
 353 #define pte_access_permitted(pte, write) \
 354         (pte_present(pte) && (!(write) || pte_write(pte)))
 355 #endif
 356 
 357 #ifndef pmd_access_permitted
 358 #define pmd_access_permitted(pmd, write) \
 359         (pmd_present(pmd) && (!(write) || pmd_write(pmd)))
 360 #endif
 361 
 362 #ifndef pud_access_permitted
 363 #define pud_access_permitted(pud, write) \
 364         (pud_present(pud) && (!(write) || pud_write(pud)))
 365 #endif
 366 
 367 #ifndef p4d_access_permitted
 368 #define p4d_access_permitted(p4d, write) \
 369         (p4d_present(p4d) && (!(write) || p4d_write(p4d)))
 370 #endif
 371 
 372 #ifndef pgd_access_permitted
 373 #define pgd_access_permitted(pgd, write) \
 374         (pgd_present(pgd) && (!(write) || pgd_write(pgd)))
 375 #endif
 376 
 377 #ifndef __HAVE_ARCH_PMD_SAME
 378 static inline int pmd_same(pmd_t pmd_a, pmd_t pmd_b)
 379 {
 380         return pmd_val(pmd_a) == pmd_val(pmd_b);
 381 }
 382 
 383 static inline int pud_same(pud_t pud_a, pud_t pud_b)
 384 {
 385         return pud_val(pud_a) == pud_val(pud_b);
 386 }
 387 #endif
 388 
 389 #ifndef __HAVE_ARCH_P4D_SAME
 390 static inline int p4d_same(p4d_t p4d_a, p4d_t p4d_b)
 391 {
 392         return p4d_val(p4d_a) == p4d_val(p4d_b);
 393 }
 394 #endif
 395 
 396 #ifndef __HAVE_ARCH_PGD_SAME
 397 static inline int pgd_same(pgd_t pgd_a, pgd_t pgd_b)
 398 {
 399         return pgd_val(pgd_a) == pgd_val(pgd_b);
 400 }
 401 #endif
 402 
 403 /*
 404  * Use set_p*_safe(), and elide TLB flushing, when confident that *no*
 405  * TLB flush will be required as a result of the "set". For example, use
 406  * in scenarios where it is known ahead of time that the routine is
 407  * setting non-present entries, or re-setting an existing entry to the
 408  * same value. Otherwise, use the typical "set" helpers and flush the
 409  * TLB.
 410  */
 411 #define set_pte_safe(ptep, pte) \
 412 ({ \
 413         WARN_ON_ONCE(pte_present(*ptep) && !pte_same(*ptep, pte)); \
 414         set_pte(ptep, pte); \
 415 })
 416 
 417 #define set_pmd_safe(pmdp, pmd) \
 418 ({ \
 419         WARN_ON_ONCE(pmd_present(*pmdp) && !pmd_same(*pmdp, pmd)); \
 420         set_pmd(pmdp, pmd); \
 421 })
 422 
 423 #define set_pud_safe(pudp, pud) \
 424 ({ \
 425         WARN_ON_ONCE(pud_present(*pudp) && !pud_same(*pudp, pud)); \
 426         set_pud(pudp, pud); \
 427 })
 428 
 429 #define set_p4d_safe(p4dp, p4d) \
 430 ({ \
 431         WARN_ON_ONCE(p4d_present(*p4dp) && !p4d_same(*p4dp, p4d)); \
 432         set_p4d(p4dp, p4d); \
 433 })
 434 
 435 #define set_pgd_safe(pgdp, pgd) \
 436 ({ \
 437         WARN_ON_ONCE(pgd_present(*pgdp) && !pgd_same(*pgdp, pgd)); \
 438         set_pgd(pgdp, pgd); \
 439 })
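
/*
 * Illustrative sketch (hypothetical helper, not from this header): a typical
 * user of the _safe variants is early boot code building brand-new kernel
 * mappings, where every entry written is either not present yet or rewritten
 * with the same value, so no TLB flush is needed and set_pte_safe() both
 * documents and checks that expectation:
 *
 *	static void __init example_populate_pte(pte_t *ptep, unsigned long pfn,
 *						pgprot_t prot)
 *	{
 *		set_pte_safe(ptep, pfn_pte(pfn, prot));
 *	}
 */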
 440 
 441 #ifndef __HAVE_ARCH_DO_SWAP_PAGE
 442 /*
 443  * Some architectures support metadata associated with a page. When a
 444  * page is being swapped out, this metadata must be saved so it can be
 445  * restored when the page is swapped back in. SPARC M7 and newer
 446  * processors support an ADI (Application Data Integrity) tag for the
 447  * page as metadata for the page. arch_do_swap_page() can restore this
 448  * metadata when a page is swapped back in.
 449  */
 450 static inline void arch_do_swap_page(struct mm_struct *mm,
 451                                      struct vm_area_struct *vma,
 452                                      unsigned long addr,
 453                                      pte_t pte, pte_t oldpte)
 454 {
 455 
 456 }
 457 #endif
 458 
 459 #ifndef __HAVE_ARCH_UNMAP_ONE
 460 /*
 461  * Some architectures support metadata associated with a page. When a
 462  * page is being swapped out, this metadata must be saved so it can be
 463  * restored when the page is swapped back in. SPARC M7 and newer
 464  * processors support an ADI (Application Data Integrity) tag for the
 465  * page as metadata for the page. arch_unmap_one() can save this
 466  * metadata on a swap-out of a page.
 467  */
 468 static inline int arch_unmap_one(struct mm_struct *mm,
 469                                   struct vm_area_struct *vma,
 470                                   unsigned long addr,
 471                                   pte_t orig_pte)
 472 {
 473         return 0;
 474 }
 475 #endif
 476 
 477 #ifndef __HAVE_ARCH_PGD_OFFSET_GATE
 478 #define pgd_offset_gate(mm, addr)       pgd_offset(mm, addr)
 479 #endif
 480 
 481 #ifndef __HAVE_ARCH_MOVE_PTE
 482 #define move_pte(pte, prot, old_addr, new_addr) (pte)
 483 #endif
 484 
 485 #ifndef pte_accessible
 486 # define pte_accessible(mm, pte)        ((void)(pte), 1)
 487 #endif
 488 
 489 #ifndef flush_tlb_fix_spurious_fault
 490 #define flush_tlb_fix_spurious_fault(vma, address) flush_tlb_page(vma, address)
 491 #endif
 492 
 493 #ifndef pgprot_noncached
 494 #define pgprot_noncached(prot)  (prot)
 495 #endif
 496 
 497 #ifndef pgprot_writecombine
 498 #define pgprot_writecombine pgprot_noncached
 499 #endif
 500 
 501 #ifndef pgprot_writethrough
 502 #define pgprot_writethrough pgprot_noncached
 503 #endif
 504 
 505 #ifndef pgprot_device
 506 #define pgprot_device pgprot_noncached
 507 #endif
 508 
 509 #ifndef pgprot_modify
 510 #define pgprot_modify pgprot_modify
 511 static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
 512 {
 513         if (pgprot_val(oldprot) == pgprot_val(pgprot_noncached(oldprot)))
 514                 newprot = pgprot_noncached(newprot);
 515         if (pgprot_val(oldprot) == pgprot_val(pgprot_writecombine(oldprot)))
 516                 newprot = pgprot_writecombine(newprot);
 517         if (pgprot_val(oldprot) == pgprot_val(pgprot_device(oldprot)))
 518                 newprot = pgprot_device(newprot);
 519         return newprot;
 520 }
 521 #endif
 522 
 523 /*
 524  * When walking page tables, get the address of the next boundary,
 525  * or the end address of the range if that comes earlier.  Although no
 526  * vma end wraps to 0, rounded up __boundary may wrap to 0 throughout.
 527  */
 528 
 529 #define pgd_addr_end(addr, end)                                         \
 530 ({      unsigned long __boundary = ((addr) + PGDIR_SIZE) & PGDIR_MASK;  \
 531         (__boundary - 1 < (end) - 1)? __boundary: (end);                \
 532 })
 533 
 534 #ifndef p4d_addr_end
 535 #define p4d_addr_end(addr, end)                                         \
 536 ({      unsigned long __boundary = ((addr) + P4D_SIZE) & P4D_MASK;      \
 537         (__boundary - 1 < (end) - 1)? __boundary: (end);                \
 538 })
 539 #endif
 540 
 541 #ifndef pud_addr_end
 542 #define pud_addr_end(addr, end)                                         \
 543 ({      unsigned long __boundary = ((addr) + PUD_SIZE) & PUD_MASK;      \
 544         (__boundary - 1 < (end) - 1)? __boundary: (end);                \
 545 })
 546 #endif
 547 
 548 #ifndef pmd_addr_end
 549 #define pmd_addr_end(addr, end)                                         \
 550 ({      unsigned long __boundary = ((addr) + PMD_SIZE) & PMD_MASK;      \
 551         (__boundary - 1 < (end) - 1)? __boundary: (end);                \
 552 })
 553 #endif
 554 
 555 /*
 556  * When walking page tables, we usually want to skip any p?d_none entries;
 557  * and any p?d_bad entries - reporting the error before resetting to none.
 558  * Do the tests inline, but report and clear the bad entry in mm/memory.c.
 559  */
 560 void pgd_clear_bad(pgd_t *);
 561 void p4d_clear_bad(p4d_t *);
 562 void pud_clear_bad(pud_t *);
 563 void pmd_clear_bad(pmd_t *);
 564 
 565 static inline int pgd_none_or_clear_bad(pgd_t *pgd)
 566 {
 567         if (pgd_none(*pgd))
 568                 return 1;
 569         if (unlikely(pgd_bad(*pgd))) {
 570                 pgd_clear_bad(pgd);
 571                 return 1;
 572         }
 573         return 0;
 574 }
 575 
 576 static inline int p4d_none_or_clear_bad(p4d_t *p4d)
 577 {
 578         if (p4d_none(*p4d))
 579                 return 1;
 580         if (unlikely(p4d_bad(*p4d))) {
 581                 p4d_clear_bad(p4d);
 582                 return 1;
 583         }
 584         return 0;
 585 }
 586 
 587 static inline int pud_none_or_clear_bad(pud_t *pud)
 588 {
 589         if (pud_none(*pud))
 590                 return 1;
 591         if (unlikely(pud_bad(*pud))) {
 592                 pud_clear_bad(pud);
 593                 return 1;
 594         }
 595         return 0;
 596 }
 597 
 598 static inline int pmd_none_or_clear_bad(pmd_t *pmd)
 599 {
 600         if (pmd_none(*pmd))
 601                 return 1;
 602         if (unlikely(pmd_bad(*pmd))) {
 603                 pmd_clear_bad(pmd);
 604                 return 1;
 605         }
 606         return 0;
 607 }
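
/*
 * Illustrative sketch (hypothetical walker, not from this header): the
 * canonical way the p?d_addr_end() and p?d_none_or_clear_bad() helpers above
 * combine in a page table walk, shown for the top level only:
 *
 *	static void example_walk(struct mm_struct *mm, unsigned long addr,
 *				 unsigned long end)
 *	{
 *		pgd_t *pgd = pgd_offset(mm, addr);
 *		unsigned long next;
 *
 *		do {
 *			next = pgd_addr_end(addr, end);
 *			if (pgd_none_or_clear_bad(pgd))
 *				continue;
 *			... descend into p4d/pud/pmd the same way ...
 *		} while (pgd++, addr = next, addr != end);
 *	}
 */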
 608 
 609 static inline pte_t __ptep_modify_prot_start(struct vm_area_struct *vma,
 610                                              unsigned long addr,
 611                                              pte_t *ptep)
 612 {
 613         /*
 614          * Get the current pte state, but zero it out to make it
 615          * non-present, preventing the hardware from asynchronously
 616          * updating it.
 617          */
 618         return ptep_get_and_clear(vma->vm_mm, addr, ptep);
 619 }
 620 
 621 static inline void __ptep_modify_prot_commit(struct vm_area_struct *vma,
 622                                              unsigned long addr,
 623                                              pte_t *ptep, pte_t pte)
 624 {
 625         /*
 626          * The pte is non-present, so there's no hardware state to
 627          * preserve.
 628          */
 629         set_pte_at(vma->vm_mm, addr, ptep, pte);
 630 }
 631 
 632 #ifndef __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
 633 /*
 634  * Start a pte protection read-modify-write transaction, which
 635  * protects against asynchronous hardware modifications to the pte.
 636  * The intention is not to prevent the hardware from making pte
 637  * updates, but to prevent any updates it may make from being lost.
 638  *
 639  * This does not protect against other software modifications of the
  640  * pte; the appropriate pte lock must be held over the transaction.
 641  *
 642  * Note that this interface is intended to be batchable, meaning that
 643  * ptep_modify_prot_commit may not actually update the pte, but merely
 644  * queue the update to be done at some later time.  The update must be
 645  * actually committed before the pte lock is released, however.
 646  */
 647 static inline pte_t ptep_modify_prot_start(struct vm_area_struct *vma,
 648                                            unsigned long addr,
 649                                            pte_t *ptep)
 650 {
 651         return __ptep_modify_prot_start(vma, addr, ptep);
 652 }
 653 
 654 /*
 655  * Commit an update to a pte, leaving any hardware-controlled bits in
 656  * the PTE unmodified.
 657  */
 658 static inline void ptep_modify_prot_commit(struct vm_area_struct *vma,
 659                                            unsigned long addr,
 660                                            pte_t *ptep, pte_t old_pte, pte_t pte)
 661 {
 662         __ptep_modify_prot_commit(vma, addr, ptep, pte);
 663 }
 664 #endif /* __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION */
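
/*
 * Illustrative sketch (hypothetical caller, not from this header): the
 * intended start/modify/commit pattern, as used by protection changers such
 * as mprotect, with the pte lock already held by the caller:
 *
 *	static void example_change_pte_prot(struct vm_area_struct *vma,
 *					    unsigned long addr, pte_t *ptep,
 *					    pgprot_t newprot)
 *	{
 *		pte_t oldpte = ptep_modify_prot_start(vma, addr, ptep);
 *		pte_t ptent = pte_modify(oldpte, newprot);
 *
 *		ptep_modify_prot_commit(vma, addr, ptep, oldpte, ptent);
 *	}
 */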
 665 #endif /* CONFIG_MMU */
 666 
 667 /*
 668  * No-op macros that just return the current protection value. Defined here
  669  * because these macros can be used even if CONFIG_MMU is not defined.
 670  */
 671 #ifndef pgprot_encrypted
 672 #define pgprot_encrypted(prot)  (prot)
 673 #endif
 674 
 675 #ifndef pgprot_decrypted
 676 #define pgprot_decrypted(prot)  (prot)
 677 #endif
 678 
 679 /*
 680  * A facility to provide lazy MMU batching.  This allows PTE updates and
 681  * page invalidations to be delayed until a call to leave lazy MMU mode
 682  * is issued.  Some architectures may benefit from doing this, and it is
 683  * beneficial for both shadow and direct mode hypervisors, which may batch
 684  * the PTE updates which happen during this window.  Note that using this
 685  * interface requires that read hazards be removed from the code.  A read
 686  * hazard could result in the direct mode hypervisor case, since the actual
  687  * write to the page tables may not yet have taken place, so reads through
 688  * a raw PTE pointer after it has been modified are not guaranteed to be
 689  * up to date.  This mode can only be entered and left under the protection of
 690  * the page table locks for all page tables which may be modified.  In the UP
 691  * case, this is required so that preemption is disabled, and in the SMP case,
 692  * it must synchronize the delayed page table writes properly on other CPUs.
 693  */
 694 #ifndef __HAVE_ARCH_ENTER_LAZY_MMU_MODE
 695 #define arch_enter_lazy_mmu_mode()      do {} while (0)
 696 #define arch_leave_lazy_mmu_mode()      do {} while (0)
 697 #define arch_flush_lazy_mmu_mode()      do {} while (0)
 698 #endif
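
/*
 * Illustrative sketch (hypothetical helper, not from this header): batching a
 * run of PTE updates inside a lazy MMU section so a hypervisor backend can
 * coalesce them; the relevant page table lock must be held across the window:
 *
 *	static void example_set_ptes(struct mm_struct *mm, unsigned long addr,
 *				     pte_t *ptep, const pte_t *ptes,
 *				     unsigned int nr)
 *	{
 *		unsigned int i;
 *
 *		arch_enter_lazy_mmu_mode();
 *		for (i = 0; i < nr; i++, addr += PAGE_SIZE)
 *			set_pte_at(mm, addr, ptep + i, ptes[i]);
 *		arch_leave_lazy_mmu_mode();
 *	}
 */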
 699 
 700 /*
 701  * A facility to provide batching of the reload of page tables and
 702  * other process state with the actual context switch code for
 703  * paravirtualized guests.  By convention, only one of the batched
 704  * update (lazy) modes (CPU, MMU) should be active at any given time,
 705  * entry should never be nested, and entry and exits should always be
 706  * paired.  This is for sanity of maintaining and reasoning about the
 707  * kernel code.  In this case, the exit (end of the context switch) is
 708  * in architecture-specific code, and so doesn't need a generic
 709  * definition.
 710  */
 711 #ifndef __HAVE_ARCH_START_CONTEXT_SWITCH
 712 #define arch_start_context_switch(prev) do {} while (0)
 713 #endif
 714 
 715 #ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
 716 #ifndef CONFIG_ARCH_ENABLE_THP_MIGRATION
 717 static inline pmd_t pmd_swp_mksoft_dirty(pmd_t pmd)
 718 {
 719         return pmd;
 720 }
 721 
 722 static inline int pmd_swp_soft_dirty(pmd_t pmd)
 723 {
 724         return 0;
 725 }
 726 
 727 static inline pmd_t pmd_swp_clear_soft_dirty(pmd_t pmd)
 728 {
 729         return pmd;
 730 }
 731 #endif
 732 #else /* !CONFIG_HAVE_ARCH_SOFT_DIRTY */
 733 static inline int pte_soft_dirty(pte_t pte)
 734 {
 735         return 0;
 736 }
 737 
 738 static inline int pmd_soft_dirty(pmd_t pmd)
 739 {
 740         return 0;
 741 }
 742 
 743 static inline pte_t pte_mksoft_dirty(pte_t pte)
 744 {
 745         return pte;
 746 }
 747 
 748 static inline pmd_t pmd_mksoft_dirty(pmd_t pmd)
 749 {
 750         return pmd;
 751 }
 752 
 753 static inline pte_t pte_clear_soft_dirty(pte_t pte)
 754 {
 755         return pte;
 756 }
 757 
 758 static inline pmd_t pmd_clear_soft_dirty(pmd_t pmd)
 759 {
 760         return pmd;
 761 }
 762 
 763 static inline pte_t pte_swp_mksoft_dirty(pte_t pte)
 764 {
 765         return pte;
 766 }
 767 
 768 static inline int pte_swp_soft_dirty(pte_t pte)
 769 {
 770         return 0;
 771 }
 772 
 773 static inline pte_t pte_swp_clear_soft_dirty(pte_t pte)
 774 {
 775         return pte;
 776 }
 777 
 778 static inline pmd_t pmd_swp_mksoft_dirty(pmd_t pmd)
 779 {
 780         return pmd;
 781 }
 782 
 783 static inline int pmd_swp_soft_dirty(pmd_t pmd)
 784 {
 785         return 0;
 786 }
 787 
 788 static inline pmd_t pmd_swp_clear_soft_dirty(pmd_t pmd)
 789 {
 790         return pmd;
 791 }
 792 #endif
 793 
 794 #ifndef __HAVE_PFNMAP_TRACKING
 795 /*
 796  * Interfaces that can be used by architecture code to keep track of
  797  * the memory type of pfn mappings specified by remap_pfn_range() and
  798  * vmf_insert_pfn().
 799  */
 800 
 801 /*
 802  * track_pfn_remap is called when a _new_ pfn mapping is being established
  803  * by remap_pfn_range() for the physical range indicated by pfn and size.
 804  */
 805 static inline int track_pfn_remap(struct vm_area_struct *vma, pgprot_t *prot,
 806                                   unsigned long pfn, unsigned long addr,
 807                                   unsigned long size)
 808 {
 809         return 0;
 810 }
 811 
 812 /*
 813  * track_pfn_insert is called when a _new_ single pfn is established
 814  * by vmf_insert_pfn().
 815  */
 816 static inline void track_pfn_insert(struct vm_area_struct *vma, pgprot_t *prot,
 817                                     pfn_t pfn)
 818 {
 819 }
 820 
 821 /*
  822  * track_pfn_copy is called when the vma covering the pfnmap gets
 823  * copied through copy_page_range().
 824  */
 825 static inline int track_pfn_copy(struct vm_area_struct *vma)
 826 {
 827         return 0;
 828 }
 829 
 830 /*
 831  * untrack_pfn is called while unmapping a pfnmap for a region.
  832  * untrack_pfn can be called for a specific region indicated by pfn and size,
  833  * or for the entire vma (in which case pfn and size are zero).
 834  */
 835 static inline void untrack_pfn(struct vm_area_struct *vma,
 836                                unsigned long pfn, unsigned long size)
 837 {
 838 }
 839 
 840 /*
 841  * untrack_pfn_moved is called while mremapping a pfnmap for a new region.
 842  */
 843 static inline void untrack_pfn_moved(struct vm_area_struct *vma)
 844 {
 845 }
 846 #else
 847 extern int track_pfn_remap(struct vm_area_struct *vma, pgprot_t *prot,
 848                            unsigned long pfn, unsigned long addr,
 849                            unsigned long size);
 850 extern void track_pfn_insert(struct vm_area_struct *vma, pgprot_t *prot,
 851                              pfn_t pfn);
 852 extern int track_pfn_copy(struct vm_area_struct *vma);
 853 extern void untrack_pfn(struct vm_area_struct *vma, unsigned long pfn,
 854                         unsigned long size);
 855 extern void untrack_pfn_moved(struct vm_area_struct *vma);
 856 #endif
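
/*
 * Illustrative sketch (hypothetical driver code, not from this header): the
 * tracking hooks above are not called directly by drivers; a plain ->mmap()
 * handler like the one below reaches track_pfn_remap() via remap_pfn_range()
 * on architectures that track pfn memory types (e.g. x86 PAT).
 * EXAMPLE_DEV_PHYS is a made-up device base address:
 *
 *	static int example_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		unsigned long pfn = EXAMPLE_DEV_PHYS >> PAGE_SHIFT;
 *
 *		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 *		return remap_pfn_range(vma, vma->vm_start, pfn,
 *				       vma->vm_end - vma->vm_start,
 *				       vma->vm_page_prot);
 *	}
 */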
 857 
 858 #ifdef __HAVE_COLOR_ZERO_PAGE
 859 static inline int is_zero_pfn(unsigned long pfn)
 860 {
 861         extern unsigned long zero_pfn;
 862         unsigned long offset_from_zero_pfn = pfn - zero_pfn;
 863         return offset_from_zero_pfn <= (zero_page_mask >> PAGE_SHIFT);
 864 }
 865 
 866 #define my_zero_pfn(addr)       page_to_pfn(ZERO_PAGE(addr))
 867 
 868 #else
 869 static inline int is_zero_pfn(unsigned long pfn)
 870 {
 871         extern unsigned long zero_pfn;
 872         return pfn == zero_pfn;
 873 }
 874 
 875 static inline unsigned long my_zero_pfn(unsigned long addr)
 876 {
 877         extern unsigned long zero_pfn;
 878         return zero_pfn;
 879 }
 880 #endif
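
/*
 * Illustrative sketch (hypothetical helper, not from this header): callers
 * such as vm_normal_page() use these helpers to recognise the shared zero
 * page(s) behind a pte, whichever variant the architecture provides:
 *
 *	static inline bool example_pte_is_zero_page(pte_t pte)
 *	{
 *		return is_zero_pfn(pte_pfn(pte));
 *	}
 */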
 881 
 882 #ifdef CONFIG_MMU
 883 
 884 #ifndef CONFIG_TRANSPARENT_HUGEPAGE
 885 static inline int pmd_trans_huge(pmd_t pmd)
 886 {
 887         return 0;
 888 }
 889 #ifndef pmd_write
 890 static inline int pmd_write(pmd_t pmd)
 891 {
 892         BUG();
 893         return 0;
 894 }
 895 #endif /* pmd_write */
 896 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 897 
 898 #ifndef pud_write
 899 static inline int pud_write(pud_t pud)
 900 {
 901         BUG();
 902         return 0;
 903 }
 904 #endif /* pud_write */
 905 
 906 #if !defined(CONFIG_TRANSPARENT_HUGEPAGE) || \
 907         (defined(CONFIG_TRANSPARENT_HUGEPAGE) && \
 908          !defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD))
 909 static inline int pud_trans_huge(pud_t pud)
 910 {
 911         return 0;
 912 }
 913 #endif
 914 
 915 #ifndef pmd_read_atomic
 916 static inline pmd_t pmd_read_atomic(pmd_t *pmdp)
 917 {
 918         /*
 919          * Depend on compiler for an atomic pmd read. NOTE: this is
  920          * only going to work if the pmdval_t isn't larger than
 921          * an unsigned long.
 922          */
 923         return *pmdp;
 924 }
 925 #endif
 926 
 927 #ifndef arch_needs_pgtable_deposit
 928 #define arch_needs_pgtable_deposit() (false)
 929 #endif
 930 /*
 931  * This function is meant to be used by sites walking pagetables with
  932  * the mmap_sem held in read mode to protect against MADV_DONTNEED and
  933  * transhuge page faults. MADV_DONTNEED can convert a transhuge pmd
  934  * into a null pmd and the transhuge page fault can convert a null pmd
  935  * into a hugepmd or into a regular pmd (if the hugepage allocation
  936  * fails). While holding the mmap_sem in read mode the pmd becomes
  937  * stable and stops changing under us only if it's not null and not a
  938  * transhuge pmd. When those races occur and this function makes a
  939  * difference vs the standard pmd_none_or_clear_bad, the result is
  940  * undefined, so behaving as if the pmd were none is safe (because it
  941  * can return none anyway). The compiler-level barrier() is critically
  942  * important to compute the two checks atomically on the same pmdval.
  943  *
  944  * For 32bit kernels with a 64bit large pmd_t this automatically takes
  945  * care of reading the pmd atomically to avoid SMP race conditions
  946  * against pmd_populate() when the mmap_sem is held for reading by the
  947  * caller (a special atomic read, not done by "gcc" in the generic
  948  * version above, is also needed when THP is disabled because the page
  949  * fault can populate the pmd from under us).
 950  */
 951 static inline int pmd_none_or_trans_huge_or_clear_bad(pmd_t *pmd)
 952 {
 953         pmd_t pmdval = pmd_read_atomic(pmd);
 954         /*
 955          * The barrier will stabilize the pmdval in a register or on
 956          * the stack so that it will stop changing under the code.
 957          *
 958          * When CONFIG_TRANSPARENT_HUGEPAGE=y on x86 32bit PAE,
 959          * pmd_read_atomic is allowed to return a not atomic pmdval
 960          * (for example pointing to an hugepage that has never been
 961          * mapped in the pmd). The below checks will only care about
 962          * the low part of the pmd with 32bit PAE x86 anyway, with the
 963          * exception of pmd_none(). So the important thing is that if
 964          * the low part of the pmd is found null, the high part will
  965          * also be null or the pmd_none() check below would be
 966          * confused.
 967          */
 968 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 969         barrier();
 970 #endif
 971         /*
 972          * !pmd_present() checks for pmd migration entries
 973          *
  974          * The complete check uses is_pmd_migration_entry() in linux/swapops.h,
  975          * but using that requires moving the current function and pmd_trans_unstable()
  976          * to linux/swapops.h to resolve the dependency, which is too much code movement.
  977          *
  978          * !pmd_present() is equivalent to is_pmd_migration_entry() currently,
  979          * because !pmd_present() pages can only be under migration, not swapped
  980          * out.
  981          *
  982          * pmd_none() is preserved for future condition checks on pmd migration
  983          * entries, and to avoid confusion with this function's name, although it is
  984          * redundant with !pmd_present().
 985          */
 986         if (pmd_none(pmdval) || pmd_trans_huge(pmdval) ||
 987                 (IS_ENABLED(CONFIG_ARCH_ENABLE_THP_MIGRATION) && !pmd_present(pmdval)))
 988                 return 1;
 989         if (unlikely(pmd_bad(pmdval))) {
 990                 pmd_clear_bad(pmd);
 991                 return 1;
 992         }
 993         return 0;
 994 }
 995 
 996 /*
 997  * This is a noop if Transparent Hugepage Support is not built into
 998  * the kernel. Otherwise it is equivalent to
 999  * pmd_none_or_trans_huge_or_clear_bad(), and shall only be called in
1000  * places that already verified the pmd is not none and they want to
 1001  * walk ptes while holding the mmap sem in read mode (write mode doesn't
 1002  * need this). If THP is not enabled, the pmd can't go away under the
 1003  * code even if MADV_DONTNEED runs, but if THP is enabled we need to
 1004  * run pmd_trans_unstable before walking the ptes after
 1005  * split_huge_pmd returns (because it may have run when the pmd became
 1006  * null, but then a page fault can map in a THP and not a regular page).
1007  */
1008 static inline int pmd_trans_unstable(pmd_t *pmd)
1009 {
1010 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
1011         return pmd_none_or_trans_huge_or_clear_bad(pmd);
1012 #else
1013         return 0;
1014 #endif
1015 }
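
/*
 * Illustrative sketch (hypothetical walker, not from this header): a pte
 * walker holding mmap_sem for read typically re-checks the pmd with
 * pmd_trans_unstable() before mapping the pte page, and simply skips or
 * retries the range when the pmd is unstable:
 *
 *	static int example_walk_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
 *					  unsigned long addr, unsigned long end)
 *	{
 *		spinlock_t *ptl;
 *		pte_t *start_pte, *pte;
 *
 *		if (pmd_trans_unstable(pmd))
 *			return 0;	... none, huge, or being faulted in ...
 *
 *		start_pte = pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
 *		for (; addr != end; pte++, addr += PAGE_SIZE) {
 *			... inspect *pte ...
 *		}
 *		pte_unmap_unlock(start_pte, ptl);
 *		return 0;
 *	}
 */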
1016 
1017 #ifndef CONFIG_NUMA_BALANCING
1018 /*
1019  * Technically a PTE can be PROTNONE even when not doing NUMA balancing but
 1020  * the only case the kernel cares about is NUMA balancing, and it is only
 1021  * ever set when the VMA is accessible. For PROT_NONE VMAs, the PTEs are not
 1022  * marked _PAGE_PROTNONE, so by default implement the helper as "always no". It
1023  * is the responsibility of the caller to distinguish between PROT_NONE
1024  * protections and NUMA hinting fault protections.
1025  */
1026 static inline int pte_protnone(pte_t pte)
1027 {
1028         return 0;
1029 }
1030 
1031 static inline int pmd_protnone(pmd_t pmd)
1032 {
1033         return 0;
1034 }
1035 #endif /* CONFIG_NUMA_BALANCING */
1036 
1037 #endif /* CONFIG_MMU */
1038 
1039 #ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
1040 
1041 #ifndef __PAGETABLE_P4D_FOLDED
1042 int p4d_set_huge(p4d_t *p4d, phys_addr_t addr, pgprot_t prot);
1043 int p4d_clear_huge(p4d_t *p4d);
1044 #else
1045 static inline int p4d_set_huge(p4d_t *p4d, phys_addr_t addr, pgprot_t prot)
1046 {
1047         return 0;
1048 }
1049 static inline int p4d_clear_huge(p4d_t *p4d)
1050 {
1051         return 0;
1052 }
1053 #endif /* !__PAGETABLE_P4D_FOLDED */
1054 
1055 int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot);
1056 int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot);
1057 int pud_clear_huge(pud_t *pud);
1058 int pmd_clear_huge(pmd_t *pmd);
1059 int p4d_free_pud_page(p4d_t *p4d, unsigned long addr);
1060 int pud_free_pmd_page(pud_t *pud, unsigned long addr);
1061 int pmd_free_pte_page(pmd_t *pmd, unsigned long addr);
1062 #else   /* !CONFIG_HAVE_ARCH_HUGE_VMAP */
1063 static inline int p4d_set_huge(p4d_t *p4d, phys_addr_t addr, pgprot_t prot)
1064 {
1065         return 0;
1066 }
1067 static inline int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot)
1068 {
1069         return 0;
1070 }
1071 static inline int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot)
1072 {
1073         return 0;
1074 }
1075 static inline int p4d_clear_huge(p4d_t *p4d)
1076 {
1077         return 0;
1078 }
1079 static inline int pud_clear_huge(pud_t *pud)
1080 {
1081         return 0;
1082 }
1083 static inline int pmd_clear_huge(pmd_t *pmd)
1084 {
1085         return 0;
1086 }
1087 static inline int p4d_free_pud_page(p4d_t *p4d, unsigned long addr)
1088 {
1089         return 0;
1090 }
1091 static inline int pud_free_pmd_page(pud_t *pud, unsigned long addr)
1092 {
1093         return 0;
1094 }
1095 static inline int pmd_free_pte_page(pmd_t *pmd, unsigned long addr)
1096 {
1097         return 0;
1098 }
1099 #endif  /* CONFIG_HAVE_ARCH_HUGE_VMAP */
1100 
1101 #ifndef __HAVE_ARCH_FLUSH_PMD_TLB_RANGE
1102 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
1103 /*
1104  * ARCHes with special requirements for evicting THP backing TLB entries can
 1105  * implement this. Otherwise, it can also help optimize the normal TLB flush
 1106  * in the THP regime. The stock flush_tlb_range() typically has an optimization
 1107  * to nuke the entire TLB if the flush span is greater than a threshold, which
 1108  * will likely be true for a single huge page. Thus a single THP flush would
 1109  * invalidate the entire TLB, which is not desirable.
1110  * e.g. see arch/arc: flush_pmd_tlb_range
1111  */
1112 #define flush_pmd_tlb_range(vma, addr, end)     flush_tlb_range(vma, addr, end)
1113 #define flush_pud_tlb_range(vma, addr, end)     flush_tlb_range(vma, addr, end)
1114 #else
1115 #define flush_pmd_tlb_range(vma, addr, end)     BUILD_BUG()
1116 #define flush_pud_tlb_range(vma, addr, end)     BUILD_BUG()
1117 #endif
1118 #endif
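
/*
 * Illustrative sketch (simplified, debug assertions omitted, not from this
 * header): the generic THP helpers in mm/pgtable-generic.c are the main
 * consumers of flush_pmd_tlb_range(), roughly along these lines:
 *
 *	pmd_t pmdp_huge_clear_flush(struct vm_area_struct *vma,
 *				    unsigned long address, pmd_t *pmdp)
 *	{
 *		pmd_t pmd = pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);
 *
 *		flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
 *		return pmd;
 *	}
 */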
1119 
1120 struct file;
1121 int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
1122                         unsigned long size, pgprot_t *vma_prot);
1123 
1124 #ifndef CONFIG_X86_ESPFIX64
1125 static inline void init_espfix_bsp(void) { }
1126 #endif
1127 
1128 extern void __init pgtable_cache_init(void);
1129 
1130 #ifndef __HAVE_ARCH_PFN_MODIFY_ALLOWED
1131 static inline bool pfn_modify_allowed(unsigned long pfn, pgprot_t prot)
1132 {
1133         return true;
1134 }
1135 
1136 static inline bool arch_has_pfn_modify_check(void)
1137 {
1138         return false;
1139 }
 1140 #endif /* !__HAVE_ARCH_PFN_MODIFY_ALLOWED */
1141 
1142 /*
1143  * Architecture PAGE_KERNEL_* fallbacks
1144  *
1145  * Some architectures don't define certain PAGE_KERNEL_* flags. This is either
1146  * because they really don't support them, or the port needs to be updated to
 1147  * reflect the required functionality. Below is a set of relatively safe,
 1148  * best-effort fallbacks which we can count on until the architectures
 1149  * define them on their own.
1150  */
1151 
1152 #ifndef PAGE_KERNEL_RO
1153 # define PAGE_KERNEL_RO PAGE_KERNEL
1154 #endif
1155 
1156 #ifndef PAGE_KERNEL_EXEC
1157 # define PAGE_KERNEL_EXEC PAGE_KERNEL
1158 #endif
1159 
1160 #endif /* !__ASSEMBLY__ */
1161 
1162 #ifndef io_remap_pfn_range
1163 #define io_remap_pfn_range remap_pfn_range
1164 #endif
1165 
1166 #ifndef has_transparent_hugepage
1167 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
1168 #define has_transparent_hugepage() 1
1169 #else
1170 #define has_transparent_hugepage() 0
1171 #endif
1172 #endif
1173 
1174 /*
1175  * On some architectures it depends on the mm if the p4d/pud or pmd
1176  * layer of the page table hierarchy is folded or not.
1177  */
1178 #ifndef mm_p4d_folded
1179 #define mm_p4d_folded(mm)       __is_defined(__PAGETABLE_P4D_FOLDED)
1180 #endif
1181 
1182 #ifndef mm_pud_folded
1183 #define mm_pud_folded(mm)       __is_defined(__PAGETABLE_PUD_FOLDED)
1184 #endif
1185 
1186 #ifndef mm_pmd_folded
1187 #define mm_pmd_folded(mm)       __is_defined(__PAGETABLE_PMD_FOLDED)
1188 #endif
1189 
1190 #endif /* _ASM_GENERIC_PGTABLE_H */
