root/include/linux/page-flags.h


DEFINITIONS

This source file includes the following definitions.
  1. compound_head
  2. PageTail
  3. PageCompound
  4. PagePoisoned
  5. page_init_poison
  6. PAGEFLAG_FALSE
  7. PAGEFLAG_FALSE
  8. PageMappingFlags
  9. PageAnon
  10. __PageMovable
  11. PageKsm
  12. TESTPAGEFLAG_FALSE
  13. __SetPageUptodate
  14. SetPageUptodate
  15. set_page_writeback
  16. set_page_writeback_keepwrite
  17. __PAGEFLAG
  18. clear_compound_head
  19. ClearPageCompound
  20. TESTPAGEFLAG_FALSE
  21. PageTransHuge
  22. PageTransCompound
  23. PageTransCompoundMap
  24. PageTransTail
  25. PageDoubleMap
  26. SetPageDoubleMap
  27. ClearPageDoubleMap
  28. TestSetPageDoubleMap
  29. TestClearPageDoubleMap
  30. page_has_type
  31. PAGE_TYPE_OPS
  32. SetPageSlabPfmemalloc
  33. __ClearPageSlabPfmemalloc
  34. ClearPageSlabPfmemalloc
  35. page_has_private

   1 /* SPDX-License-Identifier: GPL-2.0 */
   2 /*
   3  * Macros for manipulating and testing page->flags
   4  */
   5 
   6 #ifndef PAGE_FLAGS_H
   7 #define PAGE_FLAGS_H
   8 
   9 #include <linux/types.h>
  10 #include <linux/bug.h>
  11 #include <linux/mmdebug.h>
  12 #ifndef __GENERATING_BOUNDS_H
  13 #include <linux/mm_types.h>
  14 #include <generated/bounds.h>
  15 #endif /* !__GENERATING_BOUNDS_H */
  16 
  17 /*
  18  * Various page->flags bits:
  19  *
  20  * PG_reserved is set for special pages. The "struct page" of such a page
  21  * should in general not be touched (e.g. set dirty) except by its owner.
  22  * Pages marked as PG_reserved include:
  23  * - Pages part of the kernel image (including vDSO) and similar (e.g. BIOS,
  24  *   initrd, HW tables)
  25  * - Pages reserved or allocated early during boot (before the page allocator
  26  *   was initialized). This includes (depending on the architecture) the
  27  *   initial vmemmap, initial page tables, crashkernel, elfcorehdr, and much
  28  *   much more. Once (if ever) freed, PG_reserved is cleared and they will
  29  *   be given to the page allocator.
  30  * - Pages falling into physical memory gaps - not IORESOURCE_SYSRAM. Trying
  31  *   to read/write these pages might end badly. Don't touch!
  32  * - The zero page(s)
  33  * - Pages not added to the page allocator when onlining a section because
  34  *   they were excluded via the online_page_callback() or because they are
  35  *   PG_hwpoison.
  36  * - Pages allocated in the context of kexec/kdump (loaded kernel image,
  37  *   control pages, vmcoreinfo)
  38  * - MMIO/DMA pages. Some architectures don't allow ioremapping pages that are
  39  *   not marked PG_reserved (as they might be in use by somebody else who does
  40  *   not respect the caching strategy).
  41  * - Pages part of an offline section (struct pages of offline sections should
  42  *   not be trusted as they will be initialized when first onlined).
  43  * - MCA pages on ia64
  44  * - Pages holding CPU notes for POWER Firmware Assisted Dump
  45  * - Device memory (e.g. PMEM, DAX, HMM)
  46  * Some PG_reserved pages will be excluded from the hibernation image.
  47  * PG_reserved does in general not hinder anybody from dumping or swapping
  48  * and is no longer required for remap_pfn_range(). ioremap might require it.
  49  * Consequently, PG_reserved for a page mapped into user space can indicate
  50  * the zero page, the vDSO, MMIO pages or device memory.
  51  *
  52  * The PG_private bitflag is set on pagecache pages if they contain filesystem-
  53  * specific data (which is normally at page->private). It can also be used by
  54  * private allocations for their own purposes.
  55  *
  56  * During initiation of disk I/O, PG_locked is set. This bit is set before I/O
  57  * and cleared when writeback _starts_ or when read _completes_. PG_writeback
  58  * is set before writeback starts and cleared when it finishes.
  59  *
  60  * PG_locked also pins a page in pagecache, and blocks truncation of the file
  61  * while it is held.
  62  *
  63  * page_waitqueue(page) is a wait queue of all tasks waiting for the page
  64  * to become unlocked.
  65  *
  66  * PG_uptodate tells whether the page's contents are valid.  When a read
  67  * completes, the page becomes uptodate, unless a disk I/O error happened.
  68  *
  69  * PG_referenced, PG_reclaim are used for page reclaim for anonymous and
  70  * file-backed pagecache (see mm/vmscan.c).
  71  *
  72  * PG_error is set to indicate that an I/O error occurred on this page.
  73  *
  74  * PG_arch_1 is an architecture specific page state bit.  The generic code
  75  * guarantees that this bit is cleared for a page when it is first entered into
  76  * the page cache.
  77  *
  78  * PG_hwpoison indicates that a page got corrupted in hardware and contains
  79  * data with incorrect ECC bits that triggered a machine check. Accessing it is
  80  * not safe since it may cause another machine check. Don't touch!
  81  */
  82 
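     /*
      * Illustrative sketch (not part of this header; the example_* name is
      * hypothetical): the PG_locked/PG_writeback life cycle described above,
      * as a write-out path might drive it.  lock_page(), unlock_page() and
      * end_page_writeback() are declared in linux/pagemap.h;
      * set_page_writeback() is defined later in this file; the actual I/O
      * submission is elided.
      */
     static inline void example_write_out(struct page *page)
     {
             lock_page(page);            /* PG_locked set; blocks truncation */
             set_page_writeback(page);   /* PG_writeback set before I/O */
             unlock_page(page);          /* PG_locked cleared as writeback starts */
             /* ... submit the I/O; on completion the I/O path calls: */
             end_page_writeback(page);   /* PG_writeback cleared when I/O finishes */
     }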
  83 /*
  84  * Don't use the *_dontuse flags.  Use the macros.  Otherwise you'll break
  85  * locked- and dirty-page accounting.
  86  *
  87  * The page flags field is split into two parts, the main flags area
  88  * which extends from the low bits upwards, and the fields area which
  89  * extends from the high bits downwards.
  90  *
  91  *  | FIELD | ... | FLAGS |
  92  *  N-1           ^       0
  93  *               (NR_PAGEFLAGS)
  94  *
  95  * The fields area is reserved for fields mapping zone, node (for NUMA) and
  96  * SPARSEMEM section (for variants of SPARSEMEM that require section ids like
  97  * SPARSEMEM_EXTREME with !SPARSEMEM_VMEMMAP).
  98  */
  99 enum pageflags {
 100         PG_locked,              /* Page is locked. Don't touch. */
 101         PG_referenced,
 102         PG_uptodate,
 103         PG_dirty,
 104         PG_lru,
 105         PG_active,
 106         PG_workingset,
 107         PG_waiters,             /* Page has waiters, check its waitqueue. Must be bit #7 and in the same byte as "PG_locked" */
 108         PG_error,
 109         PG_slab,
 110         PG_owner_priv_1,        /* Owner use. If pagecache, fs may use */
 111         PG_arch_1,
 112         PG_reserved,
 113         PG_private,             /* If pagecache, has fs-private data */
 114         PG_private_2,           /* If pagecache, has fs aux data */
 115         PG_writeback,           /* Page is under writeback */
 116         PG_head,                /* A head page */
 117         PG_mappedtodisk,        /* Has blocks allocated on-disk */
 118         PG_reclaim,             /* To be reclaimed asap */
 119         PG_swapbacked,          /* Page is backed by RAM/swap */
 120         PG_unevictable,         /* Page is "unevictable"  */
 121 #ifdef CONFIG_MMU
 122         PG_mlocked,             /* Page is vma mlocked */
 123 #endif
 124 #ifdef CONFIG_ARCH_USES_PG_UNCACHED
 125         PG_uncached,            /* Page has been mapped as uncached */
 126 #endif
 127 #ifdef CONFIG_MEMORY_FAILURE
 128         PG_hwpoison,            /* hardware poisoned page. Don't touch */
 129 #endif
 130 #if defined(CONFIG_IDLE_PAGE_TRACKING) && defined(CONFIG_64BIT)
 131         PG_young,
 132         PG_idle,
 133 #endif
 134         __NR_PAGEFLAGS,
 135 
 136         /* Filesystems */
 137         PG_checked = PG_owner_priv_1,
 138 
 139         /* SwapBacked */
 140         PG_swapcache = PG_owner_priv_1, /* Swap page: swp_entry_t in private */
 141 
 142         /* Two page bits are conscripted by FS-Cache to maintain local caching
 143          * state.  These bits are set on pages belonging to the netfs's inodes
 144          * when those inodes are being locally cached.
 145          */
 146         PG_fscache = PG_private_2,      /* page backed by cache */
 147 
 148         /* XEN */
 149         /* Pinned in Xen as a read-only pagetable page. */
 150         PG_pinned = PG_owner_priv_1,
 151         /* Pinned as part of domain save (see xen_mm_pin_all()). */
 152         PG_savepinned = PG_dirty,
 153         /* Has a grant mapping of another (foreign) domain's page. */
 154         PG_foreign = PG_owner_priv_1,
 155         /* Remapped by swiotlb-xen. */
 156         PG_xen_remapped = PG_owner_priv_1,
 157 
 158         /* SLOB */
 159         PG_slob_free = PG_private,
 160 
 161         /* Compound pages. Stored in first tail page's flags */
 162         PG_double_map = PG_private_2,
 163 
 164         /* non-lru isolated movable page */
 165         PG_isolated = PG_reclaim,
 166 };
 167 
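     /*
      * Illustrative sketch (the example_* name is hypothetical): the "fields"
      * area above NR_PAGEFLAGS is consumed by helpers in linux/mm.h such as
      * page_zonenum() and page_to_nid(), roughly along these lines:
      */
     static inline enum zone_type example_page_zonenum(const struct page *page)
     {
             /* the zone id lives in the high bits, above the flag bits */
             return (page->flags >> ZONES_PGSHIFT) & ZONES_MASK;
     }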
 168 #ifndef __GENERATING_BOUNDS_H
 169 
 170 struct page;    /* forward declaration */
 171 
 172 static inline struct page *compound_head(struct page *page)
 173 {
 174         unsigned long head = READ_ONCE(page->compound_head);
 175 
 176         if (unlikely(head & 1))
 177                 return (struct page *) (head - 1);
 178         return page;
 179 }
 180 
 181 static __always_inline int PageTail(struct page *page)
 182 {
 183         return READ_ONCE(page->compound_head) & 1;
 184 }
 185 
 186 static __always_inline int PageCompound(struct page *page)
 187 {
 188         return test_bit(PG_head, &page->flags) || PageTail(page);
 189 }
 190 
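     /*
      * Worked example (illustration only; the example_* name is
      * hypothetical): set_compound_head() further down stores the head
      * pointer with bit 0 set.  struct page is word-aligned, so bit 0 of a
      * real pointer is always clear and can double as the "this is a tail
      * page" tag that PageTail() and compound_head() test above.
      */
     static inline bool example_compound_encoding(struct page *head,
                                                  struct page *tail)
     {
             set_compound_head(tail, head);      /* compound_head = (ulong)head + 1 */
             return PageTail(tail) &&            /* bit 0 acts as the tail marker */
                    compound_head(tail) == head; /* "head - 1" strips the tag */
     }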
 191 #define PAGE_POISON_PATTERN     -1l
 192 static inline int PagePoisoned(const struct page *page)
 193 {
 194         return page->flags == PAGE_POISON_PATTERN;
 195 }
 196 
 197 #ifdef CONFIG_DEBUG_VM
 198 void page_init_poison(struct page *page, size_t size);
 199 #else
 200 static inline void page_init_poison(struct page *page, size_t size)
 201 {
 202 }
 203 #endif
 204 
 205 /*
 206  * Page flags policies wrt compound pages
 207  *
 208  * PF_POISONED_CHECK:
 209  *     checks if this struct page is poisoned/uninitialized.
 210  *
 211  * PF_ANY:
 212  *     the page flag is relevant for small, head and tail pages.
 213  *
 214  * PF_HEAD:
 215  *     for compound pages, all operations related to the page flag are
 216  *     applied to the head page.
 217  *
 218  * PF_ONLY_HEAD:
 219  *     for compound pages, callers only ever operate on the head page.
 220  *
 221  * PF_NO_TAIL:
 222  *     modifications of the page flag must be done on small or head pages,
 223  *     checks can be done on tail pages too.
 224  *
 225  * PF_NO_COMPOUND:
 226  *     the page flag is not relevant for compound pages.
 227  */
 228 #define PF_POISONED_CHECK(page) ({                                      \
 229                 VM_BUG_ON_PGFLAGS(PagePoisoned(page), page);            \
 230                 page; })
 231 #define PF_ANY(page, enforce)   PF_POISONED_CHECK(page)
 232 #define PF_HEAD(page, enforce)  PF_POISONED_CHECK(compound_head(page))
 233 #define PF_ONLY_HEAD(page, enforce) ({                                  \
 234                 VM_BUG_ON_PGFLAGS(PageTail(page), page);                \
 235                 PF_POISONED_CHECK(page); })
 236 #define PF_NO_TAIL(page, enforce) ({                                    \
 237                 VM_BUG_ON_PGFLAGS(enforce && PageTail(page), page);     \
 238                 PF_POISONED_CHECK(compound_head(page)); })
 239 #define PF_NO_COMPOUND(page, enforce) ({                                \
 240                 VM_BUG_ON_PGFLAGS(enforce && PageCompound(page), page); \
 241                 PF_POISONED_CHECK(page); })
 242 
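     /*
      * Behaviour sketch (illustration): take PG_locked, declared below with
      * the PF_NO_TAIL policy.  Test macros pass enforce == 0 and are quietly
      * redirected to the head page; modifying macros pass enforce == 1 and
      * trip VM_BUG_ON_PGFLAGS when handed a tail page:
      *
      *      PageLocked(tail)      -> test_bit(PG_locked, &head->flags)
      *      __SetPageLocked(tail) -> VM_BUG_ON_PGFLAGS(PageTail(tail), tail)
      */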
 243 /*
 244  * Macros to create function definitions for page flags
 245  */
 246 #define TESTPAGEFLAG(uname, lname, policy)                              \
 247 static __always_inline int Page##uname(struct page *page)               \
 248         { return test_bit(PG_##lname, &policy(page, 0)->flags); }
 249 
 250 #define SETPAGEFLAG(uname, lname, policy)                               \
 251 static __always_inline void SetPage##uname(struct page *page)           \
 252         { set_bit(PG_##lname, &policy(page, 1)->flags); }
 253 
 254 #define CLEARPAGEFLAG(uname, lname, policy)                             \
 255 static __always_inline void ClearPage##uname(struct page *page)         \
 256         { clear_bit(PG_##lname, &policy(page, 1)->flags); }
 257 
 258 #define __SETPAGEFLAG(uname, lname, policy)                             \
 259 static __always_inline void __SetPage##uname(struct page *page)         \
 260         { __set_bit(PG_##lname, &policy(page, 1)->flags); }
 261 
 262 #define __CLEARPAGEFLAG(uname, lname, policy)                           \
 263 static __always_inline void __ClearPage##uname(struct page *page)       \
 264         { __clear_bit(PG_##lname, &policy(page, 1)->flags); }
 265 
 266 #define TESTSETFLAG(uname, lname, policy)                               \
 267 static __always_inline int TestSetPage##uname(struct page *page)        \
 268         { return test_and_set_bit(PG_##lname, &policy(page, 1)->flags); }
 269 
 270 #define TESTCLEARFLAG(uname, lname, policy)                             \
 271 static __always_inline int TestClearPage##uname(struct page *page)      \
 272         { return test_and_clear_bit(PG_##lname, &policy(page, 1)->flags); }
 273 
 274 #define PAGEFLAG(uname, lname, policy)                                  \
 275         TESTPAGEFLAG(uname, lname, policy)                              \
 276         SETPAGEFLAG(uname, lname, policy)                               \
 277         CLEARPAGEFLAG(uname, lname, policy)
 278 
 279 #define __PAGEFLAG(uname, lname, policy)                                \
 280         TESTPAGEFLAG(uname, lname, policy)                              \
 281         __SETPAGEFLAG(uname, lname, policy)                             \
 282         __CLEARPAGEFLAG(uname, lname, policy)
 283 
 284 #define TESTSCFLAG(uname, lname, policy)                                \
 285         TESTSETFLAG(uname, lname, policy)                               \
 286         TESTCLEARFLAG(uname, lname, policy)
 287 
 288 #define TESTPAGEFLAG_FALSE(uname)                                       \
 289 static inline int Page##uname(const struct page *page) { return 0; }
 290 
 291 #define SETPAGEFLAG_NOOP(uname)                                         \
 292 static inline void SetPage##uname(struct page *page) {  }
 293 
 294 #define CLEARPAGEFLAG_NOOP(uname)                                       \
 295 static inline void ClearPage##uname(struct page *page) {  }
 296 
 297 #define __CLEARPAGEFLAG_NOOP(uname)                                     \
 298 static inline void __ClearPage##uname(struct page *page) {  }
 299 
 300 #define TESTSETFLAG_FALSE(uname)                                        \
 301 static inline int TestSetPage##uname(struct page *page) { return 0; }
 302 
 303 #define TESTCLEARFLAG_FALSE(uname)                                      \
 304 static inline int TestClearPage##uname(struct page *page) { return 0; }
 305 
 306 #define PAGEFLAG_FALSE(uname) TESTPAGEFLAG_FALSE(uname)                 \
 307         SETPAGEFLAG_NOOP(uname) CLEARPAGEFLAG_NOOP(uname)
 308 
 309 #define TESTSCFLAG_FALSE(uname)                                         \
 310         TESTSETFLAG_FALSE(uname) TESTCLEARFLAG_FALSE(uname)
 311 
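     /*
      * Expansion example (derived from the macros above): the line
      * "PAGEFLAG(Dirty, dirty, PF_HEAD)" below generates three accessors:
      *
      *      static __always_inline int PageDirty(struct page *page)
      *              { return test_bit(PG_dirty, &PF_HEAD(page, 0)->flags); }
      *      static __always_inline void SetPageDirty(struct page *page)
      *              { set_bit(PG_dirty, &PF_HEAD(page, 1)->flags); }
      *      static __always_inline void ClearPageDirty(struct page *page)
      *              { clear_bit(PG_dirty, &PF_HEAD(page, 1)->flags); }
      *
      * i.e. every accessor resolves compound_head(page) first, per the
      * PF_HEAD policy.
      */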
 312 __PAGEFLAG(Locked, locked, PF_NO_TAIL)
 313 PAGEFLAG(Waiters, waiters, PF_ONLY_HEAD) __CLEARPAGEFLAG(Waiters, waiters, PF_ONLY_HEAD)
 314 PAGEFLAG(Error, error, PF_NO_TAIL) TESTCLEARFLAG(Error, error, PF_NO_TAIL)
 315 PAGEFLAG(Referenced, referenced, PF_HEAD)
 316         TESTCLEARFLAG(Referenced, referenced, PF_HEAD)
 317         __SETPAGEFLAG(Referenced, referenced, PF_HEAD)
 318 PAGEFLAG(Dirty, dirty, PF_HEAD) TESTSCFLAG(Dirty, dirty, PF_HEAD)
 319         __CLEARPAGEFLAG(Dirty, dirty, PF_HEAD)
 320 PAGEFLAG(LRU, lru, PF_HEAD) __CLEARPAGEFLAG(LRU, lru, PF_HEAD)
 321 PAGEFLAG(Active, active, PF_HEAD) __CLEARPAGEFLAG(Active, active, PF_HEAD)
 322         TESTCLEARFLAG(Active, active, PF_HEAD)
 323 PAGEFLAG(Workingset, workingset, PF_HEAD)
 324         TESTCLEARFLAG(Workingset, workingset, PF_HEAD)
 325 __PAGEFLAG(Slab, slab, PF_NO_TAIL)
 326 __PAGEFLAG(SlobFree, slob_free, PF_NO_TAIL)
 327 PAGEFLAG(Checked, checked, PF_NO_COMPOUND)         /* Used by some filesystems */
 328 
 329 /* Xen */
 330 PAGEFLAG(Pinned, pinned, PF_NO_COMPOUND)
 331         TESTSCFLAG(Pinned, pinned, PF_NO_COMPOUND)
 332 PAGEFLAG(SavePinned, savepinned, PF_NO_COMPOUND);
 333 PAGEFLAG(Foreign, foreign, PF_NO_COMPOUND);
 334 PAGEFLAG(XenRemapped, xen_remapped, PF_NO_COMPOUND)
 335         TESTCLEARFLAG(XenRemapped, xen_remapped, PF_NO_COMPOUND)
 336 
 337 PAGEFLAG(Reserved, reserved, PF_NO_COMPOUND)
 338         __CLEARPAGEFLAG(Reserved, reserved, PF_NO_COMPOUND)
 339         __SETPAGEFLAG(Reserved, reserved, PF_NO_COMPOUND)
 340 PAGEFLAG(SwapBacked, swapbacked, PF_NO_TAIL)
 341         __CLEARPAGEFLAG(SwapBacked, swapbacked, PF_NO_TAIL)
 342         __SETPAGEFLAG(SwapBacked, swapbacked, PF_NO_TAIL)
 343 
 344 /*
 345  * Private page markings that may be used by the filesystem that owns the page
 346  * for its own purposes.
 347  * - PG_private and PG_private_2 cause releasepage() and co to be invoked
 348  */
 349 PAGEFLAG(Private, private, PF_ANY) __SETPAGEFLAG(Private, private, PF_ANY)
 350         __CLEARPAGEFLAG(Private, private, PF_ANY)
 351 PAGEFLAG(Private2, private_2, PF_ANY) TESTSCFLAG(Private2, private_2, PF_ANY)
 352 PAGEFLAG(OwnerPriv1, owner_priv_1, PF_ANY)
 353         TESTCLEARFLAG(OwnerPriv1, owner_priv_1, PF_ANY)
 354 
 355 /*
 356  * Only test-and-set exist for PG_writeback.  The unconditional operators are
 357  * risky: they bypass page accounting.
 358  */
 359 TESTPAGEFLAG(Writeback, writeback, PF_NO_TAIL)
 360         TESTSCFLAG(Writeback, writeback, PF_NO_TAIL)
 361 PAGEFLAG(MappedToDisk, mappedtodisk, PF_NO_TAIL)
 362 
 363 /* PG_readahead is only used for reads; PG_reclaim is only for writes */
 364 PAGEFLAG(Reclaim, reclaim, PF_NO_TAIL)
 365         TESTCLEARFLAG(Reclaim, reclaim, PF_NO_TAIL)
 366 PAGEFLAG(Readahead, reclaim, PF_NO_COMPOUND)
 367         TESTCLEARFLAG(Readahead, reclaim, PF_NO_COMPOUND)
 368 
 369 #ifdef CONFIG_HIGHMEM
 370 /*
 371  * Must use a macro here due to header dependency issues. page_zone() is not
 372  * available at this point.
 373  */
 374 #define PageHighMem(__p) is_highmem_idx(page_zonenum(__p))
 375 #else
 376 PAGEFLAG_FALSE(HighMem)
 377 #endif
 378 
 379 #ifdef CONFIG_SWAP
 380 static __always_inline int PageSwapCache(struct page *page)
 381 {
 382 #ifdef CONFIG_THP_SWAP
 383         page = compound_head(page);
 384 #endif
 385         return PageSwapBacked(page) && test_bit(PG_swapcache, &page->flags);
 386 
 387 }
 388 SETPAGEFLAG(SwapCache, swapcache, PF_NO_TAIL)
 389 CLEARPAGEFLAG(SwapCache, swapcache, PF_NO_TAIL)
 390 #else
 391 PAGEFLAG_FALSE(SwapCache)
 392 #endif
 393 
 394 PAGEFLAG(Unevictable, unevictable, PF_HEAD)
 395         __CLEARPAGEFLAG(Unevictable, unevictable, PF_HEAD)
 396         TESTCLEARFLAG(Unevictable, unevictable, PF_HEAD)
 397 
 398 #ifdef CONFIG_MMU
 399 PAGEFLAG(Mlocked, mlocked, PF_NO_TAIL)
 400         __CLEARPAGEFLAG(Mlocked, mlocked, PF_NO_TAIL)
 401         TESTSCFLAG(Mlocked, mlocked, PF_NO_TAIL)
 402 #else
 403 PAGEFLAG_FALSE(Mlocked) __CLEARPAGEFLAG_NOOP(Mlocked)
 404         TESTSCFLAG_FALSE(Mlocked)
 405 #endif
 406 
 407 #ifdef CONFIG_ARCH_USES_PG_UNCACHED
 408 PAGEFLAG(Uncached, uncached, PF_NO_COMPOUND)
 409 #else
 410 PAGEFLAG_FALSE(Uncached)
 411 #endif
 412 
 413 #ifdef CONFIG_MEMORY_FAILURE
 414 PAGEFLAG(HWPoison, hwpoison, PF_ANY)
 415 TESTSCFLAG(HWPoison, hwpoison, PF_ANY)
 416 #define __PG_HWPOISON (1UL << PG_hwpoison)
 417 extern bool set_hwpoison_free_buddy_page(struct page *page);
 418 #else
 419 PAGEFLAG_FALSE(HWPoison)
 420 static inline bool set_hwpoison_free_buddy_page(struct page *page)
 421 {
 422         return false;
 423 }
 424 #define __PG_HWPOISON 0
 425 #endif
 426 
 427 #if defined(CONFIG_IDLE_PAGE_TRACKING) && defined(CONFIG_64BIT)
 428 TESTPAGEFLAG(Young, young, PF_ANY)
 429 SETPAGEFLAG(Young, young, PF_ANY)
 430 TESTCLEARFLAG(Young, young, PF_ANY)
 431 PAGEFLAG(Idle, idle, PF_ANY)
 432 #endif
 433 
 434 /*
 435  * On an anonymous page mapped into a user virtual memory area,
 436  * page->mapping points to its anon_vma, not to a struct address_space;
 437  * with the PAGE_MAPPING_ANON bit set to distinguish it.  See rmap.h.
 438  *
 439  * On an anonymous page in a VM_MERGEABLE area, if CONFIG_KSM is enabled,
 440  * the PAGE_MAPPING_MOVABLE bit may be set along with the PAGE_MAPPING_ANON
 441  * bit; and then page->mapping points, not to an anon_vma, but to a private
 442  * structure which KSM associates with that merged page.  See ksm.h.
 443  *
 444  * PAGE_MAPPING_KSM without PAGE_MAPPING_ANON is used for a non-lru movable
 445  * page, and then page->mapping points to a struct address_space.
 446  *
 447  * Please note that, confusingly, "page_mapping" refers to the inode
 448  * address_space which maps the page from disk; whereas "page_mapped"
 449  * refers to user virtual address space into which the page is mapped.
 450  */
 451 #define PAGE_MAPPING_ANON       0x1
 452 #define PAGE_MAPPING_MOVABLE    0x2
 453 #define PAGE_MAPPING_KSM        (PAGE_MAPPING_ANON | PAGE_MAPPING_MOVABLE)
 454 #define PAGE_MAPPING_FLAGS      (PAGE_MAPPING_ANON | PAGE_MAPPING_MOVABLE)
 455 
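     /*
      * Worked example of the tagging above (illustration): the bottom two
      * bits of page->mapping say what the rest of the pointer is:
      *
      *      00 -> struct address_space *            (plain pagecache)
      *      01 -> anon_vma                          (PageAnon)
      *      10 -> address_space of a movable page   (__PageMovable)
      *      11 -> KSM private structure             (PageKsm, also PageAnon)
      *
      * PageAnon() below tests only bit 0 (so it also covers KSM pages),
      * while __PageMovable() and PageKsm() compare the full two-bit field.
      */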
 456 static __always_inline int PageMappingFlags(struct page *page)
 457 {
 458         return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) != 0;
 459 }
 460 
 461 static __always_inline int PageAnon(struct page *page)
 462 {
 463         page = compound_head(page);
 464         return ((unsigned long)page->mapping & PAGE_MAPPING_ANON) != 0;
 465 }
 466 
 467 static __always_inline int __PageMovable(struct page *page)
 468 {
 469         return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) ==
 470                                 PAGE_MAPPING_MOVABLE;
 471 }
 472 
 473 #ifdef CONFIG_KSM
 474 /*
 475  * A KSM page is one of those write-protected "shared pages" or "merged pages"
 476  * which KSM maps into multiple mms, wherever identical anonymous page content
 477  * is found in VM_MERGEABLE vmas.  It's a PageAnon page, pointing not to any
 478  * anon_vma, but to that page's node of the stable tree.
 479  */
 480 static __always_inline int PageKsm(struct page *page)
 481 {
 482         page = compound_head(page);
 483         return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) ==
 484                                 PAGE_MAPPING_KSM;
 485 }
 486 #else
 487 TESTPAGEFLAG_FALSE(Ksm)
 488 #endif
 489 
 490 u64 stable_page_flags(struct page *page);
 491 
 492 static inline int PageUptodate(struct page *page)
 493 {
 494         int ret;
 495         page = compound_head(page);
 496         ret = test_bit(PG_uptodate, &(page)->flags);
 497         /*
 498          * Must ensure that the data we read out of the page is loaded
 499          * _after_ we've loaded page->flags to check for PageUptodate.
 500          * We can skip the barrier if the page is not uptodate, because
 501          * we wouldn't be reading anything from it.
 502          *
 503          * See SetPageUptodate() for the other side of the story.
 504          */
 505         if (ret)
 506                 smp_rmb();
 507 
 508         return ret;
 509 }
 510 
 511 static __always_inline void __SetPageUptodate(struct page *page)
 512 {
 513         VM_BUG_ON_PAGE(PageTail(page), page);
 514         smp_wmb();
 515         __set_bit(PG_uptodate, &page->flags);
 516 }
 517 
 518 static __always_inline void SetPageUptodate(struct page *page)
 519 {
 520         VM_BUG_ON_PAGE(PageTail(page), page);
 521         /*
 522          * Memory barrier must be issued before setting the PG_uptodate bit,
 523          * so that all previous stores issued in order to bring the page
 524          * uptodate are actually visible before PageUptodate becomes true.
 525          */
 526         smp_wmb();
 527         set_bit(PG_uptodate, &page->flags);
 528 }
 529 
 530 CLEARPAGEFLAG(Uptodate, uptodate, PF_NO_TAIL)
 531 
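     /*
      * Barrier pairing sketch (illustration): the smp_wmb() in
      * SetPageUptodate() pairs with the smp_rmb() in PageUptodate(), so a
      * reader that observes PG_uptodate also observes the data written
      * before the flag was set:
      *
      *      writer (I/O completion)        reader
      *      -----------------------        -------------------------------
      *      copy data into the page        if (PageUptodate(page))
      *      SetPageUptodate(page);                 reading the data is safe
      */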
 532 int test_clear_page_writeback(struct page *page);
 533 int __test_set_page_writeback(struct page *page, bool keep_write);
 534 
 535 #define test_set_page_writeback(page)                   \
 536         __test_set_page_writeback(page, false)
 537 #define test_set_page_writeback_keepwrite(page) \
 538         __test_set_page_writeback(page, true)
 539 
 540 static inline void set_page_writeback(struct page *page)
 541 {
 542         test_set_page_writeback(page);
 543 }
 544 
 545 static inline void set_page_writeback_keepwrite(struct page *page)
 546 {
 547         test_set_page_writeback_keepwrite(page);
 548 }
 549 
 550 __PAGEFLAG(Head, head, PF_ANY) CLEARPAGEFLAG(Head, head, PF_ANY)
 551 
 552 static __always_inline void set_compound_head(struct page *page, struct page *head)
 553 {
 554         WRITE_ONCE(page->compound_head, (unsigned long)head + 1);
 555 }
 556 
 557 static __always_inline void clear_compound_head(struct page *page)
 558 {
 559         WRITE_ONCE(page->compound_head, 0);
 560 }
 561 
 562 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 563 static inline void ClearPageCompound(struct page *page)
 564 {
 565         BUG_ON(!PageHead(page));
 566         ClearPageHead(page);
 567 }
 568 #endif
 569 
 570 #define PG_head_mask ((1UL << PG_head))
 571 
 572 #ifdef CONFIG_HUGETLB_PAGE
 573 int PageHuge(struct page *page);
 574 int PageHeadHuge(struct page *page);
 575 bool page_huge_active(struct page *page);
 576 #else
 577 TESTPAGEFLAG_FALSE(Huge)
 578 TESTPAGEFLAG_FALSE(HeadHuge)
 579 
 580 static inline bool page_huge_active(struct page *page)
 581 {
 582         return false;
 583 }
 584 #endif
 585 
 586 
 587 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 588 /*
 589  * PageHuge() only returns true for hugetlbfs pages, but not for
 590  * normal or transparent huge pages.
 591  *
 592  * PageTransHuge() returns true for both transparent huge and
 593  * hugetlbfs pages, but not normal pages. PageTransHuge() can only be
 594  * called in the core VM paths where hugetlbfs pages can't exist.
 595  */
 596 static inline int PageTransHuge(struct page *page)
 597 {
 598         VM_BUG_ON_PAGE(PageTail(page), page);
 599         return PageHead(page);
 600 }
 601 
 602 /*
 603  * PageTransCompound returns true for both transparent huge pages
 604  * and hugetlbfs pages, so it should only be called when it's known
 605  * that hugetlbfs pages aren't involved.
 606  */
 607 static inline int PageTransCompound(struct page *page)
 608 {
 609         return PageCompound(page);
 610 }
 611 
 612 /*
 613  * PageTransCompoundMap is the same as PageTransCompound, but it also
 614  * guarantees the primary MMU has the entire compound page mapped
 615  * through pmd_trans_huge, which in turn guarantees the secondary MMUs
 616  * can also map the entire compound page. This allows the secondary
 617  * MMUs to call get_user_pages() only once for each compound page and
 618  * to immediately map the entire compound page with a single secondary
 619  * MMU fault. If there will be a pmd split later, the secondary MMUs
 620  * will get an update through the MMU notifier invalidation through
 621  * split_huge_pmd().
 622  *
 623  * Unlike PageTransCompound, this is safe to be called only while
 624  * split_huge_pmd() cannot run from under us, like if protected by the
 625  * MMU notifier, otherwise it may result in page->_mapcount check false
 626  * positives.
 627  *
 628  * We have to treat page cache THP differently since every subpage of it
 629  * would get _mapcount inc'ed once it is PMD mapped.  But it may also be
 630  * PTE mapped in the current process, so we compare the subpage's
 631  * _mapcount to compound_mapcount to filter out the PTE-mapped case.
 632  */
 633 static inline int PageTransCompoundMap(struct page *page)
 634 {
 635         struct page *head;
 636 
 637         if (!PageTransCompound(page))
 638                 return 0;
 639 
 640         if (PageAnon(page))
 641                 return atomic_read(&page->_mapcount) < 0;
 642 
 643         head = compound_head(page);
 644         /* File THP is PMD mapped and not PTE mapped */
 645         return atomic_read(&page->_mapcount) ==
 646                atomic_read(compound_mapcount_ptr(head));
 647 }
 648 
 649 /*
 650  * PageTransTail returns true for both transparent huge pages
 651  * and hugetlbfs pages, so it should only be called when it's known
 652  * that hugetlbfs pages aren't involved.
 653  */
 654 static inline int PageTransTail(struct page *page)
 655 {
 656         return PageTail(page);
 657 }
 658 
 659 /*
 660  * PageDoubleMap indicates that the compound page is mapped with PTEs as well
 661  * as PMDs.
 662  *
 663  * This is required for optimization of rmap operations for THP: we can postpone
 664  * per small page mapcount accounting (and its overhead from atomic operations)
 665  * until the first PMD split.
 666  *
 667  * For such a page, PageDoubleMap means ->_mapcount in all sub-pages is offset
 668  * up by one. This reference goes away with the last compound_mapcount.
 669  *
 670  * See also __split_huge_pmd_locked() and page_remove_anon_compound_rmap().
 671  */
 672 static inline int PageDoubleMap(struct page *page)
 673 {
 674         return PageHead(page) && test_bit(PG_double_map, &page[1].flags);
 675 }
 676 
 677 static inline void SetPageDoubleMap(struct page *page)
 678 {
 679         VM_BUG_ON_PAGE(!PageHead(page), page);
 680         set_bit(PG_double_map, &page[1].flags);
 681 }
 682 
 683 static inline void ClearPageDoubleMap(struct page *page)
 684 {
 685         VM_BUG_ON_PAGE(!PageHead(page), page);
 686         clear_bit(PG_double_map, &page[1].flags);
 687 }
 688 static inline int TestSetPageDoubleMap(struct page *page)
 689 {
 690         VM_BUG_ON_PAGE(!PageHead(page), page);
 691         return test_and_set_bit(PG_double_map, &page[1].flags);
 692 }
 693 
 694 static inline int TestClearPageDoubleMap(struct page *page)
 695 {
 696         VM_BUG_ON_PAGE(!PageHead(page), page);
 697         return test_and_clear_bit(PG_double_map, &page[1].flags);
 698 }
 699 
 700 #else
 701 TESTPAGEFLAG_FALSE(TransHuge)
 702 TESTPAGEFLAG_FALSE(TransCompound)
 703 TESTPAGEFLAG_FALSE(TransCompoundMap)
 704 TESTPAGEFLAG_FALSE(TransTail)
 705 PAGEFLAG_FALSE(DoubleMap)
 706         TESTSETFLAG_FALSE(DoubleMap)
 707         TESTCLEARFLAG_FALSE(DoubleMap)
 708 #endif
 709 
 710 /*
 711  * For pages that are never mapped to userspace (and aren't PageSlab),
 712  * page_type may be used.  Because it is initialised to -1, we invert the
 713  * sense of the bit, so __SetPageFoo *clears* the bit used for PageFoo, and
 714  * __ClearPageFoo *sets* the bit used for PageFoo.  We reserve a few high and
 715  * low bits so that an underflow or overflow of page_mapcount() won't be
 716  * mistaken for a page type value.
 717  */
 718 
 719 #define PAGE_TYPE_BASE  0xf0000000
 720 /* Reserve              0x0000007f to catch underflows of page_mapcount */
 721 #define PAGE_MAPCOUNT_RESERVE   -128
 722 #define PG_buddy        0x00000080
 723 #define PG_offline      0x00000100
 724 #define PG_kmemcg       0x00000200
 725 #define PG_table        0x00000400
 726 #define PG_guard        0x00000800
 727 
 728 #define PageType(page, flag)                                            \
 729         ((page->page_type & (PAGE_TYPE_BASE | flag)) == PAGE_TYPE_BASE)
 730 
 731 static inline int page_has_type(struct page *page)
 732 {
 733         return (int)page->page_type < PAGE_MAPCOUNT_RESERVE;
 734 }
 735 
 736 #define PAGE_TYPE_OPS(uname, lname)                                     \
 737 static __always_inline int Page##uname(struct page *page)               \
 738 {                                                                       \
 739         return PageType(page, PG_##lname);                              \
 740 }                                                                       \
 741 static __always_inline void __SetPage##uname(struct page *page)         \
 742 {                                                                       \
 743         VM_BUG_ON_PAGE(!PageType(page, 0), page);                       \
 744         page->page_type &= ~PG_##lname;                                 \
 745 }                                                                       \
 746 static __always_inline void __ClearPage##uname(struct page *page)       \
 747 {                                                                       \
 748         VM_BUG_ON_PAGE(!Page##uname(page), page);                       \
 749         page->page_type |= PG_##lname;                                  \
 750 }
 751 
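     /*
      * Worked example (illustration): page_type is initialised to -1, i.e.
      * 0xffffffff.  "Setting" PageBuddy therefore clears PG_buddy:
      *
      *      initial:          0xffffffff & 0xf0000080 == 0xf0000080  -> false
      *      __SetPageBuddy(): page_type &= ~0x80     ->  0xffffff7f
      *      now:              0xffffff7f & 0xf0000080 == 0xf0000000  -> true
      *
      * The 0xf0000000 base survives the reserved over/underflow range of
      * page_mapcount(), so a page type can never be mistaken for a mapcount.
      */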
 752 /*
 753  * PageBuddy() indicates that the page is free and in the buddy system
 754  * (see mm/page_alloc.c).
 755  */
 756 PAGE_TYPE_OPS(Buddy, buddy)
 757 
 758 /*
 759  * PageOffline() indicates that the page is logically offline although the
 760  * containing section is online (e.g. it was inflated in a balloon driver or
 761  * not onlined when onlining the section).
 762  * The content of these pages is effectively stale. Such pages should not
 763  * be touched (read/write/dump/save) except by their owner.
 764  */
 765 PAGE_TYPE_OPS(Offline, offline)
 766 
 767 /*
 768  * If kmemcg is enabled, the buddy allocator will set PageKmemcg() on
 769  * pages allocated with __GFP_ACCOUNT. It gets cleared on page free.
 770  */
 771 PAGE_TYPE_OPS(Kmemcg, kmemcg)
 772 
 773 /*
 774  * Marks pages in use as page tables.
 775  */
 776 PAGE_TYPE_OPS(Table, table)
 777 
 778 /*
 779  * Marks guardpages used with debug_pagealloc.
 780  */
 781 PAGE_TYPE_OPS(Guard, guard)
 782 
 783 extern bool is_free_buddy_page(struct page *page);
 784 
 785 __PAGEFLAG(Isolated, isolated, PF_ANY);
 786 
 787 /*
 788  * If network-based swap is enabled, sl*b must keep track of whether pages
 789  * were allocated from pfmemalloc reserves.
 790  */
 791 static inline int PageSlabPfmemalloc(struct page *page)
 792 {
 793         VM_BUG_ON_PAGE(!PageSlab(page), page);
 794         return PageActive(page);
 795 }
 796 
 797 static inline void SetPageSlabPfmemalloc(struct page *page)
 798 {
 799         VM_BUG_ON_PAGE(!PageSlab(page), page);
 800         SetPageActive(page);
 801 }
 802 
 803 static inline void __ClearPageSlabPfmemalloc(struct page *page)
 804 {
 805         VM_BUG_ON_PAGE(!PageSlab(page), page);
 806         __ClearPageActive(page);
 807 }
 808 
 809 static inline void ClearPageSlabPfmemalloc(struct page *page)
 810 {
 811         VM_BUG_ON_PAGE(!PageSlab(page), page);
 812         ClearPageActive(page);
 813 }
 814 
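     /*
      * Usage sketch (illustration; the example_* name is hypothetical): a
      * slab allocator records the reserve status when it grabs a page from
      * the buddy allocator.  page_is_pfmemalloc() is from linux/mm.h.
      */
     static inline void example_account_slab_page(struct page *page)
     {
             __SetPageSlab(page);                 /* from __PAGEFLAG(Slab, ...) above */
             if (page_is_pfmemalloc(page))        /* came from emergency reserves */
                     SetPageSlabPfmemalloc(page); /* stashed in PG_active for slab */
     }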
 815 #ifdef CONFIG_MMU
 816 #define __PG_MLOCKED            (1UL << PG_mlocked)
 817 #else
 818 #define __PG_MLOCKED            0
 819 #endif
 820 
 821 /*
 822  * Flags checked when a page is freed.  Pages being freed should not have
 823  * these flags set.  If they are, there is a problem.
 824  */
 825 #define PAGE_FLAGS_CHECK_AT_FREE                                \
 826         (1UL << PG_lru          | 1UL << PG_locked      |       \
 827          1UL << PG_private      | 1UL << PG_private_2   |       \
 828          1UL << PG_writeback    | 1UL << PG_reserved    |       \
 829          1UL << PG_slab         | 1UL << PG_active      |       \
 830          1UL << PG_unevictable  | __PG_MLOCKED)
 831 
 832 /*
 833  * Flags checked when a page is prepped for return by the page allocator.
 834  * Pages being prepped should not have these flags set.  If they are set,
 835  * there has been a kernel bug or struct page corruption.
 836  *
 837  * __PG_HWPOISON is exceptional because it needs to be kept beyond a page's
 838  * alloc-free cycle to prevent the page from being reused.
 839  */
 840 #define PAGE_FLAGS_CHECK_AT_PREP        \
 841         (((1UL << NR_PAGEFLAGS) - 1) & ~__PG_HWPOISON)
 842 
 843 #define PAGE_FLAGS_PRIVATE                              \
 844         (1UL << PG_private | 1UL << PG_private_2)
 845 /**
 846  * page_has_private - Determine if page has private stuff
 847  * @page: The page to be checked
 848  *
 849  * Determine if a page has private stuff, indicating that release routines
 850  * should be invoked upon it.
 851  */
 852 static inline int page_has_private(struct page *page)
 853 {
 854         return !!(page->flags & PAGE_FLAGS_PRIVATE);
 855 }
 856 
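     /*
      * Illustrative caller (sketch): page reclaim uses this to decide
      * whether the filesystem must be asked to drop its private state
      * before a pagecache page can be freed, roughly as mm/vmscan.c does:
      *
      *      if (page_has_private(page) &&
      *          !try_to_release_page(page, gfp_mask))
      *              goto keep;      (the fs still holds state; not reclaimable)
      */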
 857 #undef PF_ANY
 858 #undef PF_HEAD
 859 #undef PF_ONLY_HEAD
 860 #undef PF_NO_TAIL
 861 #undef PF_NO_COMPOUND
 862 #endif /* !__GENERATING_BOUNDS_H */
 863 
 864 #endif  /* PAGE_FLAGS_H */
