linux/include/linux/page-flags.h
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Macros for manipulating and testing page->flags
 */

#ifndef PAGE_FLAGS_H
#define PAGE_FLAGS_H

#include <linux/types.h>
#include <linux/bug.h>
#include <linux/mmdebug.h>
#ifndef __GENERATING_BOUNDS_H
#include <linux/mm_types.h>
#include <generated/bounds.h>
#endif /* !__GENERATING_BOUNDS_H */

/*
 * Various page->flags bits:
 *
 * PG_reserved is set for special pages, which can never be swapped out. Some
 * of them might not even exist...
 *
 * The PG_private bitflag is set on pagecache pages if they contain filesystem
 * specific data (which is normally at page->private). It can also be used by
 * private allocations for their own purposes.
 *
 * During initiation of disk I/O, PG_locked is set. This bit is set before I/O
 * and cleared when writeback _starts_ or when read _completes_. PG_writeback
 * is set before writeback starts and cleared when it finishes.
 *
 * PG_locked also pins a page in pagecache, and blocks truncation of the file
 * while it is held.
 *
 * page_waitqueue(page) is a wait queue of all tasks waiting for the page
 * to become unlocked.
 *
 * PG_uptodate tells whether the page's contents are valid.  When a read
 * completes, the page becomes uptodate, unless a disk I/O error happened.
 *
 * PG_referenced and PG_reclaim are used for page reclaim of anonymous and
 * file-backed pagecache (see mm/vmscan.c).
 *
 * PG_error is set to indicate that an I/O error occurred on this page.
 *
 * PG_arch_1 is an architecture specific page state bit.  The generic code
 * guarantees that this bit is cleared for a page when it first is entered into
 * the page cache.
 *
 * PG_hwpoison indicates that a page got corrupted in hardware and contains
 * data with incorrect ECC bits that triggered a machine check. Accessing is
 * not safe since it may cause another machine check. Don't touch!
 */
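
/*
 * An illustrative sketch of the locking protocol described above (for
 * documentation only, not part of this header): lock_page() and
 * unlock_page() live in linux/pagemap.h, and fs_read_into_page() is a
 * hypothetical stand-in for a filesystem's read routine.
 *
 *      lock_page(page);                  // PG_locked set; truncation blocked
 *      if (!PageUptodate(page)) {
 *              error = fs_read_into_page(page);
 *              // on success the fs calls SetPageUptodate(page)
 *      }
 *      unlock_page(page);                // clears PG_locked, wakes waiters
 */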

/*
 * Don't use the *_dontuse flags.  Use the macros.  Otherwise you'll break
 * locked- and dirty-page accounting.
 *
 * The page flags field is split into two parts, the main flags area
 * which extends from the low bits upwards, and the fields area which
 * extends from the high bits downwards.
 *
 *  | FIELD | ... | FLAGS |
 *  N-1           ^       0
 *               (NR_PAGEFLAGS)
 *
 * The fields area is reserved for fields mapping zone, node (for NUMA) and
 * SPARSEMEM section (for variants of SPARSEMEM that require section ids like
 * SPARSEMEM_EXTREME with !SPARSEMEM_VMEMMAP).
 */
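
/*
 * For example (a sketch; the shift and mask macros live in linux/mm.h,
 * not here), the zone index is stored in the fields area and recovered
 * by shifting it back down:
 *
 *      zone_idx = (page->flags >> ZONES_PGSHIFT) & ZONES_MASK;
 */
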
enum pageflags {
        PG_locked,              /* Page is locked. Don't touch. */
        PG_error,
        PG_referenced,
        PG_uptodate,
        PG_dirty,
        PG_lru,
        PG_active,
        PG_waiters,             /* Page has waiters, check its waitqueue. Must be bit #7 and in the same byte as "PG_locked" */
        PG_slab,
        PG_owner_priv_1,        /* Owner use. If pagecache, fs may use */
        PG_arch_1,
        PG_reserved,
        PG_private,             /* If pagecache, has fs-private data */
        PG_private_2,           /* If pagecache, has fs aux data */
        PG_writeback,           /* Page is under writeback */
        PG_head,                /* A head page */
        PG_mappedtodisk,        /* Has blocks allocated on-disk */
        PG_reclaim,             /* To be reclaimed asap */
        PG_swapbacked,          /* Page is backed by RAM/swap */
        PG_unevictable,         /* Page is "unevictable" */
#ifdef CONFIG_MMU
        PG_mlocked,             /* Page is vma mlocked */
#endif
#ifdef CONFIG_ARCH_USES_PG_UNCACHED
        PG_uncached,            /* Page has been mapped as uncached */
#endif
#ifdef CONFIG_MEMORY_FAILURE
        PG_hwpoison,            /* hardware poisoned page. Don't touch */
#endif
#if defined(CONFIG_IDLE_PAGE_TRACKING) && defined(CONFIG_64BIT)
        PG_young,
        PG_idle,
#endif
        __NR_PAGEFLAGS,

        /* Filesystems */
        PG_checked = PG_owner_priv_1,

        /* SwapBacked */
        PG_swapcache = PG_owner_priv_1, /* Swap page: swp_entry_t in private */

        /*
         * Two page bits are conscripted by FS-Cache to maintain local caching
         * state.  These bits are set on pages belonging to the netfs's inodes
         * when those inodes are being locally cached.
         */
        PG_fscache = PG_private_2,      /* page backed by cache */

        /* XEN */
        /* Pinned in Xen as a read-only pagetable page. */
        PG_pinned = PG_owner_priv_1,
        /* Pinned as part of domain save (see xen_mm_pin_all()). */
        PG_savepinned = PG_dirty,
        /* Has a grant mapping of another (foreign) domain's page. */
        PG_foreign = PG_owner_priv_1,

        /* SLOB */
        PG_slob_free = PG_private,

        /* Compound pages. Stored in first tail page's flags */
        PG_double_map = PG_private_2,

        /* non-lru isolated movable page */
        PG_isolated = PG_reclaim,
};

#ifndef __GENERATING_BOUNDS_H

struct page;    /* forward declaration */

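/*
 * Bit 0 of page->compound_head is set on tail pages; the remaining bits
 * hold the address of the head page (see set_compound_head() below).
 */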
static inline struct page *compound_head(struct page *page)
{
        unsigned long head = READ_ONCE(page->compound_head);

        if (unlikely(head & 1))
                return (struct page *) (head - 1);
        return page;
}

static __always_inline int PageTail(struct page *page)
{
        return READ_ONCE(page->compound_head) & 1;
}

static __always_inline int PageCompound(struct page *page)
{
        return test_bit(PG_head, &page->flags) || PageTail(page);
}

#define PAGE_POISON_PATTERN     -1l
static inline int PagePoisoned(const struct page *page)
{
        return page->flags == PAGE_POISON_PATTERN;
}

/*
 * Page flags policies wrt compound pages
 *
 * PF_POISONED_CHECK:
 *     check if this struct page is poisoned/uninitialized
 *
 * PF_ANY:
 *     the page flag is relevant for small, head and tail pages.
 *
 * PF_HEAD:
 *     for compound pages, all operations related to the page flag are
 *     applied to the head page.
 *
 * PF_ONLY_HEAD:
 *     for compound pages, callers only ever operate on the head page.
 *
 * PF_NO_TAIL:
 *     modifications of the page flag must be done on small or head pages;
 *     checks can be done on tail pages too.
 *
 * PF_NO_COMPOUND:
 *     the page flag is not relevant for compound pages.
 */
#define PF_POISONED_CHECK(page) ({                                      \
                VM_BUG_ON_PGFLAGS(PagePoisoned(page), page);            \
                page; })
#define PF_ANY(page, enforce)   PF_POISONED_CHECK(page)
#define PF_HEAD(page, enforce)  PF_POISONED_CHECK(compound_head(page))
#define PF_ONLY_HEAD(page, enforce) ({                                  \
                VM_BUG_ON_PGFLAGS(PageTail(page), page);                \
                PF_POISONED_CHECK(page); })
#define PF_NO_TAIL(page, enforce) ({                                    \
                VM_BUG_ON_PGFLAGS(enforce && PageTail(page), page);     \
                PF_POISONED_CHECK(compound_head(page)); })
#define PF_NO_COMPOUND(page, enforce) ({                                \
                VM_BUG_ON_PGFLAGS(enforce && PageCompound(page), page); \
                PF_POISONED_CHECK(page); })

/*
 * Macros to create function definitions for page flags
 */
#define TESTPAGEFLAG(uname, lname, policy)                              \
static __always_inline int Page##uname(struct page *page)               \
        { return test_bit(PG_##lname, &policy(page, 0)->flags); }

#define SETPAGEFLAG(uname, lname, policy)                               \
static __always_inline void SetPage##uname(struct page *page)           \
        { set_bit(PG_##lname, &policy(page, 1)->flags); }

#define CLEARPAGEFLAG(uname, lname, policy)                             \
static __always_inline void ClearPage##uname(struct page *page)         \
        { clear_bit(PG_##lname, &policy(page, 1)->flags); }

#define __SETPAGEFLAG(uname, lname, policy)                             \
static __always_inline void __SetPage##uname(struct page *page)         \
        { __set_bit(PG_##lname, &policy(page, 1)->flags); }

#define __CLEARPAGEFLAG(uname, lname, policy)                           \
static __always_inline void __ClearPage##uname(struct page *page)       \
        { __clear_bit(PG_##lname, &policy(page, 1)->flags); }

#define TESTSETFLAG(uname, lname, policy)                               \
static __always_inline int TestSetPage##uname(struct page *page)        \
        { return test_and_set_bit(PG_##lname, &policy(page, 1)->flags); }

#define TESTCLEARFLAG(uname, lname, policy)                             \
static __always_inline int TestClearPage##uname(struct page *page)      \
        { return test_and_clear_bit(PG_##lname, &policy(page, 1)->flags); }

#define PAGEFLAG(uname, lname, policy)                                  \
        TESTPAGEFLAG(uname, lname, policy)                              \
        SETPAGEFLAG(uname, lname, policy)                               \
        CLEARPAGEFLAG(uname, lname, policy)

#define __PAGEFLAG(uname, lname, policy)                                \
        TESTPAGEFLAG(uname, lname, policy)                              \
        __SETPAGEFLAG(uname, lname, policy)                             \
        __CLEARPAGEFLAG(uname, lname, policy)

#define TESTSCFLAG(uname, lname, policy)                                \
        TESTSETFLAG(uname, lname, policy)                               \
        TESTCLEARFLAG(uname, lname, policy)

#define TESTPAGEFLAG_FALSE(uname)                                       \
static inline int Page##uname(const struct page *page) { return 0; }

#define SETPAGEFLAG_NOOP(uname)                                         \
static inline void SetPage##uname(struct page *page) {  }

#define CLEARPAGEFLAG_NOOP(uname)                                       \
static inline void ClearPage##uname(struct page *page) {  }

#define __CLEARPAGEFLAG_NOOP(uname)                                     \
static inline void __ClearPage##uname(struct page *page) {  }

#define TESTSETFLAG_FALSE(uname)                                        \
static inline int TestSetPage##uname(struct page *page) { return 0; }

#define TESTCLEARFLAG_FALSE(uname)                                      \
static inline int TestClearPage##uname(struct page *page) { return 0; }

#define PAGEFLAG_FALSE(uname) TESTPAGEFLAG_FALSE(uname)                 \
        SETPAGEFLAG_NOOP(uname) CLEARPAGEFLAG_NOOP(uname)

#define TESTSCFLAG_FALSE(uname)                                         \
        TESTSETFLAG_FALSE(uname) TESTCLEARFLAG_FALSE(uname)

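/*
 * As an illustration (modulo the poison check folded in by the policy),
 * PAGEFLAG(Dirty, dirty, PF_HEAD) expands to:
 *
 *      static __always_inline int PageDirty(struct page *page)
 *              { return test_bit(PG_dirty, &compound_head(page)->flags); }
 *      static __always_inline void SetPageDirty(struct page *page)
 *              { set_bit(PG_dirty, &compound_head(page)->flags); }
 *      static __always_inline void ClearPageDirty(struct page *page)
 *              { clear_bit(PG_dirty, &compound_head(page)->flags); }
 */
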
__PAGEFLAG(Locked, locked, PF_NO_TAIL)
PAGEFLAG(Waiters, waiters, PF_ONLY_HEAD) __CLEARPAGEFLAG(Waiters, waiters, PF_ONLY_HEAD)
PAGEFLAG(Error, error, PF_NO_COMPOUND) TESTCLEARFLAG(Error, error, PF_NO_COMPOUND)
PAGEFLAG(Referenced, referenced, PF_HEAD)
        TESTCLEARFLAG(Referenced, referenced, PF_HEAD)
        __SETPAGEFLAG(Referenced, referenced, PF_HEAD)
PAGEFLAG(Dirty, dirty, PF_HEAD) TESTSCFLAG(Dirty, dirty, PF_HEAD)
        __CLEARPAGEFLAG(Dirty, dirty, PF_HEAD)
PAGEFLAG(LRU, lru, PF_HEAD) __CLEARPAGEFLAG(LRU, lru, PF_HEAD)
PAGEFLAG(Active, active, PF_HEAD) __CLEARPAGEFLAG(Active, active, PF_HEAD)
        TESTCLEARFLAG(Active, active, PF_HEAD)
__PAGEFLAG(Slab, slab, PF_NO_TAIL)
__PAGEFLAG(SlobFree, slob_free, PF_NO_TAIL)
PAGEFLAG(Checked, checked, PF_NO_COMPOUND)         /* Used by some filesystems */

/* Xen */
PAGEFLAG(Pinned, pinned, PF_NO_COMPOUND)
        TESTSCFLAG(Pinned, pinned, PF_NO_COMPOUND)
PAGEFLAG(SavePinned, savepinned, PF_NO_COMPOUND)
PAGEFLAG(Foreign, foreign, PF_NO_COMPOUND)

PAGEFLAG(Reserved, reserved, PF_NO_COMPOUND)
        __CLEARPAGEFLAG(Reserved, reserved, PF_NO_COMPOUND)
PAGEFLAG(SwapBacked, swapbacked, PF_NO_TAIL)
        __CLEARPAGEFLAG(SwapBacked, swapbacked, PF_NO_TAIL)
        __SETPAGEFLAG(SwapBacked, swapbacked, PF_NO_TAIL)

/*
 * Private page markings that may be used by the filesystem that owns the page
 * for its own purposes.
 * - PG_private and PG_private_2 cause releasepage() and co to be invoked
 */
PAGEFLAG(Private, private, PF_ANY) __SETPAGEFLAG(Private, private, PF_ANY)
        __CLEARPAGEFLAG(Private, private, PF_ANY)
PAGEFLAG(Private2, private_2, PF_ANY) TESTSCFLAG(Private2, private_2, PF_ANY)
PAGEFLAG(OwnerPriv1, owner_priv_1, PF_ANY)
        TESTCLEARFLAG(OwnerPriv1, owner_priv_1, PF_ANY)

/*
 * Only test-and-set operations exist for PG_writeback.  The unconditional
 * operators are risky: they bypass page accounting.
 */
TESTPAGEFLAG(Writeback, writeback, PF_NO_TAIL)
        TESTSCFLAG(Writeback, writeback, PF_NO_TAIL)
PAGEFLAG(MappedToDisk, mappedtodisk, PF_NO_TAIL)

/* PG_readahead is only used for reads; PG_reclaim is only for writes */
PAGEFLAG(Reclaim, reclaim, PF_NO_TAIL)
        TESTCLEARFLAG(Reclaim, reclaim, PF_NO_TAIL)
PAGEFLAG(Readahead, reclaim, PF_NO_COMPOUND)
        TESTCLEARFLAG(Readahead, reclaim, PF_NO_COMPOUND)

#ifdef CONFIG_HIGHMEM
/*
 * Must use a macro here due to header dependency issues. page_zone() is not
 * available at this point.
 */
#define PageHighMem(__p) is_highmem_idx(page_zonenum(__p))
#else
PAGEFLAG_FALSE(HighMem)
#endif

#ifdef CONFIG_SWAP
static __always_inline int PageSwapCache(struct page *page)
{
#ifdef CONFIG_THP_SWAP
        page = compound_head(page);
#endif
        return PageSwapBacked(page) && test_bit(PG_swapcache, &page->flags);
}
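
/*
 * Note that PG_swapcache aliases PG_owner_priv_1 (also used as PG_checked
 * by filesystems), which is why the test above additionally requires
 * PageSwapBacked() to disambiguate.
 */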
SETPAGEFLAG(SwapCache, swapcache, PF_NO_TAIL)
CLEARPAGEFLAG(SwapCache, swapcache, PF_NO_TAIL)
#else
PAGEFLAG_FALSE(SwapCache)
#endif

PAGEFLAG(Unevictable, unevictable, PF_HEAD)
        __CLEARPAGEFLAG(Unevictable, unevictable, PF_HEAD)
        TESTCLEARFLAG(Unevictable, unevictable, PF_HEAD)

#ifdef CONFIG_MMU
PAGEFLAG(Mlocked, mlocked, PF_NO_TAIL)
        __CLEARPAGEFLAG(Mlocked, mlocked, PF_NO_TAIL)
        TESTSCFLAG(Mlocked, mlocked, PF_NO_TAIL)
#else
PAGEFLAG_FALSE(Mlocked) __CLEARPAGEFLAG_NOOP(Mlocked)
        TESTSCFLAG_FALSE(Mlocked)
#endif

#ifdef CONFIG_ARCH_USES_PG_UNCACHED
PAGEFLAG(Uncached, uncached, PF_NO_COMPOUND)
#else
PAGEFLAG_FALSE(Uncached)
#endif

#ifdef CONFIG_MEMORY_FAILURE
PAGEFLAG(HWPoison, hwpoison, PF_ANY)
TESTSCFLAG(HWPoison, hwpoison, PF_ANY)
#define __PG_HWPOISON (1UL << PG_hwpoison)
extern bool set_hwpoison_free_buddy_page(struct page *page);
#else
PAGEFLAG_FALSE(HWPoison)
static inline bool set_hwpoison_free_buddy_page(struct page *page)
{
        return false;
}
#define __PG_HWPOISON 0
#endif

#if defined(CONFIG_IDLE_PAGE_TRACKING) && defined(CONFIG_64BIT)
TESTPAGEFLAG(Young, young, PF_ANY)
SETPAGEFLAG(Young, young, PF_ANY)
TESTCLEARFLAG(Young, young, PF_ANY)
PAGEFLAG(Idle, idle, PF_ANY)
#endif

/*
 * On an anonymous page mapped into a user virtual memory area,
 * page->mapping points to its anon_vma, not to a struct address_space;
 * with the PAGE_MAPPING_ANON bit set to distinguish it.  See rmap.h.
 *
 * On an anonymous page in a VM_MERGEABLE area, if CONFIG_KSM is enabled,
 * the PAGE_MAPPING_MOVABLE bit may be set along with the PAGE_MAPPING_ANON
 * bit; and then page->mapping points, not to an anon_vma, but to a private
 * structure which KSM associates with that merged page.  See ksm.h.
 *
 * PAGE_MAPPING_MOVABLE without PAGE_MAPPING_ANON is used for a non-lru
 * movable page, and then page->mapping points to a struct address_space.
 *
 * Please note that, confusingly, "page_mapping" refers to the inode
 * address_space which maps the page from disk; whereas "page_mapped"
 * refers to user virtual address space into which the page is mapped.
 */
#define PAGE_MAPPING_ANON       0x1
#define PAGE_MAPPING_MOVABLE    0x2
#define PAGE_MAPPING_KSM        (PAGE_MAPPING_ANON | PAGE_MAPPING_MOVABLE)
#define PAGE_MAPPING_FLAGS      (PAGE_MAPPING_ANON | PAGE_MAPPING_MOVABLE)
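
/*
 * Summary of the encodings above, as tested by the helpers below (the
 * two low bits of page->mapping):
 *
 *      file/pagecache page:    address_space                   (bits 00)
 *      anonymous page:         anon_vma | ANON                 (bits 01)
 *      non-lru movable page:   address_space | MOVABLE         (bits 10)
 *      KSM page:               stable-tree node | KSM          (bits 11)
 */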

static __always_inline int PageMappingFlags(struct page *page)
{
        return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) != 0;
}

static __always_inline int PageAnon(struct page *page)
{
        page = compound_head(page);
        return ((unsigned long)page->mapping & PAGE_MAPPING_ANON) != 0;
}

static __always_inline int __PageMovable(struct page *page)
{
        return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) ==
                                PAGE_MAPPING_MOVABLE;
}

#ifdef CONFIG_KSM
/*
 * A KSM page is one of those write-protected "shared pages" or "merged pages"
 * which KSM maps into multiple mms, wherever identical anonymous page content
 * is found in VM_MERGEABLE vmas.  It's a PageAnon page, pointing not to any
 * anon_vma, but to that page's node of the stable tree.
 */
static __always_inline int PageKsm(struct page *page)
{
        page = compound_head(page);
        return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) ==
                                PAGE_MAPPING_KSM;
}
#else
TESTPAGEFLAG_FALSE(Ksm)
#endif

u64 stable_page_flags(struct page *page);

static inline int PageUptodate(struct page *page)
{
        int ret;

        page = compound_head(page);
        ret = test_bit(PG_uptodate, &page->flags);
        /*
         * Must ensure that the data we read out of the page is loaded
         * _after_ we've loaded page->flags to check for PageUptodate.
         * We can skip the barrier if the page is not uptodate, because
         * we wouldn't be reading anything from it.
         *
         * See SetPageUptodate() for the other side of the story.
         */
        if (ret)
                smp_rmb();

        return ret;
}

static __always_inline void __SetPageUptodate(struct page *page)
{
        VM_BUG_ON_PAGE(PageTail(page), page);
        smp_wmb();
        __set_bit(PG_uptodate, &page->flags);
}

static __always_inline void SetPageUptodate(struct page *page)
{
        VM_BUG_ON_PAGE(PageTail(page), page);
        /*
         * Memory barrier must be issued before setting the PG_uptodate bit,
         * so that all previous stores issued in order to bring the page
         * uptodate are actually visible before PageUptodate becomes true.
         */
        smp_wmb();
        set_bit(PG_uptodate, &page->flags);
}

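/*
 * Sketch of the barrier pairing (writer on the left is e.g. a filesystem
 * completing a read; reader on the right is any PageUptodate() caller):
 *
 *      fill page data                  if (PageUptodate(page))
 *      smp_wmb();                              smp_rmb();
 *      set_bit(PG_uptodate, ...)               read page data
 *
 * The smp_wmb() orders the data stores before the flag store; the paired
 * smp_rmb() orders the flag load before the data loads.
 */
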
CLEARPAGEFLAG(Uptodate, uptodate, PF_NO_TAIL)

int test_clear_page_writeback(struct page *page);
int __test_set_page_writeback(struct page *page, bool keep_write);

#define test_set_page_writeback(page)                   \
        __test_set_page_writeback(page, false)
#define test_set_page_writeback_keepwrite(page) \
        __test_set_page_writeback(page, true)

static inline void set_page_writeback(struct page *page)
{
        test_set_page_writeback(page);
}

static inline void set_page_writeback_keepwrite(struct page *page)
{
        test_set_page_writeback_keepwrite(page);
}

__PAGEFLAG(Head, head, PF_ANY) CLEARPAGEFLAG(Head, head, PF_ANY)

static __always_inline void set_compound_head(struct page *page, struct page *head)
{
        WRITE_ONCE(page->compound_head, (unsigned long)head + 1);
}

static __always_inline void clear_compound_head(struct page *page)
{
        WRITE_ONCE(page->compound_head, 0);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline void ClearPageCompound(struct page *page)
{
        BUG_ON(!PageHead(page));
        ClearPageHead(page);
}
#endif

#define PG_head_mask ((1UL << PG_head))

#ifdef CONFIG_HUGETLB_PAGE
int PageHuge(struct page *page);
int PageHeadHuge(struct page *page);
bool page_huge_active(struct page *page);
#else
TESTPAGEFLAG_FALSE(Huge)
TESTPAGEFLAG_FALSE(HeadHuge)

static inline bool page_huge_active(struct page *page)
{
        return false;
}
#endif

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/*
 * PageHuge() returns true only for hugetlbfs pages, not for normal or
 * transparent huge pages.
 *
 * PageTransHuge() returns true for both transparent huge pages and
 * hugetlbfs pages, but not for normal pages.  It may be called only in
 * the core VM paths where hugetlbfs pages can't exist.
 */
static inline int PageTransHuge(struct page *page)
{
        VM_BUG_ON_PAGE(PageTail(page), page);
        return PageHead(page);
}

/*
 * PageTransCompound returns true for both transparent huge pages
 * and hugetlbfs pages, so it should only be called when it's known
 * that hugetlbfs pages aren't involved.
 */
static inline int PageTransCompound(struct page *page)
{
        return PageCompound(page);
}

/*
 * PageTransCompoundMap is the same as PageTransCompound, but it also
 * guarantees the primary MMU has the entire compound page mapped
 * through pmd_trans_huge, which in turn guarantees the secondary MMUs
 * can also map the entire compound page. This allows the secondary
 * MMUs to call get_user_pages() only once for each compound page and
 * to immediately map the entire compound page with a single secondary
 * MMU fault. If there will be a pmd split later, the secondary MMUs
 * will get an update through the MMU notifier invalidation through
 * split_huge_pmd().
 *
 * Unlike PageTransCompound, this is safe to call only while
 * split_huge_pmd() cannot run from under us, e.g. when protected by the
 * MMU notifier; otherwise it may result in page->_mapcount < 0 false
 * positives.
 */
static inline int PageTransCompoundMap(struct page *page)
{
        return PageTransCompound(page) && atomic_read(&page->_mapcount) < 0;
}

/*
 * PageTransTail returns true for both transparent huge pages
 * and hugetlbfs pages, so it should only be called when it's known
 * that hugetlbfs pages aren't involved.
 */
static inline int PageTransTail(struct page *page)
{
        return PageTail(page);
}

/*
 * PageDoubleMap indicates that the compound page is mapped with PTEs as well
 * as PMDs.
 *
 * This is required for optimization of rmap operations for THP: we can
 * postpone per small page mapcount accounting (and its overhead from atomic
 * operations) until the first PMD split.
 *
 * For a PageDoubleMap page, ->_mapcount in all sub-pages is offset up by one.
 * This additional reference goes away with the last compound_mapcount.
 *
 * See also __split_huge_pmd_locked() and page_remove_anon_compound_rmap().
 */
static inline int PageDoubleMap(struct page *page)
{
        return PageHead(page) && test_bit(PG_double_map, &page[1].flags);
}

static inline void SetPageDoubleMap(struct page *page)
{
        VM_BUG_ON_PAGE(!PageHead(page), page);
        set_bit(PG_double_map, &page[1].flags);
}

static inline void ClearPageDoubleMap(struct page *page)
{
        VM_BUG_ON_PAGE(!PageHead(page), page);
        clear_bit(PG_double_map, &page[1].flags);
}

static inline int TestSetPageDoubleMap(struct page *page)
{
        VM_BUG_ON_PAGE(!PageHead(page), page);
        return test_and_set_bit(PG_double_map, &page[1].flags);
}

static inline int TestClearPageDoubleMap(struct page *page)
{
        VM_BUG_ON_PAGE(!PageHead(page), page);
        return test_and_clear_bit(PG_double_map, &page[1].flags);
}

#else
TESTPAGEFLAG_FALSE(TransHuge)
TESTPAGEFLAG_FALSE(TransCompound)
TESTPAGEFLAG_FALSE(TransCompoundMap)
TESTPAGEFLAG_FALSE(TransTail)
PAGEFLAG_FALSE(DoubleMap)
        TESTSETFLAG_FALSE(DoubleMap)
        TESTCLEARFLAG_FALSE(DoubleMap)
#endif

/*
 * For pages that are never mapped to userspace (and aren't PageSlab),
 * page_type may be used.  Because it is initialised to -1, we invert the
 * sense of the bit, so __SetPageFoo *clears* the bit used for PageFoo, and
 * __ClearPageFoo *sets* the bit used for PageFoo.  We reserve a few high and
 * low bits so that an underflow or overflow of page_mapcount() won't be
 * mistaken for a page type value.
 */

#define PAGE_TYPE_BASE  0xf0000000
/* Reserve              0x0000007f to catch underflows of page_mapcount */
#define PG_buddy        0x00000080
#define PG_balloon      0x00000100
#define PG_kmemcg       0x00000200
#define PG_table        0x00000400

#define PageType(page, flag)                                            \
        ((page->page_type & (PAGE_TYPE_BASE | flag)) == PAGE_TYPE_BASE)

#define PAGE_TYPE_OPS(uname, lname)                                     \
static __always_inline int Page##uname(struct page *page)               \
{                                                                       \
        return PageType(page, PG_##lname);                              \
}                                                                       \
static __always_inline void __SetPage##uname(struct page *page)         \
{                                                                       \
        VM_BUG_ON_PAGE(!PageType(page, 0), page);                       \
        page->page_type &= ~PG_##lname;                                 \
}                                                                       \
static __always_inline void __ClearPage##uname(struct page *page)       \
{                                                                       \
        VM_BUG_ON_PAGE(!Page##uname(page), page);                       \
        page->page_type |= PG_##lname;                                  \
}

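/*
 * Worked example of the inverted encoding (an illustration using the
 * values defined above): a freshly initialised page has page_type ==
 * 0xffffffff.  __SetPageBuddy() clears PG_buddy, leaving 0xffffff7f, and
 * PageBuddy() then tests
 *
 *      (0xffffff7f & (PAGE_TYPE_BASE | PG_buddy)) == PAGE_TYPE_BASE
 *
 * which holds.  A page_mapcount() underflow leaves the reserved low bits
 * set and an overflow clears the high nibble, so in either case the
 * comparison against PAGE_TYPE_BASE fails rather than reporting a bogus
 * page type.
 */
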
/*
 * PageBuddy() indicates that the page is free and in the buddy system
 * (see mm/page_alloc.c).
 */
PAGE_TYPE_OPS(Buddy, buddy)

/*
 * PageBalloon() is true for pages that are on the balloon page list
 * (see mm/balloon_compaction.c).
 */
PAGE_TYPE_OPS(Balloon, balloon)

/*
 * If kmemcg is enabled, the buddy allocator will set PageKmemcg() on
 * pages allocated with __GFP_ACCOUNT. It gets cleared on page free.
 */
PAGE_TYPE_OPS(Kmemcg, kmemcg)

/*
 * Marks pages in use as page tables.
 */
PAGE_TYPE_OPS(Table, table)

extern bool is_free_buddy_page(struct page *page);

__PAGEFLAG(Isolated, isolated, PF_ANY)

/*
 * If network-based swap is enabled, sl*b must keep track of whether pages
 * were allocated from pfmemalloc reserves.
 */
static inline int PageSlabPfmemalloc(struct page *page)
{
        VM_BUG_ON_PAGE(!PageSlab(page), page);
        return PageActive(page);
}

static inline void SetPageSlabPfmemalloc(struct page *page)
{
        VM_BUG_ON_PAGE(!PageSlab(page), page);
        SetPageActive(page);
}

static inline void __ClearPageSlabPfmemalloc(struct page *page)
{
        VM_BUG_ON_PAGE(!PageSlab(page), page);
        __ClearPageActive(page);
}

static inline void ClearPageSlabPfmemalloc(struct page *page)
{
        VM_BUG_ON_PAGE(!PageSlab(page), page);
        ClearPageActive(page);
}
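
/*
 * PG_active can be reused for this because slab pages are never placed
 * on the LRU lists, so the bit is otherwise unused for them.
 */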

#ifdef CONFIG_MMU
#define __PG_MLOCKED            (1UL << PG_mlocked)
#else
#define __PG_MLOCKED            0
#endif

/*
 * Flags checked when a page is freed.  Pages being freed should not have
 * these flags set.  If they are, there is a problem.
 */
#define PAGE_FLAGS_CHECK_AT_FREE                                \
        (1UL << PG_lru          | 1UL << PG_locked      |       \
         1UL << PG_private      | 1UL << PG_private_2   |       \
         1UL << PG_writeback    | 1UL << PG_reserved    |       \
         1UL << PG_slab         | 1UL << PG_active      |       \
         1UL << PG_unevictable  | __PG_MLOCKED)

/*
 * Flags checked when a page is prepped for return by the page allocator.
 * Pages being prepped should not have these flags set.  If they are set,
 * there has been a kernel bug or struct page corruption.
 *
 * __PG_HWPOISON is exceptional because it needs to be kept beyond a page's
 * alloc-free cycle, to prevent the page from being reused.
 */
#define PAGE_FLAGS_CHECK_AT_PREP        \
        (((1UL << NR_PAGEFLAGS) - 1) & ~__PG_HWPOISON)

#define PAGE_FLAGS_PRIVATE                              \
        (1UL << PG_private | 1UL << PG_private_2)

/**
 * page_has_private - Determine if page has private stuff
 * @page: The page to be checked
 *
 * Determine if a page has private stuff, indicating that release routines
 * should be invoked upon it.
 */
static inline int page_has_private(struct page *page)
{
        return !!(page->flags & PAGE_FLAGS_PRIVATE);
}
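
/*
 * Typical use (a sketch): a caller about to free or migrate a pagecache
 * page checks page_has_private() and, if set, asks the owning filesystem
 * to drop its attachments, e.g. via try_to_release_page().
 */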

#undef PF_ANY
#undef PF_HEAD
#undef PF_ONLY_HEAD
#undef PF_NO_TAIL
#undef PF_NO_COMPOUND
#endif /* !__GENERATING_BOUNDS_H */

#endif  /* PAGE_FLAGS_H */