linux/include/linux/page-flags.h
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Macros for manipulating and testing page->flags
 */

#ifndef PAGE_FLAGS_H
#define PAGE_FLAGS_H

#include <linux/types.h>
#include <linux/bug.h>
#include <linux/mmdebug.h>
#ifndef __GENERATING_BOUNDS_H
#include <linux/mm_types.h>
#include <generated/bounds.h>
#endif /* !__GENERATING_BOUNDS_H */

/*
 * Various page->flags bits:
 *
 * PG_reserved is set for special pages, which can never be swapped out. Some
 * of them might not even exist...
 *
 * The PG_private bitflag is set on pagecache pages if they contain filesystem
 * specific data (which is normally at page->private). It can also be used by
 * private allocations for their own purposes.
 *
 * During initiation of disk I/O, PG_locked is set. This bit is set before I/O
 * and cleared when writeback _starts_ or when read _completes_. PG_writeback
 * is set before writeback starts and cleared when it finishes.
 *
 * PG_locked also pins a page in pagecache, and blocks truncation of the file
 * while it is held.
 *
 * page_waitqueue(page) is a wait queue of all tasks waiting for the page
 * to become unlocked.
 *
 * PG_uptodate tells whether the page's contents are valid.  When a read
 * completes, the page becomes uptodate, unless a disk I/O error happened.
 *
 * PG_referenced, PG_reclaim are used for page reclaim for anonymous and
 * file-backed pagecache (see mm/vmscan.c).
 *
 * PG_error is set to indicate that an I/O error occurred on this page.
 *
 * PG_arch_1 is an architecture specific page state bit.  The generic code
 * guarantees that this bit is cleared for a page when it first is entered into
 * the page cache.
 *
 * PG_hwpoison indicates that a page got corrupted in hardware and contains
 * data with incorrect ECC bits that triggered a machine check. Accessing it
 * is not safe since it may cause another machine check. Don't touch!
 */
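
/*
 * Illustrative sketch (lock_page(), unlock_page() and wait_on_page_locked()
 * are declared in <linux/pagemap.h>, not here) of a typical PG_locked
 * lifecycle around a read:
 *
 *      lock_page(page);                 // sets PG_locked, may sleep
 *      if (!PageUptodate(page))
 *              // ...start the read; the completion path sets PG_uptodate
 *              // and calls unlock_page()...
 *      // other tasks sleep in wait_on_page_locked() until the bit clears
 */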

/*
 * Don't use the *_dontuse flags.  Use the macros.  Otherwise you'll break
 * locked- and dirty-page accounting.
 *
 * The page flags field is split into two parts, the main flags area
 * which extends from the low bits upwards, and the fields area which
 * extends from the high bits downwards.
 *
 *  | FIELD | ... | FLAGS |
 *  N-1           ^       0
 *               (NR_PAGEFLAGS)
 *
 * The fields area is reserved for fields mapping zone, node (for NUMA) and
 * SPARSEMEM section (for variants of SPARSEMEM that require section ids like
 * SPARSEMEM_EXTREME with !SPARSEMEM_VMEMMAP).
 */
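
/*
 * Illustrative sketch (the accessors live in <linux/mm.h>, not here): the
 * fields area is read with shift/mask pairs generated from the layout above,
 * e.g. page_zonenum() is essentially:
 *
 *      zone = (page->flags >> ZONES_PGSHIFT) & ZONES_MASK;
 *
 * while the flag bits below NR_PAGEFLAGS are manipulated with test_bit(),
 * set_bit() and friends through the accessors defined in this file.
 */
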
enum pageflags {
        PG_locked,              /* Page is locked. Don't touch. */
        PG_error,
        PG_referenced,
        PG_uptodate,
        PG_dirty,
        PG_lru,
        PG_active,
        PG_waiters,             /* Page has waiters, check its waitqueue. Must be bit #7 and in the same byte as "PG_locked" */
        PG_slab,
        PG_owner_priv_1,        /* Owner use. If pagecache, fs may use */
        PG_arch_1,
        PG_reserved,
        PG_private,             /* If pagecache, has fs-private data */
        PG_private_2,           /* If pagecache, has fs aux data */
        PG_writeback,           /* Page is under writeback */
        PG_head,                /* A head page */
        PG_mappedtodisk,        /* Has blocks allocated on-disk */
        PG_reclaim,             /* To be reclaimed asap */
        PG_swapbacked,          /* Page is backed by RAM/swap */
        PG_unevictable,         /* Page is "unevictable" */
#ifdef CONFIG_MMU
        PG_mlocked,             /* Page is vma mlocked */
#endif
#ifdef CONFIG_ARCH_USES_PG_UNCACHED
        PG_uncached,            /* Page has been mapped as uncached */
#endif
#ifdef CONFIG_MEMORY_FAILURE
        PG_hwpoison,            /* hardware poisoned page. Don't touch */
#endif
#if defined(CONFIG_IDLE_PAGE_TRACKING) && defined(CONFIG_64BIT)
        PG_young,
        PG_idle,
#endif
#ifndef __GENKSYMS__
        /*
         * RHEL8: New page flags should be put here to avoid changing
         * the kABI signature.
         */
        PG_workingset,
#endif
        __NR_PAGEFLAGS,

        /* Filesystems */
        PG_checked = PG_owner_priv_1,

        /* SwapBacked */
        PG_swapcache = PG_owner_priv_1, /* Swap page: swp_entry_t in private */

        /*
         * Two page bits are conscripted by FS-Cache to maintain local caching
         * state.  These bits are set on pages belonging to the netfs's inodes
         * when those inodes are being locally cached.
         */
        PG_fscache = PG_private_2,      /* page backed by cache */

        /* XEN */
        /* Pinned in Xen as a read-only pagetable page. */
        PG_pinned = PG_owner_priv_1,
        /* Pinned as part of domain save (see xen_mm_pin_all()). */
        PG_savepinned = PG_dirty,
        /* Has a grant mapping of another (foreign) domain's page. */
        PG_foreign = PG_owner_priv_1,

        /* SLOB */
        PG_slob_free = PG_private,

        /* Compound pages. Stored in first tail page's flags */
        PG_double_map = PG_private_2,

        /* non-lru isolated movable page */
        PG_isolated = PG_reclaim,
};

#ifndef __GENERATING_BOUNDS_H

struct page;    /* forward declaration */

static inline struct page *compound_head(struct page *page)
{
        unsigned long head = READ_ONCE(page->compound_head);

        if (unlikely(head & 1))
                return (struct page *) (head - 1);
        return page;
}

static __always_inline int PageTail(struct page *page)
{
        return READ_ONCE(page->compound_head) & 1;
}

static __always_inline int PageCompound(struct page *page)
{
        return test_bit(PG_head, &page->flags) || PageTail(page);
}
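
/*
 * Encoding sketch: for a tail page, page->compound_head holds the head
 * page's address with bit 0 set (see set_compound_head() below), so:
 *
 *      tail page:   compound_head == (unsigned long)head + 1  -> PageTail()
 *      head page:   PG_head set in page->flags                -> PageHead()
 *      small page:  neither                                   -> !PageCompound()
 */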

#define PAGE_POISON_PATTERN     -1l
static inline int PagePoisoned(const struct page *page)
{
        return page->flags == PAGE_POISON_PATTERN;
}

#ifdef CONFIG_DEBUG_VM
void page_init_poison(struct page *page, size_t size);
#else
static inline void page_init_poison(struct page *page, size_t size)
{
}
#endif

/*
 * Page flags policies wrt compound pages
 *
 * PF_POISONED_CHECK
 *     check if this struct page is poisoned/uninitialized
 *
 * PF_ANY:
 *     the page flag is relevant for small, head and tail pages.
 *
 * PF_HEAD:
 *     for compound pages, all operations related to the page flag are
 *     applied to the head page.
 *
 * PF_ONLY_HEAD:
 *     for compound pages, callers only ever operate on the head page.
 *
 * PF_NO_TAIL:
 *     modifications of the page flag must be done on small or head pages,
 *     checks can be done on tail pages too.
 *
 * PF_NO_COMPOUND:
 *     the page flag is not relevant for compound pages.
 */
#define PF_POISONED_CHECK(page) ({                                      \
                VM_BUG_ON_PGFLAGS(PagePoisoned(page), page);            \
                page; })
#define PF_ANY(page, enforce)   PF_POISONED_CHECK(page)
#define PF_HEAD(page, enforce)  PF_POISONED_CHECK(compound_head(page))
#define PF_ONLY_HEAD(page, enforce) ({                                  \
                VM_BUG_ON_PGFLAGS(PageTail(page), page);                \
                PF_POISONED_CHECK(page); })
#define PF_NO_TAIL(page, enforce) ({                                    \
                VM_BUG_ON_PGFLAGS(enforce && PageTail(page), page);     \
                PF_POISONED_CHECK(compound_head(page)); })
#define PF_NO_COMPOUND(page, enforce) ({                                \
                VM_BUG_ON_PGFLAGS(enforce && PageCompound(page), page); \
                PF_POISONED_CHECK(page); })
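
/*
 * Behaviour example for PF_NO_TAIL (illustrative; the VM_BUG_ON_PGFLAGS()
 * checks fire only with CONFIG_DEBUG_VM_PGFLAGS): readers pass enforce == 0
 * and are redirected to the head page, writers pass enforce == 1 and must
 * never see a tail page:
 *
 *      PageSlab(tail);         // OK: tests the bit on compound_head(tail)
 *      SetPageSlab(tail);      // bug: enforce && PageTail(page) triggers
 */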

/*
 * Macros to create function definitions for page flags
 */
#define TESTPAGEFLAG(uname, lname, policy)                              \
static __always_inline int Page##uname(struct page *page)               \
        { return test_bit(PG_##lname, &policy(page, 0)->flags); }

#define SETPAGEFLAG(uname, lname, policy)                               \
static __always_inline void SetPage##uname(struct page *page)           \
        { set_bit(PG_##lname, &policy(page, 1)->flags); }

#define CLEARPAGEFLAG(uname, lname, policy)                             \
static __always_inline void ClearPage##uname(struct page *page)         \
        { clear_bit(PG_##lname, &policy(page, 1)->flags); }

#define __SETPAGEFLAG(uname, lname, policy)                             \
static __always_inline void __SetPage##uname(struct page *page)         \
        { __set_bit(PG_##lname, &policy(page, 1)->flags); }

#define __CLEARPAGEFLAG(uname, lname, policy)                           \
static __always_inline void __ClearPage##uname(struct page *page)       \
        { __clear_bit(PG_##lname, &policy(page, 1)->flags); }

#define TESTSETFLAG(uname, lname, policy)                               \
static __always_inline int TestSetPage##uname(struct page *page)        \
        { return test_and_set_bit(PG_##lname, &policy(page, 1)->flags); }

#define TESTCLEARFLAG(uname, lname, policy)                             \
static __always_inline int TestClearPage##uname(struct page *page)      \
        { return test_and_clear_bit(PG_##lname, &policy(page, 1)->flags); }

#define PAGEFLAG(uname, lname, policy)                                  \
        TESTPAGEFLAG(uname, lname, policy)                              \
        SETPAGEFLAG(uname, lname, policy)                               \
        CLEARPAGEFLAG(uname, lname, policy)

#define __PAGEFLAG(uname, lname, policy)                                \
        TESTPAGEFLAG(uname, lname, policy)                              \
        __SETPAGEFLAG(uname, lname, policy)                             \
        __CLEARPAGEFLAG(uname, lname, policy)

#define TESTSCFLAG(uname, lname, policy)                                \
        TESTSETFLAG(uname, lname, policy)                               \
        TESTCLEARFLAG(uname, lname, policy)

#define TESTPAGEFLAG_FALSE(uname)                                       \
static inline int Page##uname(const struct page *page) { return 0; }

#define SETPAGEFLAG_NOOP(uname)                                         \
static inline void SetPage##uname(struct page *page) {  }

#define CLEARPAGEFLAG_NOOP(uname)                                       \
static inline void ClearPage##uname(struct page *page) {  }

#define __CLEARPAGEFLAG_NOOP(uname)                                     \
static inline void __ClearPage##uname(struct page *page) {  }

#define TESTSETFLAG_FALSE(uname)                                        \
static inline int TestSetPage##uname(struct page *page) { return 0; }

#define TESTCLEARFLAG_FALSE(uname)                                      \
static inline int TestClearPage##uname(struct page *page) { return 0; }

#define PAGEFLAG_FALSE(uname) TESTPAGEFLAG_FALSE(uname)                 \
        SETPAGEFLAG_NOOP(uname) CLEARPAGEFLAG_NOOP(uname)

#define TESTSCFLAG_FALSE(uname)                                         \
        TESTSETFLAG_FALSE(uname) TESTCLEARFLAG_FALSE(uname)

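/*
 * Expansion sketch: a line such as PAGEFLAG(Dirty, dirty, PF_HEAD) below
 * generates, roughly:
 *
 *      static __always_inline int PageDirty(struct page *page)
 *              { return test_bit(PG_dirty, &PF_HEAD(page, 0)->flags); }
 *      static __always_inline void SetPageDirty(struct page *page)
 *              { set_bit(PG_dirty, &PF_HEAD(page, 1)->flags); }
 *      static __always_inline void ClearPageDirty(struct page *page)
 *              { clear_bit(PG_dirty, &PF_HEAD(page, 1)->flags); }
 */
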
__PAGEFLAG(Locked, locked, PF_NO_TAIL)
PAGEFLAG(Waiters, waiters, PF_ONLY_HEAD) __CLEARPAGEFLAG(Waiters, waiters, PF_ONLY_HEAD)
PAGEFLAG(Error, error, PF_NO_COMPOUND) TESTCLEARFLAG(Error, error, PF_NO_COMPOUND)
PAGEFLAG(Referenced, referenced, PF_HEAD)
        TESTCLEARFLAG(Referenced, referenced, PF_HEAD)
        __SETPAGEFLAG(Referenced, referenced, PF_HEAD)
PAGEFLAG(Dirty, dirty, PF_HEAD) TESTSCFLAG(Dirty, dirty, PF_HEAD)
        __CLEARPAGEFLAG(Dirty, dirty, PF_HEAD)
PAGEFLAG(LRU, lru, PF_HEAD) __CLEARPAGEFLAG(LRU, lru, PF_HEAD)
PAGEFLAG(Active, active, PF_HEAD) __CLEARPAGEFLAG(Active, active, PF_HEAD)
        TESTCLEARFLAG(Active, active, PF_HEAD)
PAGEFLAG(Workingset, workingset, PF_HEAD)
        TESTCLEARFLAG(Workingset, workingset, PF_HEAD)
__PAGEFLAG(Slab, slab, PF_NO_TAIL)
__PAGEFLAG(SlobFree, slob_free, PF_NO_TAIL)
PAGEFLAG(Checked, checked, PF_NO_COMPOUND)         /* Used by some filesystems */

/* Xen */
PAGEFLAG(Pinned, pinned, PF_NO_COMPOUND)
        TESTSCFLAG(Pinned, pinned, PF_NO_COMPOUND)
PAGEFLAG(SavePinned, savepinned, PF_NO_COMPOUND);
PAGEFLAG(Foreign, foreign, PF_NO_COMPOUND);

PAGEFLAG(Reserved, reserved, PF_NO_COMPOUND)
        __CLEARPAGEFLAG(Reserved, reserved, PF_NO_COMPOUND)
        __SETPAGEFLAG(Reserved, reserved, PF_NO_COMPOUND)
PAGEFLAG(SwapBacked, swapbacked, PF_NO_TAIL)
        __CLEARPAGEFLAG(SwapBacked, swapbacked, PF_NO_TAIL)
        __SETPAGEFLAG(SwapBacked, swapbacked, PF_NO_TAIL)

/*
 * Private page markings that may be used by the filesystem that owns the page
 * for its own purposes.
 * - PG_private and PG_private_2 cause releasepage() and co to be invoked
 */
PAGEFLAG(Private, private, PF_ANY) __SETPAGEFLAG(Private, private, PF_ANY)
        __CLEARPAGEFLAG(Private, private, PF_ANY)
PAGEFLAG(Private2, private_2, PF_ANY) TESTSCFLAG(Private2, private_2, PF_ANY)
PAGEFLAG(OwnerPriv1, owner_priv_1, PF_ANY)
        TESTCLEARFLAG(OwnerPriv1, owner_priv_1, PF_ANY)

/*
 * Only test-and-set operations exist for PG_writeback.  The unconditional
 * operators are risky: they bypass page accounting.
 */
TESTPAGEFLAG(Writeback, writeback, PF_NO_TAIL)
        TESTSCFLAG(Writeback, writeback, PF_NO_TAIL)
PAGEFLAG(MappedToDisk, mappedtodisk, PF_NO_TAIL)

/* PG_readahead is only used for reads; PG_reclaim is only for writes */
PAGEFLAG(Reclaim, reclaim, PF_NO_TAIL)
        TESTCLEARFLAG(Reclaim, reclaim, PF_NO_TAIL)
PAGEFLAG(Readahead, reclaim, PF_NO_COMPOUND)
        TESTCLEARFLAG(Readahead, reclaim, PF_NO_COMPOUND)

#ifdef CONFIG_HIGHMEM
/*
 * Must use a macro here due to header dependency issues. page_zone() is not
 * available at this point.
 */
#define PageHighMem(__p) is_highmem_idx(page_zonenum(__p))
#else
PAGEFLAG_FALSE(HighMem)
#endif

#ifdef CONFIG_SWAP
static __always_inline int PageSwapCache(struct page *page)
{
#ifdef CONFIG_THP_SWAP
        page = compound_head(page);
#endif
        return PageSwapBacked(page) && test_bit(PG_swapcache, &page->flags);
}
SETPAGEFLAG(SwapCache, swapcache, PF_NO_TAIL)
CLEARPAGEFLAG(SwapCache, swapcache, PF_NO_TAIL)
#else
PAGEFLAG_FALSE(SwapCache)
#endif

PAGEFLAG(Unevictable, unevictable, PF_HEAD)
        __CLEARPAGEFLAG(Unevictable, unevictable, PF_HEAD)
        TESTCLEARFLAG(Unevictable, unevictable, PF_HEAD)

#ifdef CONFIG_MMU
PAGEFLAG(Mlocked, mlocked, PF_NO_TAIL)
        __CLEARPAGEFLAG(Mlocked, mlocked, PF_NO_TAIL)
        TESTSCFLAG(Mlocked, mlocked, PF_NO_TAIL)
#else
PAGEFLAG_FALSE(Mlocked) __CLEARPAGEFLAG_NOOP(Mlocked)
        TESTSCFLAG_FALSE(Mlocked)
#endif

#ifdef CONFIG_ARCH_USES_PG_UNCACHED
PAGEFLAG(Uncached, uncached, PF_NO_COMPOUND)
#else
PAGEFLAG_FALSE(Uncached)
#endif

#ifdef CONFIG_MEMORY_FAILURE
PAGEFLAG(HWPoison, hwpoison, PF_ANY)
TESTSCFLAG(HWPoison, hwpoison, PF_ANY)
#define __PG_HWPOISON (1UL << PG_hwpoison)
extern bool set_hwpoison_free_buddy_page(struct page *page);
#else
PAGEFLAG_FALSE(HWPoison)
static inline bool set_hwpoison_free_buddy_page(struct page *page)
{
        return false;
}
#define __PG_HWPOISON 0
#endif

#if defined(CONFIG_IDLE_PAGE_TRACKING) && defined(CONFIG_64BIT)
TESTPAGEFLAG(Young, young, PF_ANY)
SETPAGEFLAG(Young, young, PF_ANY)
TESTCLEARFLAG(Young, young, PF_ANY)
PAGEFLAG(Idle, idle, PF_ANY)
#endif

/*
 * On an anonymous page mapped into a user virtual memory area,
 * page->mapping points to its anon_vma, not to a struct address_space;
 * with the PAGE_MAPPING_ANON bit set to distinguish it.  See rmap.h.
 *
 * On an anonymous page in a VM_MERGEABLE area, if CONFIG_KSM is enabled,
 * the PAGE_MAPPING_MOVABLE bit may be set along with the PAGE_MAPPING_ANON
 * bit; and then page->mapping points, not to an anon_vma, but to a private
 * structure which KSM associates with that merged page.  See ksm.h.
 *
 * PAGE_MAPPING_MOVABLE without PAGE_MAPPING_ANON is used for a non-lru
 * movable page; page->mapping then points to a struct address_space.
 *
 * Please note that, confusingly, "page_mapping" refers to the inode
 * address_space which maps the page from disk; whereas "page_mapped"
 * refers to user virtual address space into which the page is mapped.
 */
#define PAGE_MAPPING_ANON       0x1
#define PAGE_MAPPING_MOVABLE    0x2
#define PAGE_MAPPING_KSM        (PAGE_MAPPING_ANON | PAGE_MAPPING_MOVABLE)
#define PAGE_MAPPING_FLAGS      (PAGE_MAPPING_ANON | PAGE_MAPPING_MOVABLE)
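
/*
 * For reference, the four combinations of the low two mapping bits:
 *
 *      00: file or kernel page; mapping is a struct address_space (or NULL)
 *      01: PageAnon; mapping points to an anon_vma
 *      10: __PageMovable; mapping points to a struct address_space
 *      11: PageKsm; mapping points to KSM's stable-tree node
 */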

static __always_inline int PageMappingFlags(struct page *page)
{
        return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) != 0;
}

static __always_inline int PageAnon(struct page *page)
{
        page = compound_head(page);
        return ((unsigned long)page->mapping & PAGE_MAPPING_ANON) != 0;
}

static __always_inline int __PageMovable(struct page *page)
{
        return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) ==
                                PAGE_MAPPING_MOVABLE;
}

#ifdef CONFIG_KSM
/*
 * A KSM page is one of those write-protected "shared pages" or "merged pages"
 * which KSM maps into multiple mms, wherever identical anonymous page content
 * is found in VM_MERGEABLE vmas.  It's a PageAnon page, pointing not to any
 * anon_vma, but to that page's node of the stable tree.
 */
static __always_inline int PageKsm(struct page *page)
{
        page = compound_head(page);
        return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) ==
                                PAGE_MAPPING_KSM;
}
#else
TESTPAGEFLAG_FALSE(Ksm)
#endif

u64 stable_page_flags(struct page *page);

static inline int PageUptodate(struct page *page)
{
        int ret;

        page = compound_head(page);
        ret = test_bit(PG_uptodate, &(page)->flags);
        /*
         * Must ensure that the data we read out of the page is loaded
         * _after_ we've loaded page->flags to check for PageUptodate.
         * We can skip the barrier if the page is not uptodate, because
         * we wouldn't be reading anything from it.
         *
         * See SetPageUptodate() for the other side of the story.
         */
        if (ret)
                smp_rmb();

        return ret;
}

static __always_inline void __SetPageUptodate(struct page *page)
{
        VM_BUG_ON_PAGE(PageTail(page), page);
        smp_wmb();
        __set_bit(PG_uptodate, &page->flags);
}

static __always_inline void SetPageUptodate(struct page *page)
{
        VM_BUG_ON_PAGE(PageTail(page), page);
        /*
         * Memory barrier must be issued before setting the PG_uptodate bit,
         * so that all previous stores issued in order to bring the page
         * uptodate are actually visible before PageUptodate becomes true.
         */
        smp_wmb();
        set_bit(PG_uptodate, &page->flags);
}
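
/*
 * Illustrative pairing (a sketch; fill_page_contents()/use_page_contents()
 * are hypothetical stand-ins, not kernel helpers): the writer publishes the
 * data before the flag, the reader checks the flag before the data, and the
 * barriers above order both sides:
 *
 *      // writer, e.g. at read completion:
 *      fill_page_contents(page);        // stores into the page's data
 *      SetPageUptodate(page);           // smp_wmb(), then set_bit()
 *
 *      // reader:
 *      if (PageUptodate(page))          // test_bit(), then smp_rmb()
 *              use_page_contents(page); // guaranteed to see the stores
 */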

CLEARPAGEFLAG(Uptodate, uptodate, PF_NO_TAIL)

int test_clear_page_writeback(struct page *page);
int __test_set_page_writeback(struct page *page, bool keep_write);

#define test_set_page_writeback(page)                   \
        __test_set_page_writeback(page, false)
#define test_set_page_writeback_keepwrite(page) \
        __test_set_page_writeback(page, true)

static inline void set_page_writeback(struct page *page)
{
        test_set_page_writeback(page);
}

static inline void set_page_writeback_keepwrite(struct page *page)
{
        test_set_page_writeback_keepwrite(page);
}

__PAGEFLAG(Head, head, PF_ANY) CLEARPAGEFLAG(Head, head, PF_ANY)

static __always_inline void set_compound_head(struct page *page, struct page *head)
{
        WRITE_ONCE(page->compound_head, (unsigned long)head + 1);
}

static __always_inline void clear_compound_head(struct page *page)
{
        WRITE_ONCE(page->compound_head, 0);
}
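
/*
 * Construction sketch (simplified from prep_compound_page() in
 * mm/page_alloc.c): the head page gets PG_head, each tail page gets the
 * encoded back-pointer to the head:
 *
 *      __SetPageHead(head);
 *      for (i = 1; i < nr_pages; i++)
 *              set_compound_head(&head[i], head);
 */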

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline void ClearPageCompound(struct page *page)
{
        BUG_ON(!PageHead(page));
        ClearPageHead(page);
}
#endif

#define PG_head_mask ((1UL << PG_head))

#ifdef CONFIG_HUGETLB_PAGE
int PageHuge(struct page *page);
int PageHeadHuge(struct page *page);
bool page_huge_active(struct page *page);
#else
TESTPAGEFLAG_FALSE(Huge)
TESTPAGEFLAG_FALSE(HeadHuge)

static inline bool page_huge_active(struct page *page)
{
        return false;
}
#endif

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/*
 * PageHuge() only returns true for hugetlbfs pages, but not for
 * normal or transparent huge pages.
 *
 * PageTransHuge() returns true for both transparent huge and
 * hugetlbfs pages, but not normal pages. PageTransHuge() can be
 * called only in the core VM paths where hugetlbfs pages can't exist.
 */
static inline int PageTransHuge(struct page *page)
{
        VM_BUG_ON_PAGE(PageTail(page), page);
        return PageHead(page);
}

/*
 * PageTransCompound returns true for both transparent huge pages
 * and hugetlbfs pages, so it should only be called when it's known
 * that hugetlbfs pages aren't involved.
 */
static inline int PageTransCompound(struct page *page)
{
        return PageCompound(page);
}

/*
 * PageTransCompoundMap is the same as PageTransCompound, but it also
 * guarantees the primary MMU has the entire compound page mapped
 * through pmd_trans_huge, which in turn guarantees the secondary MMUs
 * can also map the entire compound page. This allows the secondary
 * MMUs to call get_user_pages() only once for each compound page and
 * to immediately map the entire compound page with a single secondary
 * MMU fault. If there will be a pmd split later, the secondary MMUs
 * will get an update through the MMU notifier invalidation through
 * split_huge_pmd().
 *
 * Unlike PageTransCompound, this is only safe to call while
 * split_huge_pmd() cannot run from under us, e.g. when protected by the
 * MMU notifier; otherwise the page->_mapcount < 0 check may yield
 * false positives.
 */
static inline int PageTransCompoundMap(struct page *page)
{
        return PageTransCompound(page) && atomic_read(&page->_mapcount) < 0;
}

/*
 * PageTransTail returns true for both transparent huge pages
 * and hugetlbfs pages, so it should only be called when it's known
 * that hugetlbfs pages aren't involved.
 */
static inline int PageTransTail(struct page *page)
{
        return PageTail(page);
}

/*
 * PageDoubleMap indicates that the compound page is mapped with PTEs as well
 * as PMDs.
 *
 * This is required for optimization of rmap operations for THP: we can
 * postpone per small page mapcount accounting (and its overhead from atomic
 * operations) until the first PMD split.
 *
 * For the page PageDoubleMap means ->_mapcount in all sub-pages is offset up
 * by one. This reference will go away with the last compound_mapcount.
 *
 * See also __split_huge_pmd_locked() and page_remove_anon_compound_rmap().
 */
static inline int PageDoubleMap(struct page *page)
{
        return PageHead(page) && test_bit(PG_double_map, &page[1].flags);
}

static inline void SetPageDoubleMap(struct page *page)
{
        VM_BUG_ON_PAGE(!PageHead(page), page);
        set_bit(PG_double_map, &page[1].flags);
}

static inline void ClearPageDoubleMap(struct page *page)
{
        VM_BUG_ON_PAGE(!PageHead(page), page);
        clear_bit(PG_double_map, &page[1].flags);
}

static inline int TestSetPageDoubleMap(struct page *page)
{
        VM_BUG_ON_PAGE(!PageHead(page), page);
        return test_and_set_bit(PG_double_map, &page[1].flags);
}

static inline int TestClearPageDoubleMap(struct page *page)
{
        VM_BUG_ON_PAGE(!PageHead(page), page);
        return test_and_clear_bit(PG_double_map, &page[1].flags);
}

#else
TESTPAGEFLAG_FALSE(TransHuge)
TESTPAGEFLAG_FALSE(TransCompound)
TESTPAGEFLAG_FALSE(TransCompoundMap)
TESTPAGEFLAG_FALSE(TransTail)
PAGEFLAG_FALSE(DoubleMap)
        TESTSETFLAG_FALSE(DoubleMap)
        TESTCLEARFLAG_FALSE(DoubleMap)
#endif

/*
 * For pages that are never mapped to userspace (and aren't PageSlab),
 * page_type may be used.  Because it is initialised to -1, we invert the
 * sense of the bit, so __SetPageFoo *clears* the bit used for PageFoo, and
 * __ClearPageFoo *sets* the bit used for PageFoo.  We reserve a few high and
 * low bits so that an underflow or overflow of page_mapcount() won't be
 * mistaken for a page type value.
 */

#define PAGE_TYPE_BASE  0xf0000000
/* Reserve              0x0000007f to catch underflows of page_mapcount */
#define PAGE_MAPCOUNT_RESERVE   -128
#define PG_buddy        0x00000080
#define PG_offline      0x00000100
#define PG_kmemcg       0x00000200
#define PG_table        0x00000400

#define PageType(page, flag)                                            \
        ((page->page_type & (PAGE_TYPE_BASE | flag)) == PAGE_TYPE_BASE)

static inline int page_has_type(struct page *page)
{
        return (int)page->page_type < PAGE_MAPCOUNT_RESERVE;
}

#define PAGE_TYPE_OPS(uname, lname)                                     \
static __always_inline int Page##uname(struct page *page)               \
{                                                                       \
        return PageType(page, PG_##lname);                              \
}                                                                       \
static __always_inline void __SetPage##uname(struct page *page)         \
{                                                                       \
        VM_BUG_ON_PAGE(!PageType(page, 0), page);                       \
        page->page_type &= ~PG_##lname;                                 \
}                                                                       \
static __always_inline void __ClearPage##uname(struct page *page)       \
{                                                                       \
        VM_BUG_ON_PAGE(!Page##uname(page), page);                       \
        page->page_type |= PG_##lname;                                  \
}
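
/*
 * Worked example, using the values defined above: page_type starts out as
 * -1 (0xffffffff), which matches no type.  __SetPageBuddy() clears PG_buddy:
 *
 *      0xffffffff & ~0x00000080 == 0xffffff7f
 *
 * and (0xffffff7f & (PAGE_TYPE_BASE | PG_buddy)) == PAGE_TYPE_BASE, so
 * PageBuddy() now returns true; __ClearPageBuddy() sets the bit back and
 * restores 0xffffffff.
 */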

/*
 * PageBuddy() indicates that the page is free and in the buddy system
 * (see mm/page_alloc.c).
 */
PAGE_TYPE_OPS(Buddy, buddy)

/*
 * PageOffline() indicates that the page is logically offline although the
 * containing section is online. (e.g. inflated in a balloon driver or
 * not onlined when onlining the section).
 * The content of these pages is effectively stale. Such pages should not
 * be touched (read/write/dump/save) except by their owner.
 */
PAGE_TYPE_OPS(Offline, offline)

/*
 * If kmemcg is enabled, the buddy allocator will set PageKmemcg() on
 * pages allocated with __GFP_ACCOUNT. It gets cleared on page free.
 */
PAGE_TYPE_OPS(Kmemcg, kmemcg)

/*
 * Marks pages in use as page tables.
 */
PAGE_TYPE_OPS(Table, table)

extern bool is_free_buddy_page(struct page *page);

__PAGEFLAG(Isolated, isolated, PF_ANY);

/*
 * If network-based swap is enabled, sl*b must keep track of whether pages
 * were allocated from pfmemalloc reserves.
 */
static inline int PageSlabPfmemalloc(struct page *page)
{
        VM_BUG_ON_PAGE(!PageSlab(page), page);
        return PageActive(page);
}

static inline void SetPageSlabPfmemalloc(struct page *page)
{
        VM_BUG_ON_PAGE(!PageSlab(page), page);
        SetPageActive(page);
}

static inline void __ClearPageSlabPfmemalloc(struct page *page)
{
        VM_BUG_ON_PAGE(!PageSlab(page), page);
        __ClearPageActive(page);
}

static inline void ClearPageSlabPfmemalloc(struct page *page)
{
        VM_BUG_ON_PAGE(!PageSlab(page), page);
        ClearPageActive(page);
}

#ifdef CONFIG_MMU
#define __PG_MLOCKED            (1UL << PG_mlocked)
#else
#define __PG_MLOCKED            0
#endif

/*
 * Flags checked when a page is freed.  Pages being freed should not have
 * these flags set.  If they are, there is a problem.
 */
#define PAGE_FLAGS_CHECK_AT_FREE                                \
        (1UL << PG_lru          | 1UL << PG_locked      |       \
         1UL << PG_private      | 1UL << PG_private_2   |       \
         1UL << PG_writeback    | 1UL << PG_reserved    |       \
         1UL << PG_slab         | 1UL << PG_active      |       \
         1UL << PG_unevictable  | __PG_MLOCKED)

/*
 * Flags checked when a page is prepped for return by the page allocator.
 * Pages being prepped should not have these flags set.  If they are set,
 * there has been a kernel bug or struct page corruption.
 *
 * __PG_HWPOISON is exceptional because it needs to be kept beyond a page's
 * alloc-free cycle to prevent the page from being reused.
 */
#define PAGE_FLAGS_CHECK_AT_PREP        \
        (((1UL << NR_PAGEFLAGS) - 1) & ~__PG_HWPOISON)

#define PAGE_FLAGS_PRIVATE                              \
        (1UL << PG_private | 1UL << PG_private_2)
/**
 * page_has_private - Determine if page has private stuff
 * @page: The page to be checked
 *
 * Determine if a page has private stuff, indicating that release routines
 * should be invoked upon it.
 */
static inline int page_has_private(struct page *page)
{
        return !!(page->flags & PAGE_FLAGS_PRIVATE);
}

#undef PF_ANY
#undef PF_HEAD
#undef PF_ONLY_HEAD
#undef PF_NO_TAIL
#undef PF_NO_COMPOUND
#endif /* !__GENERATING_BOUNDS_H */

#endif  /* PAGE_FLAGS_H */