linux/mm/internal.h
/* SPDX-License-Identifier: GPL-2.0-or-later */
/* internal.h: mm/ internal definitions
 *
 * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */
#ifndef __MM_INTERNAL_H
#define __MM_INTERNAL_H

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/tracepoint-defs.h>

/*
 * The set of flags that only affect watermark checking and reclaim
 * behaviour. This is used by the MM to obey the caller constraints
 * about IO, FS and watermark checking while ignoring placement
 * hints such as HIGHMEM usage.
 */
#define GFP_RECLAIM_MASK (__GFP_RECLAIM|__GFP_HIGH|__GFP_IO|__GFP_FS|\
			__GFP_NOWARN|__GFP_RETRY_MAYFAIL|__GFP_NOFAIL|\
			__GFP_NORETRY|__GFP_MEMALLOC|__GFP_NOMEMALLOC|\
			__GFP_ATOMIC)
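
/*
 * Illustrative sketch (not part of the original header): a subsystem
 * allocating on behalf of a caller would typically keep only the
 * caller's reclaim constraints and supply its own placement hints,
 * e.g. (where "caller_gfp" is a hypothetical variable holding the
 * caller's mask):
 *
 *	gfp_t gfp = (caller_gfp & GFP_RECLAIM_MASK) | __GFP_HIGHMEM;
 */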

/* The GFP flags allowed during early boot */
#define GFP_BOOT_MASK (__GFP_BITS_MASK & ~(__GFP_RECLAIM|__GFP_IO|__GFP_FS))

/* Control allocation cpuset and node placement constraints */
#define GFP_CONSTRAINT_MASK (__GFP_HARDWALL|__GFP_THISNODE)

/* Do not use these with a slab allocator */
#define GFP_SLAB_BUG_MASK (__GFP_DMA32|__GFP_HIGHMEM|~__GFP_BITS_MASK)
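
/*
 * Hedged sketch of how a slab-style allocator might enforce the mask
 * above on entry (the exact policy shown is illustrative):
 *
 *	if (unlikely(flags & GFP_SLAB_BUG_MASK)) {
 *		WARN_ON_ONCE(1);
 *		flags &= ~GFP_SLAB_BUG_MASK;
 *	}
 */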

void page_writeback_init(void);

vm_fault_t do_swap_page(struct vm_fault *vmf);

void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *start_vma,
		unsigned long floor, unsigned long ceiling);

static inline bool can_madv_lru_vma(struct vm_area_struct *vma)
{
	return !(vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP));
}

void unmap_page_range(struct mmu_gather *tlb,
			     struct vm_area_struct *vma,
			     unsigned long addr, unsigned long end,
			     struct zap_details *details);

void do_page_cache_ra(struct readahead_control *, unsigned long nr_to_read,
		unsigned long lookahead_size);
void force_page_cache_ra(struct readahead_control *, struct file_ra_state *,
		unsigned long nr);
static inline void force_page_cache_readahead(struct address_space *mapping,
		struct file *file, pgoff_t index, unsigned long nr_to_read)
{
	DEFINE_READAHEAD(ractl, file, mapping, index);
	force_page_cache_ra(&ractl, &file->f_ra, nr_to_read);
}

struct page *find_get_entry(struct address_space *mapping, pgoff_t index);
struct page *find_lock_entry(struct address_space *mapping, pgoff_t index);

/**
 * page_evictable - test whether a page is evictable
 * @page: the page to test
 *
 * Test whether page is evictable--i.e., should be placed on active/inactive
 * lists vs unevictable list.
 *
 * Reasons page might not be evictable:
 * (1) page's mapping marked unevictable
 * (2) page is part of an mlocked VMA
 *
 */
static inline bool page_evictable(struct page *page)
{
	bool ret;

	/* Prevent address_space of inode and swap cache from being freed */
	rcu_read_lock();
	ret = !mapping_unevictable(page_mapping(page)) && !PageMlocked(page);
	rcu_read_unlock();
	return ret;
}

/*
 * Turn a non-refcounted page (->_refcount == 0) into refcounted with
 * a count of one.
 */
static inline void set_page_refcounted(struct page *page)
{
	VM_BUG_ON_PAGE(PageTail(page), page);
	VM_BUG_ON_PAGE(page_ref_count(page), page);
	set_page_count(page, 1);
}

extern unsigned long highest_memmap_pfn;

/*
 * Maximum number of reclaim retries without progress before the OOM
 * killer is considered the only way forward.
 */
#define MAX_RECLAIM_RETRIES 16

/*
 * in mm/vmscan.c:
 */
extern int isolate_lru_page(struct page *page);
extern void putback_lru_page(struct page *page);

/*
 * in mm/rmap.c:
 */
extern pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address);

/*
 * in mm/page_alloc.c
 */

/*
 * Structure for holding the mostly immutable allocation parameters passed
 * between functions involved in allocations, including the alloc_pages*
 * family of functions.
 *
 * nodemask, migratetype and highest_zoneidx are initialized only once in
 * __alloc_pages_nodemask() and then never change.
 *
 * zonelist, preferred_zone and highest_zoneidx are set first in
 * __alloc_pages_nodemask() for the fast path, and might be later changed
 * in __alloc_pages_slowpath(). All other functions pass the whole structure
 * by a const pointer.
 */
struct alloc_context {
	struct zonelist *zonelist;
	nodemask_t *nodemask;
	struct zoneref *preferred_zoneref;
	int migratetype;

	/*
	 * highest_zoneidx represents highest usable zone index of
	 * the allocation request. Due to the nature of the zone,
	 * memory on lower zone than the highest_zoneidx will be
	 * protected by lowmem_reserve[highest_zoneidx].
	 *
	 * highest_zoneidx is also used by reclaim/compaction to limit
	 * the target zone since higher zone than this index cannot be
	 * usable for this allocation request.
	 */
	enum zone_type highest_zoneidx;
	bool spread_dirty_pages;
};

/*
 * Locate the struct page for both the matching buddy in our
 * pair (buddy1) and the combined order O+1 page they form (page).
 *
 * 1) Any buddy B1 will have an order O twin B2 which satisfies
 * the following equation:
 *     B2 = B1 ^ (1 << O)
 * For example, if the starting buddy (B1) is #8 its order
 * 1 buddy (B2) is #10:
 *     B2 = 8 ^ (1 << 1) = 8 ^ 2 = 10
 *
 * 2) Any buddy B will have an order O+1 parent P which
 * satisfies the following equation:
 *     P = B & ~(1 << O)
 *
 * Assumption: *_mem_map is contiguous at least up to MAX_ORDER
 */
static inline unsigned long
__find_buddy_pfn(unsigned long page_pfn, unsigned int order)
{
	return page_pfn ^ (1 << order);
}
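
/*
 * Worked example (illustrative): for pfn 10 at order 1,
 * __find_buddy_pfn(10, 1) returns 10 ^ (1 << 1) = 8, and the merged
 * order-2 parent from equation 2) above starts at
 * P = 10 & ~(1 << 1) = 8.
 */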

extern struct page *__pageblock_pfn_to_page(unsigned long start_pfn,
				unsigned long end_pfn, struct zone *zone);

static inline struct page *pageblock_pfn_to_page(unsigned long start_pfn,
				unsigned long end_pfn, struct zone *zone)
{
	if (zone->contiguous)
		return pfn_to_page(start_pfn);

	return __pageblock_pfn_to_page(start_pfn, end_pfn, zone);
}

extern int __isolate_free_page(struct page *page, unsigned int order);
extern void __putback_isolated_page(struct page *page, unsigned int order,
				    int mt);
extern void memblock_free_pages(struct page *page, unsigned long pfn,
					unsigned int order);
extern void __free_pages_core(struct page *page, unsigned int order);
extern void prep_compound_page(struct page *page, unsigned int order);
extern void post_alloc_hook(struct page *page, unsigned int order,
					gfp_t gfp_flags);
extern int user_min_free_kbytes;

extern void zone_pcp_update(struct zone *zone);
extern void zone_pcp_reset(struct zone *zone);

#if defined CONFIG_COMPACTION || defined CONFIG_CMA

/*
 * in mm/compaction.c
 */
/*
 * compact_control is used to track pages being migrated and the free pages
 * they are being migrated to during memory compaction. The free_pfn starts
 * at the end of a zone and migrate_pfn begins at the start. Movable pages
 * are moved to the end of a zone during a compaction run and the run
 * completes when free_pfn <= migrate_pfn
 */
struct compact_control {
	struct list_head freepages;	/* List of free pages to migrate to */
	struct list_head migratepages;	/* List of pages being migrated */
	unsigned int nr_freepages;	/* Number of isolated free pages */
	unsigned int nr_migratepages;	/* Number of pages to migrate */
	unsigned long free_pfn;		/* isolate_freepages search base */
	unsigned long migrate_pfn;	/* isolate_migratepages search base */
	unsigned long fast_start_pfn;	/* a pfn to start linear scan from */
	struct zone *zone;
	unsigned long total_migrate_scanned;
	unsigned long total_free_scanned;
	unsigned short fast_search_fail;/* failures to use free list searches */
	short search_order;		/* order to start a fast search at */
	const gfp_t gfp_mask;		/* gfp mask of a direct compactor */
	int order;			/* order a direct compactor needs */
	int migratetype;		/* migratetype of direct compactor */
	const unsigned int alloc_flags;	/* alloc flags of a direct compactor */
	const int highest_zoneidx;	/* zone index of a direct compactor */
	enum migrate_mode mode;		/* Async or sync migration mode */
	bool ignore_skip_hint;		/* Scan blocks even if marked skip */
	bool no_set_skip_hint;		/* Don't mark blocks for skipping */
	bool ignore_block_suitable;	/* Scan blocks considered unsuitable */
	bool direct_compaction;		/* False from kcompactd or /proc/... */
	bool proactive_compaction;	/* kcompactd proactive compaction */
	bool whole_zone;		/* Whole zone should/has been scanned */
	bool contended;			/* Signal lock or sched contention */
	bool rescan;			/* Rescanning the same pageblock */
	bool alloc_contig;		/* alloc_contig_range allocation */
};
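
/*
 * Illustrative sketch of the scanner convergence described above
 * (not part of the original header):
 *
 *	zone start                                    zone end
 *	|  migrate_pfn -->                <-- free_pfn  |
 *
 * The migrate scanner isolates movable pages, the free scanner isolates
 * free pages to move them to, and the run completes once
 * free_pfn <= migrate_pfn.
 */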

/*
 * Used in direct compaction when a page should be taken from the freelists
 * immediately when one is created during the free path.
 */
struct capture_control {
	struct compact_control *cc;
	struct page *page;
};

unsigned long
isolate_freepages_range(struct compact_control *cc,
			unsigned long start_pfn, unsigned long end_pfn);
unsigned long
isolate_migratepages_range(struct compact_control *cc,
			   unsigned long low_pfn, unsigned long end_pfn);
int find_suitable_fallback(struct free_area *area, unsigned int order,
			int migratetype, bool only_stealable, bool *can_steal);

#endif

/*
 * This function returns the order of a free page in the buddy system. In
 * general, page_zone(page)->lock must be held by the caller to prevent the
 * page from being allocated in parallel and returning garbage as the order.
 * If a caller does not hold page_zone(page)->lock, it must guarantee that the
 * page cannot be allocated or merged in parallel. Alternatively, it must
 * handle invalid values gracefully, and use buddy_order_unsafe() below.
 */
static inline unsigned int buddy_order(struct page *page)
{
	/* PageBuddy() must be checked by the caller */
	return page_private(page);
}

/*
 * Like buddy_order(), but for callers who cannot afford to hold the zone lock.
 * PageBuddy() should be checked first by the caller to minimize race window,
 * and invalid values must be handled gracefully.
 *
 * READ_ONCE is used so that if the caller assigns the result into a local
 * variable and e.g. tests it for valid range before using, the compiler cannot
 * decide to remove the variable and inline the page_private(page) multiple
 * times, potentially observing different values in the tests and the actual
 * use of the result.
 */
#define buddy_order_unsafe(page)	READ_ONCE(page_private(page))
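
/*
 * Hedged usage sketch of the pattern described above: read the order
 * once, validate it, then use only the local copy ("pfn" here is a
 * hypothetical cursor in a caller's scan loop):
 *
 *	unsigned int order = buddy_order_unsafe(page);
 *
 *	if (order < MAX_ORDER)
 *		pfn += 1UL << order;
 */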

/*
 * A COW mapping: private (VM_SHARED clear) but allowed to become
 * writable (VM_MAYWRITE set), so writes must copy rather than share.
 */
static inline bool is_cow_mapping(vm_flags_t flags)
{
	return (flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
}

/*
 * These three helpers classify VMAs for virtual memory accounting.
 */

/*
 * Executable code area - executable, not writable, not stack
 */
static inline bool is_exec_mapping(vm_flags_t flags)
{
	return (flags & (VM_EXEC | VM_WRITE | VM_STACK)) == VM_EXEC;
}

/*
 * Stack area - automatically grows in one direction
 *
 * VM_GROWSUP / VM_GROWSDOWN VMAs are always private anonymous:
 * do_mmap() forbids all other combinations.
 */
static inline bool is_stack_mapping(vm_flags_t flags)
{
	return (flags & VM_STACK) == VM_STACK;
}

/*
 * Data area - private, writable, not stack
 */
static inline bool is_data_mapping(vm_flags_t flags)
{
	return (flags & (VM_WRITE | VM_SHARED | VM_STACK)) == VM_WRITE;
}

/* mm/util.c */
void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
		struct vm_area_struct *prev);
void __vma_unlink_list(struct mm_struct *mm, struct vm_area_struct *vma);

#ifdef CONFIG_MMU
extern long populate_vma_page_range(struct vm_area_struct *vma,
		unsigned long start, unsigned long end, int *nonblocking);
extern void munlock_vma_pages_range(struct vm_area_struct *vma,
			unsigned long start, unsigned long end);
static inline void munlock_vma_pages_all(struct vm_area_struct *vma)
{
	munlock_vma_pages_range(vma, vma->vm_start, vma->vm_end);
}

/*
 * must be called with vma's mmap_lock held for read or write, and page locked.
 */
extern void mlock_vma_page(struct page *page);
extern unsigned int munlock_vma_page(struct page *page);

/*
 * Clear the page's PageMlocked().  This can be useful in a situation where
 * we want to unconditionally remove a page from the pagecache -- e.g.,
 * on truncation or freeing.
 *
 * It is legal to call this function for any page, mlocked or not.
 * If called for a page that is still mapped by mlocked vmas, all we do
 * is revert to lazy LRU behaviour -- semantics are not broken.
 */
extern void clear_page_mlock(struct page *page);

/*
 * mlock_migrate_page - called only from migrate_misplaced_transhuge_page()
 * (because that does not go through the full procedure of migration ptes):
 * to migrate the Mlocked page flag; update statistics.
 */
static inline void mlock_migrate_page(struct page *newpage, struct page *page)
{
	if (TestClearPageMlocked(page)) {
		int nr_pages = thp_nr_pages(page);

		/* Holding pmd lock, no change in irq context: __mod is safe */
		__mod_zone_page_state(page_zone(page), NR_MLOCK, -nr_pages);
		SetPageMlocked(newpage);
		__mod_zone_page_state(page_zone(newpage), NR_MLOCK, nr_pages);
	}
}

extern pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma);

/*
 * At what user virtual address is page expected in @vma?
 */
static inline unsigned long
__vma_address(struct page *page, struct vm_area_struct *vma)
{
	pgoff_t pgoff = page_to_pgoff(page);
	return vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
}

static inline unsigned long
vma_address(struct page *page, struct vm_area_struct *vma)
{
	unsigned long start, end;

	start = __vma_address(page, vma);
	end = start + thp_size(page) - PAGE_SIZE;

	/* page should be within @vma mapping range */
	VM_BUG_ON_VMA(end < vma->vm_start || start >= vma->vm_end, vma);

	return max(start, vma->vm_start);
}
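
/*
 * Worked example (illustrative, 4K pages): with vma->vm_start ==
 * 0x7f0000000000, vma->vm_pgoff == 16 and a page whose pgoff is 19,
 * __vma_address() returns
 * 0x7f0000000000 + ((19 - 16) << 12) == 0x7f0000003000.
 */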

static inline struct file *maybe_unlock_mmap_for_io(struct vm_fault *vmf,
						    struct file *fpin)
{
	int flags = vmf->flags;

	if (fpin)
		return fpin;

	/*
	 * FAULT_FLAG_RETRY_NOWAIT means we don't want to wait on page locks or
	 * anything, so we pin the file and drop the mmap_lock only if
	 * FAULT_FLAG_ALLOW_RETRY is set (without FAULT_FLAG_RETRY_NOWAIT)
	 * and this is the first attempt.
	 */
	if (fault_flag_allow_retry_first(flags) &&
	    !(flags & FAULT_FLAG_RETRY_NOWAIT)) {
		fpin = get_file(vmf->vma->vm_file);
		mmap_read_unlock(vmf->vma->vm_mm);
	}
	return fpin;
}

#else /* !CONFIG_MMU */
static inline void clear_page_mlock(struct page *page) { }
static inline void mlock_vma_page(struct page *page) { }
static inline void mlock_migrate_page(struct page *new, struct page *old) { }

#endif /* !CONFIG_MMU */

/*
 * Return the mem_map entry representing the 'offset' subpage within
 * the maximally aligned gigantic page 'base'.  Handle any discontiguity
 * in the mem_map at MAX_ORDER_NR_PAGES boundaries.
 */
static inline struct page *mem_map_offset(struct page *base, int offset)
{
	if (unlikely(offset >= MAX_ORDER_NR_PAGES))
		return nth_page(base, offset);
	return base + offset;
}

/*
 * Iterator over all subpages within the maximally aligned gigantic
 * page 'base'.  Handle any discontiguity in the mem_map.
 */
static inline struct page *mem_map_next(struct page *iter,
						struct page *base, int offset)
{
	if (unlikely((offset & (MAX_ORDER_NR_PAGES - 1)) == 0)) {
		unsigned long pfn = page_to_pfn(base) + offset;
		if (!pfn_valid(pfn))
			return NULL;
		return pfn_to_page(pfn);
	}
	return iter + 1;
}
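
/*
 * Hedged usage sketch (caller code, not part of this header): visiting
 * every subpage of a gigantic page without assuming a contiguous
 * mem_map; "nr_pages" and the loop body are illustrative:
 *
 *	struct page *p = base;
 *	int i;
 *
 *	for (i = 0; i < nr_pages; i++, p = mem_map_next(p, base, i))
 *		clear_highpage(p);
 */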

/* Memory initialisation debug and verification */
enum mminit_level {
	MMINIT_WARNING,
	MMINIT_VERIFY,
	MMINIT_TRACE
};

#ifdef CONFIG_DEBUG_MEMORY_INIT

extern int mminit_loglevel;

#define mminit_dprintk(level, prefix, fmt, arg...) \
do { \
	if (level < mminit_loglevel) { \
		if (level <= MMINIT_WARNING) \
			pr_warn("mminit::" prefix " " fmt, ##arg);	\
		else \
			printk(KERN_DEBUG "mminit::" prefix " " fmt, ##arg); \
	} \
} while (0)
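
/*
 * Illustrative call (assumed caller; "nid" is a hypothetical variable):
 *
 *	mminit_dprintk(MMINIT_TRACE, "memmap_init",
 *		       "Initialising map node %d\n", nid);
 */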

extern void mminit_verify_pageflags_layout(void);
extern void mminit_verify_zonelist(void);
#else

static inline void mminit_dprintk(enum mminit_level level,
				const char *prefix, const char *fmt, ...)
{
}

static inline void mminit_verify_pageflags_layout(void)
{
}

static inline void mminit_verify_zonelist(void)
{
}
#endif /* CONFIG_DEBUG_MEMORY_INIT */

/* mminit_validate_memmodel_limits is independent of CONFIG_DEBUG_MEMORY_INIT */
#if defined(CONFIG_SPARSEMEM)
extern void mminit_validate_memmodel_limits(unsigned long *start_pfn,
				unsigned long *end_pfn);
#else
static inline void mminit_validate_memmodel_limits(unsigned long *start_pfn,
				unsigned long *end_pfn)
{
}
#endif /* CONFIG_SPARSEMEM */

#define NODE_RECLAIM_NOSCAN	-2
#define NODE_RECLAIM_FULL	-1
#define NODE_RECLAIM_SOME	0
#define NODE_RECLAIM_SUCCESS	1

#ifdef CONFIG_NUMA
extern int node_reclaim(struct pglist_data *, gfp_t, unsigned int);
#else
static inline int node_reclaim(struct pglist_data *pgdat, gfp_t mask,
				unsigned int order)
{
	return NODE_RECLAIM_NOSCAN;
}
#endif

extern int hwpoison_filter(struct page *p);

extern u32 hwpoison_filter_dev_major;
extern u32 hwpoison_filter_dev_minor;
extern u64 hwpoison_filter_flags_mask;
extern u64 hwpoison_filter_flags_value;
extern u64 hwpoison_filter_memcg;
extern u32 hwpoison_filter_enable;

extern unsigned long __must_check vm_mmap_pgoff(struct file *, unsigned long,
	unsigned long, unsigned long,
	unsigned long, unsigned long);

extern void set_pageblock_order(void);
unsigned int reclaim_clean_pages_from_list(struct zone *zone,
					    struct list_head *page_list);
/* The ALLOC_WMARK bits are used as an index to zone->watermark */
#define ALLOC_WMARK_MIN		WMARK_MIN
#define ALLOC_WMARK_LOW		WMARK_LOW
#define ALLOC_WMARK_HIGH	WMARK_HIGH
#define ALLOC_NO_WATERMARKS	0x04 /* don't check watermarks at all */

/* Mask to get the watermark bits */
#define ALLOC_WMARK_MASK	(ALLOC_NO_WATERMARKS-1)
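
/*
 * Hedged sketch: the page allocator derives the watermark to test from
 * the low bits of its alloc_flags, roughly:
 *
 *	unsigned long mark = wmark_pages(zone,
 *					 alloc_flags & ALLOC_WMARK_MASK);
 */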

/*
 * Only MMU archs have async oom victim reclaim - aka oom_reaper - so we
 * cannot assume a reduced access to memory reserves is sufficient for
 * !MMU.
 */
#ifdef CONFIG_MMU
#define ALLOC_OOM		0x08
#else
#define ALLOC_OOM		ALLOC_NO_WATERMARKS
#endif

#define ALLOC_HARDER		 0x10 /* try to alloc harder */
#define ALLOC_HIGH		 0x20 /* __GFP_HIGH set */
#define ALLOC_CPUSET		 0x40 /* check for correct cpuset */
#define ALLOC_CMA		 0x80 /* allow allocations from CMA areas */
#ifdef CONFIG_ZONE_DMA32
#define ALLOC_NOFRAGMENT	0x100 /* avoid mixing pageblock types */
#else
#define ALLOC_NOFRAGMENT	  0x0
#endif
#define ALLOC_KSWAPD		0x800 /* allow waking of kswapd, __GFP_KSWAPD_RECLAIM set */

enum ttu_flags;
struct tlbflush_unmap_batch;

/*
 * only for MM internal work items which do not depend on
 * any allocations or locks which might depend on allocations
 */
extern struct workqueue_struct *mm_percpu_wq;

#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
void try_to_unmap_flush(void);
void try_to_unmap_flush_dirty(void);
void flush_tlb_batched_pending(struct mm_struct *mm);
#else
static inline void try_to_unmap_flush(void)
{
}
static inline void try_to_unmap_flush_dirty(void)
{
}
static inline void flush_tlb_batched_pending(struct mm_struct *mm)
{
}
#endif /* CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH */

extern const struct trace_print_flags pageflag_names[];
extern const struct trace_print_flags vmaflag_names[];
extern const struct trace_print_flags gfpflag_names[];

static inline bool is_migrate_highatomic(enum migratetype migratetype)
{
	return migratetype == MIGRATE_HIGHATOMIC;
}

static inline bool is_migrate_highatomic_page(struct page *page)
{
	return get_pageblock_migratetype(page) == MIGRATE_HIGHATOMIC;
}

void setup_zone_pageset(struct zone *zone);

struct migration_target_control {
	int nid;		/* preferred node id */
	nodemask_t *nmask;
	gfp_t gfp_mask;
};

#endif	/* __MM_INTERNAL_H */