/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_GFP_H
#define __LINUX_GFP_H

#include <linux/mmdebug.h>
#include <linux/mmzone.h>
#include <linux/stddef.h>
#include <linux/linkage.h>
#include <linux/topology.h>

struct vm_area_struct;

/*
 * In case of changes, please don't forget to update
 * include/trace/events/mmflags.h and tools/perf/builtin-kmem.c
 */

/* Plain integer GFP bitmasks. Do not use this directly. */
#define ___GFP_DMA		0x01u
#define ___GFP_HIGHMEM		0x02u
#define ___GFP_DMA32		0x04u
#define ___GFP_MOVABLE		0x08u
#define ___GFP_RECLAIMABLE	0x10u
#define ___GFP_HIGH		0x20u
#define ___GFP_IO		0x40u
#define ___GFP_FS		0x80u
#define ___GFP_COLD		0x100u
#define ___GFP_NOWARN		0x200u
#define ___GFP_RETRY_MAYFAIL	0x400u
#define ___GFP_NOFAIL		0x800u
#define ___GFP_NORETRY		0x1000u
#define ___GFP_MEMALLOC		0x2000u
#define ___GFP_COMP		0x4000u
#define ___GFP_ZERO		0x8000u
#define ___GFP_NOMEMALLOC	0x10000u
#define ___GFP_HARDWALL		0x20000u
#define ___GFP_THISNODE		0x40000u
#define ___GFP_ATOMIC		0x80000u
#define ___GFP_ACCOUNT		0x100000u
#define ___GFP_NOTRACK		0x200000u
#define ___GFP_DIRECT_RECLAIM	0x400000u
#define ___GFP_WRITE		0x800000u
#define ___GFP_KSWAPD_RECLAIM	0x1000000u
#ifdef CONFIG_LOCKDEP
#define ___GFP_NOLOCKDEP	0x2000000u
#else
#define ___GFP_NOLOCKDEP	0
#endif
/* If the above are modified, __GFP_BITS_SHIFT may need updating */

/*
 * Physical address zone modifiers (see linux/mmzone.h - low four bits)
 *
 * Do not put any conditional on these. If necessary modify the definitions
 * without the underscores and use them consistently. The definitions here may
 * be used in bit comparisons.
 */
#define __GFP_DMA	((__force gfp_t)___GFP_DMA)
#define __GFP_HIGHMEM	((__force gfp_t)___GFP_HIGHMEM)
#define __GFP_DMA32	((__force gfp_t)___GFP_DMA32)
#define __GFP_MOVABLE	((__force gfp_t)___GFP_MOVABLE)  /* ZONE_MOVABLE allowed */
#define GFP_ZONEMASK	(__GFP_DMA|__GFP_HIGHMEM|__GFP_DMA32|__GFP_MOVABLE)

/*
 * Page mobility and placement hints
 *
 * These flags provide hints about how mobile the page is. Pages with similar
 * mobility are placed within the same pageblocks to minimise problems due
 * to external fragmentation.
 *
 * __GFP_MOVABLE (also a zone modifier) indicates that the page can be
 * moved by page migration during memory compaction or can be reclaimed.
 *
 * __GFP_RECLAIMABLE is used for slab allocations that specify
 * SLAB_RECLAIM_ACCOUNT and whose pages can be freed via shrinkers.
 *
 * __GFP_WRITE indicates the caller intends to dirty the page. Where possible,
 * these pages will be spread between local zones to avoid all the dirty
 * pages being in one zone (fair zone allocation policy).
 *
 * __GFP_HARDWALL enforces the cpuset memory allocation policy.
 *
 * __GFP_THISNODE forces the allocation to be satisfied from the requested
 * node with no fallbacks or placement policy enforcements.
 *
 * __GFP_ACCOUNT causes the allocation to be accounted to kmemcg.
 */
#define __GFP_RECLAIMABLE ((__force gfp_t)___GFP_RECLAIMABLE)
#define __GFP_WRITE	((__force gfp_t)___GFP_WRITE)
#define __GFP_HARDWALL	((__force gfp_t)___GFP_HARDWALL)
#define __GFP_THISNODE	((__force gfp_t)___GFP_THISNODE)
#define __GFP_ACCOUNT	((__force gfp_t)___GFP_ACCOUNT)
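/*
 * Illustrative sketch (not part of the original header): a per-node,
 * kmemcg-accounted allocation could combine the hints above with a base
 * combination such as GFP_KERNEL (defined further down). kmalloc_node() is
 * declared in <linux/slab.h>; the helper name here is hypothetical.
 *
 *	static void *alloc_node_local_table(int nid, size_t size)
 *	{
 *		return kmalloc_node(size,
 *				    GFP_KERNEL | __GFP_ACCOUNT | __GFP_THISNODE,
 *				    nid);
 *	}
 */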
/*
 * Watermark modifiers -- controls access to emergency reserves
 *
 * __GFP_HIGH indicates that the caller is high-priority and that granting
 * the request is necessary before the system can make forward progress.
 * For example, creating an IO context to clean pages.
 *
 * __GFP_ATOMIC indicates that the caller cannot reclaim or sleep and is
 * high priority. Users are typically interrupt handlers. This may be
 * used in conjunction with __GFP_HIGH.
 *
 * __GFP_MEMALLOC allows access to all memory. This should only be used when
 * the caller guarantees the allocation will allow more memory to be freed
 * very shortly, e.g. a process exiting or swapping. Users should either
 * be the MM or be coordinating closely with the VM (e.g. swap over NFS).
 *
 * __GFP_NOMEMALLOC is used to explicitly forbid access to emergency reserves.
 * This takes precedence over the __GFP_MEMALLOC flag if both are set.
 */
#define __GFP_ATOMIC	((__force gfp_t)___GFP_ATOMIC)
#define __GFP_HIGH	((__force gfp_t)___GFP_HIGH)
#define __GFP_MEMALLOC	((__force gfp_t)___GFP_MEMALLOC)
#define __GFP_NOMEMALLOC ((__force gfp_t)___GFP_NOMEMALLOC)
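/*
 * Illustrative sketch: because __GFP_NOMEMALLOC takes precedence over
 * __GFP_MEMALLOC, a hypothetical "may this request dip into reserves?"
 * predicate would test the forbidding flag first. The real policy lives in
 * the page allocator (see gfp_pfmemalloc_allowed() declared later in this
 * header); this only demonstrates the documented precedence.
 *
 *	static bool gfp_may_use_reserves(gfp_t gfp_mask)
 *	{
 *		if (gfp_mask & __GFP_NOMEMALLOC)
 *			return false;
 *		return !!(gfp_mask & __GFP_MEMALLOC);
 *	}
 */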
/*
 * Reclaim modifiers
 *
 * __GFP_IO can start physical IO.
 *
 * __GFP_FS can call down to the low-level FS. Clearing the flag avoids the
 * allocator recursing into the filesystem which might already be holding
 * locks.
 *
 * __GFP_DIRECT_RECLAIM indicates that the caller may enter direct reclaim.
 * This flag can be cleared to avoid unnecessary delays when a fallback
 * option is available.
 *
 * __GFP_KSWAPD_RECLAIM indicates that the caller wants to wake kswapd when
 * the low watermark is reached and have it reclaim pages until the high
 * watermark is reached. A caller may wish to clear this flag when fallback
 * options are available and the reclaim is likely to disrupt the system. The
 * canonical example is THP allocation where a fallback is cheap but
 * reclaim/compaction may cause indirect stalls.
 *
 * __GFP_RECLAIM is shorthand to allow/forbid both direct and kswapd reclaim.
 *
 * The default allocator behavior depends on the request size. We have a
 * concept of so-called costly allocations (with order > PAGE_ALLOC_COSTLY_ORDER).
 * !costly allocations are too essential to fail so they are implicitly
 * non-failing by default (with some exceptions, e.g. OOM victims might fail,
 * so the caller still has to check for failures), while costly requests try
 * to be not disruptive and back off even without invoking the OOM killer.
 * The following three modifiers might be used to override some of these
 * implicit rules.
 *
 * __GFP_NORETRY: The VM implementation will try only very lightweight
 * memory direct reclaim to get some memory under memory pressure (thus
 * it can sleep). It will avoid disruptive actions like OOM killer. The
 * caller must handle the failure which is quite likely to happen under
 * heavy memory pressure. The flag is suitable when failure can easily be
 * handled at small cost, such as reduced throughput.
 *
 * __GFP_RETRY_MAYFAIL: The VM implementation will retry memory reclaim
 * procedures that have previously failed if there is some indication
 * that progress has been made elsewhere. It can wait for other
 * tasks to attempt high level approaches to freeing memory such as
 * compaction (which removes fragmentation) and page-out.
 * There is still a definite limit to the number of retries, but it is
 * a larger limit than with __GFP_NORETRY.
 * Allocations with this flag may fail, but only when there is
 * genuinely little unused memory. While these allocations do not
 * directly trigger the OOM killer, their failure indicates that
 * the system is likely to need to use the OOM killer soon. The
 * caller must handle failure, but can reasonably do so by failing
 * a higher-level request, or completing it only in a much less
 * efficient manner.
 * If the allocation does fail, and the caller is in a position to
 * free some non-essential memory, doing so could benefit the system
 * as a whole.
 *
 * __GFP_NOFAIL: The VM implementation _must_ retry infinitely: the caller
 * cannot handle allocation failures. The allocation could block
 * indefinitely but will never return with failure. Testing for
 * failure is pointless.
 * New users should be evaluated carefully (and the flag should be
 * used only when there is no reasonable failure policy), but it is
 * definitely preferable to use the flag rather than open-code an
 * endless loop around the allocator.
 * Using this flag for costly allocations is _highly_ discouraged.
 */
#define __GFP_IO	((__force gfp_t)___GFP_IO)
#define __GFP_FS	((__force gfp_t)___GFP_FS)
#define __GFP_DIRECT_RECLAIM	((__force gfp_t)___GFP_DIRECT_RECLAIM) /* Caller can reclaim */
#define __GFP_KSWAPD_RECLAIM	((__force gfp_t)___GFP_KSWAPD_RECLAIM) /* kswapd can wake */
#define __GFP_RECLAIM ((__force gfp_t)(___GFP_DIRECT_RECLAIM|___GFP_KSWAPD_RECLAIM))
#define __GFP_RETRY_MAYFAIL	((__force gfp_t)___GFP_RETRY_MAYFAIL)
#define __GFP_NOFAIL	((__force gfp_t)___GFP_NOFAIL)
#define __GFP_NORETRY	((__force gfp_t)___GFP_NORETRY)
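/*
 * Illustrative sketch: a caller with a cheap fallback can pair __GFP_NORETRY
 * (plus __GFP_NOWARN, since failure is expected and handled) with that
 * fallback instead of letting the allocator struggle. This mirrors the
 * kvmalloc() idea but is a simplified stand-in, not the real implementation;
 * kmalloc() and vmalloc() come from <linux/slab.h> and <linux/vmalloc.h>.
 *
 *	static void *big_buffer_alloc(size_t size)
 *	{
 *		void *buf;
 *
 *		buf = kmalloc(size, GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY);
 *		if (buf)
 *			return buf;
 *		return vmalloc(size);
 *	}
 */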
/*
 * Action modifiers
 *
 * __GFP_COLD indicates that the caller does not expect the page to be used
 * in the near future. Where possible, a cache-cold page will be returned.
 *
 * __GFP_NOWARN suppresses allocation failure reports.
 *
 * __GFP_COMP requests compound page metadata.
 *
 * __GFP_ZERO returns a zeroed page on success.
 *
 * __GFP_NOTRACK avoids tracking with kmemcheck.
 *
 * __GFP_NOTRACK_FALSE_POSITIVE is an alias of __GFP_NOTRACK. It's a means of
 * distinguishing in the source between false positives and allocations that
 * cannot be supported (e.g. page tables).
 */
#define __GFP_COLD	((__force gfp_t)___GFP_COLD)
#define __GFP_NOWARN	((__force gfp_t)___GFP_NOWARN)
#define __GFP_COMP	((__force gfp_t)___GFP_COMP)
#define __GFP_ZERO	((__force gfp_t)___GFP_ZERO)
#define __GFP_NOTRACK	((__force gfp_t)___GFP_NOTRACK)
#define __GFP_NOTRACK_FALSE_POSITIVE (__GFP_NOTRACK)

/* Disable lockdep for GFP context tracking */
#define __GFP_NOLOCKDEP ((__force gfp_t)___GFP_NOLOCKDEP)

/* Room for N __GFP_FOO bits */
#define __GFP_BITS_SHIFT (25 + IS_ENABLED(CONFIG_LOCKDEP))
#define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1))

/*
 * Useful GFP flag combinations that are commonly used. It is recommended
 * that subsystems start with one of these combinations and then set/clear
 * __GFP_FOO flags as necessary.
 *
 * GFP_ATOMIC users cannot sleep and need the allocation to succeed. A lower
 * watermark is applied to allow access to "atomic reserves".
 *
 * GFP_KERNEL is typical for kernel-internal allocations. The caller requires
 * ZONE_NORMAL or a lower zone for direct access but can direct reclaim.
 *
 * GFP_KERNEL_ACCOUNT is the same as GFP_KERNEL, except the allocation is
 * accounted to kmemcg.
 *
 * GFP_NOWAIT is for kernel allocations that should not stall for direct
 * reclaim, start physical IO or use any filesystem callback.
 *
 * GFP_NOIO will use direct reclaim to discard clean pages or slab pages
 * that do not require the starting of any physical IO.
 * Please try to avoid using this flag directly and instead use
 * memalloc_noio_{save,restore} to mark the whole scope which cannot
 * perform any IO with a short explanation why. All allocation requests
 * will inherit GFP_NOIO implicitly.
 *
 * GFP_NOFS will use direct reclaim but will not use any filesystem interfaces.
 * Please try to avoid using this flag directly and instead use
 * memalloc_nofs_{save,restore} to mark the whole scope which cannot/shouldn't
 * recurse into the FS layer with a short explanation why. All allocation
 * requests will inherit GFP_NOFS implicitly.
 *
 * GFP_USER is for userspace allocations that also need to be directly
 * accessible by the kernel or hardware. It is typically used by hardware
 * for buffers that are mapped to userspace (e.g. graphics) that hardware
 * still must DMA to. cpuset limits are enforced for these allocations.
 *
 * GFP_DMA exists for historical reasons and should be avoided where possible.
 * The flag indicates that the caller requires that the lowest zone be
 * used (ZONE_DMA or 16M on x86-64). Ideally, this would be removed but
 * it would require careful auditing as some users really require it and
 * others use the flag to avoid lowmem reserves in ZONE_DMA and treat the
 * lowest zone as a type of emergency reserve.
 *
 * GFP_DMA32 is similar to GFP_DMA except that the caller requires a 32-bit
 * address.
 *
 * GFP_HIGHUSER is for userspace allocations that may be mapped to userspace,
 * do not need to be directly accessible by the kernel but that cannot
 * move once in use. An example may be a hardware allocation that maps
 * data directly into userspace but has no addressing limitations.
 *
 * GFP_HIGHUSER_MOVABLE is for userspace allocations that the kernel does not
 * need direct access to but can use kmap() when access is required. They
 * are expected to be movable via page reclaim or page migration. Typically,
 * pages on the LRU would also be allocated with GFP_HIGHUSER_MOVABLE.
 *
 * GFP_TRANSHUGE and GFP_TRANSHUGE_LIGHT are used for THP allocations. They are
 * compound allocations that will generally fail quickly if memory is not
 * available and will not wake kswapd/kcompactd on failure. The _LIGHT
 * version does not attempt reclaim/compaction at all and is used by default
 * in the page fault path, while the non-light version is used by khugepaged.
 */
#define GFP_ATOMIC	(__GFP_HIGH|__GFP_ATOMIC|__GFP_KSWAPD_RECLAIM)
#define GFP_KERNEL	(__GFP_RECLAIM | __GFP_IO | __GFP_FS)
#define GFP_KERNEL_ACCOUNT (GFP_KERNEL | __GFP_ACCOUNT)
#define GFP_NOWAIT	(__GFP_KSWAPD_RECLAIM)
#define GFP_NOIO	(__GFP_RECLAIM)
#define GFP_NOFS	(__GFP_RECLAIM | __GFP_IO)
#define GFP_USER	(__GFP_RECLAIM | __GFP_IO | __GFP_FS | __GFP_HARDWALL)
#define GFP_DMA		__GFP_DMA
#define GFP_DMA32	__GFP_DMA32
#define GFP_HIGHUSER	(GFP_USER | __GFP_HIGHMEM)
#define GFP_HIGHUSER_MOVABLE	(GFP_HIGHUSER | __GFP_MOVABLE)
#define GFP_TRANSHUGE_LIGHT	((GFP_HIGHUSER_MOVABLE | __GFP_COMP | \
			 __GFP_NOMEMALLOC | __GFP_NOWARN) & ~__GFP_RECLAIM)
#define GFP_TRANSHUGE	(GFP_TRANSHUGE_LIGHT | __GFP_DIRECT_RECLAIM)
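/*
 * Illustrative sketch: choosing between the combinations above, and using a
 * scoped NOIO section rather than passing GFP_NOIO directly.
 * memalloc_noio_save()/memalloc_noio_restore() are declared in
 * <linux/sched/mm.h>; the variable names are hypothetical.
 *
 *	irq_buf = kmalloc(len, GFP_ATOMIC);	(interrupt context, may fail)
 *	table   = kmalloc(len, GFP_KERNEL);	(process context, may sleep)
 *
 *	noio_flags = memalloc_noio_save();
 *	... every allocation in this scope implicitly behaves as GFP_NOIO ...
 *	memalloc_noio_restore(noio_flags);
 */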
/* Convert GFP flags to their corresponding migrate type */
#define GFP_MOVABLE_MASK (__GFP_RECLAIMABLE|__GFP_MOVABLE)
#define GFP_MOVABLE_SHIFT 3

static inline int gfpflags_to_migratetype(const gfp_t gfp_flags)
{
	VM_WARN_ON((gfp_flags & GFP_MOVABLE_MASK) == GFP_MOVABLE_MASK);
	BUILD_BUG_ON((1UL << GFP_MOVABLE_SHIFT) != ___GFP_MOVABLE);
	BUILD_BUG_ON((___GFP_MOVABLE >> GFP_MOVABLE_SHIFT) != MIGRATE_MOVABLE);

	if (unlikely(page_group_by_mobility_disabled))
		return MIGRATE_UNMOVABLE;

	/* Group based on mobility */
	return (gfp_flags & GFP_MOVABLE_MASK) >> GFP_MOVABLE_SHIFT;
}
#undef GFP_MOVABLE_MASK
#undef GFP_MOVABLE_SHIFT

static inline bool gfpflags_allow_blocking(const gfp_t gfp_flags)
{
	return !!(gfp_flags & __GFP_DIRECT_RECLAIM);
}
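/*
 * Illustrative sketch: code that receives a gfp mask from its caller can use
 * gfpflags_allow_blocking() to choose between sleeping and non-blocking
 * paths. The pool structure and locking below are hypothetical.
 *
 *	static void pool_refill(struct my_pool *pool, gfp_t gfp)
 *	{
 *		if (gfpflags_allow_blocking(gfp))
 *			mutex_lock(&pool->lock);
 *		else if (!mutex_trylock(&pool->lock))
 *			return;
 *		...
 *		mutex_unlock(&pool->lock);
 *	}
 */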
#ifdef CONFIG_HIGHMEM
#define OPT_ZONE_HIGHMEM ZONE_HIGHMEM
#else
#define OPT_ZONE_HIGHMEM ZONE_NORMAL
#endif

#ifdef CONFIG_ZONE_DMA
#define OPT_ZONE_DMA ZONE_DMA
#else
#define OPT_ZONE_DMA ZONE_NORMAL
#endif

#ifdef CONFIG_ZONE_DMA32
#define OPT_ZONE_DMA32 ZONE_DMA32
#else
#define OPT_ZONE_DMA32 ZONE_NORMAL
#endif

/*
 * GFP_ZONE_TABLE is a word-size bitstring that is used for looking up the
 * zone to use given the lowest 4 bits of gfp_t. Entries are GFP_ZONES_SHIFT
 * bits long and there are 16 of them to cover all possible combinations of
 * __GFP_DMA, __GFP_DMA32, __GFP_MOVABLE and __GFP_HIGHMEM.
 *
 * The zone fallback order is MOVABLE=>HIGHMEM=>NORMAL=>DMA32=>DMA.
 * But GFP_MOVABLE is not only a zone specifier but also an allocation
 * policy. Therefore __GFP_MOVABLE plus another zone selector is valid.
 * At most one of the lowest 3 bits (DMA, DMA32, HIGHMEM) can be set to "1".
 *
 *       bit       result
 *       =================
 *       0x0    => NORMAL
 *       0x1    => DMA or NORMAL
 *       0x2    => HIGHMEM or NORMAL
 *       0x3    => BAD (DMA+HIGHMEM)
 *       0x4    => DMA32 or DMA or NORMAL
 *       0x5    => BAD (DMA+DMA32)
 *       0x6    => BAD (HIGHMEM+DMA32)
 *       0x7    => BAD (HIGHMEM+DMA32+DMA)
 *       0x8    => NORMAL (MOVABLE+0)
 *       0x9    => DMA or NORMAL (MOVABLE+DMA)
 *       0xa    => MOVABLE (Movable is valid only if HIGHMEM is set too)
 *       0xb    => BAD (MOVABLE+HIGHMEM+DMA)
 *       0xc    => DMA32 (MOVABLE+DMA32)
 *       0xd    => BAD (MOVABLE+DMA32+DMA)
 *       0xe    => BAD (MOVABLE+DMA32+HIGHMEM)
 *       0xf    => BAD (MOVABLE+DMA32+HIGHMEM+DMA)
 *
 * GFP_ZONES_SHIFT must be <= 2 on 32 bit platforms.
 */

#if defined(CONFIG_ZONE_DEVICE) && (MAX_NR_ZONES-1) <= 4
/* ZONE_DEVICE is not a valid GFP zone specifier */
#define GFP_ZONES_SHIFT 2
#else
#define GFP_ZONES_SHIFT ZONES_SHIFT
#endif

#if 16 * GFP_ZONES_SHIFT > BITS_PER_LONG
#error GFP_ZONES_SHIFT too large to create GFP_ZONE_TABLE integer
#endif

#define GFP_ZONE_TABLE ( \
	(ZONE_NORMAL << 0 * GFP_ZONES_SHIFT)				       \
	| (OPT_ZONE_DMA << ___GFP_DMA * GFP_ZONES_SHIFT)		       \
	| (OPT_ZONE_HIGHMEM << ___GFP_HIGHMEM * GFP_ZONES_SHIFT)	       \
	| (OPT_ZONE_DMA32 << ___GFP_DMA32 * GFP_ZONES_SHIFT)		       \
	| (ZONE_NORMAL << ___GFP_MOVABLE * GFP_ZONES_SHIFT)		       \
	| (OPT_ZONE_DMA << (___GFP_MOVABLE | ___GFP_DMA) * GFP_ZONES_SHIFT)   \
	| (ZONE_MOVABLE << (___GFP_MOVABLE | ___GFP_HIGHMEM) * GFP_ZONES_SHIFT)\
	| (OPT_ZONE_DMA32 << (___GFP_MOVABLE | ___GFP_DMA32) * GFP_ZONES_SHIFT)\
)

/*
 * GFP_ZONE_BAD is a bitmap for all combinations of __GFP_DMA, __GFP_DMA32,
 * __GFP_HIGHMEM and __GFP_MOVABLE that are not permitted. One flag per
 * entry starting with bit 0. Bit is set if the combination is not
 * allowed.
 */
#define GFP_ZONE_BAD ( \
	1 << (___GFP_DMA | ___GFP_HIGHMEM)				       \
	| 1 << (___GFP_DMA | ___GFP_DMA32)				       \
	| 1 << (___GFP_DMA32 | ___GFP_HIGHMEM)				       \
	| 1 << (___GFP_DMA | ___GFP_DMA32 | ___GFP_HIGHMEM)		       \
	| 1 << (___GFP_MOVABLE | ___GFP_HIGHMEM | ___GFP_DMA)		       \
	| 1 << (___GFP_MOVABLE | ___GFP_DMA32 | ___GFP_DMA)		       \
	| 1 << (___GFP_MOVABLE | ___GFP_DMA32 | ___GFP_HIGHMEM)	       \
	| 1 << (___GFP_MOVABLE | ___GFP_DMA32 | ___GFP_DMA | ___GFP_HIGHMEM)  \
)

static inline enum zone_type gfp_zone(gfp_t flags)
{
	enum zone_type z;
	int bit = (__force int) (flags & GFP_ZONEMASK);

	z = (GFP_ZONE_TABLE >> (bit * GFP_ZONES_SHIFT)) &
					 ((1 << GFP_ZONES_SHIFT) - 1);
	VM_BUG_ON((GFP_ZONE_BAD >> bit) & 1);
	return z;
}
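/*
 * Worked example (illustrative): GFP_HIGHUSER_MOVABLE sets the zone bits
 * __GFP_HIGHMEM | __GFP_MOVABLE, so bit == 0xa and the table entry filled
 * with ZONE_MOVABLE above is selected:
 *
 *	gfp_zone(GFP_HIGHUSER_MOVABLE) == ZONE_MOVABLE
 *
 * Plain GFP_KERNEL has no zone bits set (bit == 0x0) and yields ZONE_NORMAL.
 */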
/*
 * There is only one page-allocator function, and two main namespaces to
 * it. The alloc_page*() variants return 'struct page *' and as such
 * can allocate highmem pages, the *get*page*() variants return
 * virtual kernel addresses to the allocated page(s).
 */

static inline int gfp_zonelist(gfp_t flags)
{
#ifdef CONFIG_NUMA
	if (unlikely(flags & __GFP_THISNODE))
		return ZONELIST_NOFALLBACK;
#endif
	return ZONELIST_FALLBACK;
}

/*
 * We get the zone list from the current node and the gfp_mask.
 * This zone list contains a maximum of MAX_NUMNODES*MAX_NR_ZONES zones.
 * There are two zonelists per node, one for all zones with memory and
 * one containing just zones from the node the zonelist belongs to.
 *
 * For the normal case of non-DISCONTIGMEM systems the NODE_DATA() gets
 * optimized to &contig_page_data at compile-time.
 */
static inline struct zonelist *node_zonelist(int nid, gfp_t flags)
{
	return NODE_DATA(nid)->node_zonelists + gfp_zonelist(flags);
}

#ifndef HAVE_ARCH_FREE_PAGE
static inline void arch_free_page(struct page *page, int order) { }
#endif
#ifndef HAVE_ARCH_ALLOC_PAGE
static inline void arch_alloc_page(struct page *page, int order) { }
#endif

struct page *
__alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, int preferred_nid,
							nodemask_t *nodemask);

static inline struct page *
__alloc_pages(gfp_t gfp_mask, unsigned int order, int preferred_nid)
{
	return __alloc_pages_nodemask(gfp_mask, order, preferred_nid, NULL);
}

/*
 * Allocate pages, preferring the node given as nid. The node must be valid and
 * online. For a more general interface, see alloc_pages_node().
 */
static inline struct page *
__alloc_pages_node(int nid, gfp_t gfp_mask, unsigned int order)
{
	VM_BUG_ON(nid < 0 || nid >= MAX_NUMNODES);
	VM_WARN_ON(!node_online(nid));

	return __alloc_pages(gfp_mask, order, nid);
}

/*
 * Allocate pages, preferring the node given as nid. When nid == NUMA_NO_NODE,
 * prefer the current CPU's closest node. Otherwise the node must be valid and
 * online.
 */
static inline struct page *alloc_pages_node(int nid, gfp_t gfp_mask,
						unsigned int order)
{
	if (nid == NUMA_NO_NODE)
		nid = numa_mem_id();

	return __alloc_pages_node(nid, gfp_mask, order);
}

#ifdef CONFIG_NUMA
extern struct page *alloc_pages_current(gfp_t gfp_mask, unsigned order);

static inline struct page *
alloc_pages(gfp_t gfp_mask, unsigned int order)
{
	return alloc_pages_current(gfp_mask, order);
}
extern struct page *alloc_pages_vma(gfp_t gfp_mask, int order,
			struct vm_area_struct *vma, unsigned long addr,
			int node, bool hugepage);
#define alloc_hugepage_vma(gfp_mask, vma, addr, order)	\
	alloc_pages_vma(gfp_mask, order, vma, addr, numa_node_id(), true)
#else
#define alloc_pages(gfp_mask, order) \
		alloc_pages_node(numa_node_id(), gfp_mask, order)
#define alloc_pages_vma(gfp_mask, order, vma, addr, node, hugepage) \
	alloc_pages(gfp_mask, order)
#define alloc_hugepage_vma(gfp_mask, vma, addr, order)	\
	alloc_pages(gfp_mask, order)
#endif
#define alloc_page(gfp_mask) alloc_pages(gfp_mask, 0)
#define alloc_page_vma(gfp_mask, vma, addr)			\
	alloc_pages_vma(gfp_mask, 0, vma, addr, numa_node_id(), false)
#define alloc_page_vma_node(gfp_mask, vma, addr, node)		\
	alloc_pages_vma(gfp_mask, 0, vma, addr, node, false)

extern unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order);
extern unsigned long get_zeroed_page(gfp_t gfp_mask);

void *alloc_pages_exact(size_t size, gfp_t gfp_mask);
void free_pages_exact(void *virt, size_t size);
void * __meminit alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask);

#define __get_free_page(gfp_mask) \
		__get_free_pages((gfp_mask), 0)

#define __get_dma_pages(gfp_mask, order) \
		__get_free_pages((gfp_mask) | GFP_DMA, (order))

extern void __free_pages(struct page *page, unsigned int order);
extern void free_pages(unsigned long addr, unsigned int order);
extern void free_hot_cold_page(struct page *page, bool cold);
extern void free_hot_cold_page_list(struct list_head *list, bool cold);

struct page_frag_cache;
extern void __page_frag_cache_drain(struct page *page, unsigned int count);
extern void *page_frag_alloc(struct page_frag_cache *nc,
			     unsigned int fragsz, gfp_t gfp_mask);
extern void page_frag_free(void *addr);

#define __free_page(page) __free_pages((page), 0)
#define free_page(addr) free_pages((addr), 0)
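/*
 * Illustrative sketch: the two namespaces pair up, and the order passed to
 * the free call must match the order used to allocate.
 *
 *	struct page *page = alloc_pages(GFP_KERNEL | __GFP_ZERO, 2);
 *	if (page)
 *		__free_pages(page, 2);
 *
 *	unsigned long addr = __get_free_page(GFP_KERNEL);
 *	if (addr)
 *		free_page(addr);
 */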
void page_alloc_init(void);
void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp);
void drain_all_pages(struct zone *zone);
void drain_local_pages(struct zone *zone);

void page_alloc_init_late(void);

/*
 * gfp_allowed_mask is set to GFP_BOOT_MASK during early boot to restrict what
 * GFP flags are used before interrupts are enabled. Once interrupts are
 * enabled, it is set to __GFP_BITS_MASK while the system is running. During
 * hibernation, it is used by PM to avoid I/O during memory allocation while
 * devices are suspended.
 */
extern gfp_t gfp_allowed_mask;

/* Returns true if the gfp_mask allows use of ALLOC_NO_WATERMARK */
bool gfp_pfmemalloc_allowed(gfp_t gfp_mask);

extern void pm_restrict_gfp_mask(void);
extern void pm_restore_gfp_mask(void);

#ifdef CONFIG_PM_SLEEP
extern bool pm_suspended_storage(void);
#else
static inline bool pm_suspended_storage(void)
{
	return false;
}
#endif /* CONFIG_PM_SLEEP */

#if (defined(CONFIG_MEMORY_ISOLATION) && defined(CONFIG_COMPACTION)) || defined(CONFIG_CMA)
/* The below functions must be run on a range from a single zone. */
extern int alloc_contig_range(unsigned long start, unsigned long end,
			      unsigned migratetype, gfp_t gfp_mask);
extern void free_contig_range(unsigned long pfn, unsigned nr_pages);
#endif

#ifdef CONFIG_CMA
/* CMA stuff */
extern void init_cma_reserved_pageblock(struct page *page);
#endif

#endif /* __LINUX_GFP_H */