#ifndef __LINUX_GFP_H
#define __LINUX_GFP_H

#include <linux/mmdebug.h>
#include <linux/mmzone.h>
#include <linux/stddef.h>
#include <linux/linkage.h>
#include <linux/topology.h>

struct vm_area_struct;
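
/*
 * In case of changes, please don't forget to update
 * include/trace/events/mmflags.h and tools/perf/builtin-kmem.c
 */

/* Plain integer GFP bitmasks. Do not use this directly. */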
#define ___GFP_DMA		0x01u
#define ___GFP_HIGHMEM		0x02u
#define ___GFP_DMA32		0x04u
#define ___GFP_MOVABLE		0x08u
#define ___GFP_RECLAIMABLE	0x10u
#define ___GFP_HIGH		0x20u
#define ___GFP_IO		0x40u
#define ___GFP_FS		0x80u
#define ___GFP_COLD		0x100u
#define ___GFP_NOWARN		0x200u
#define ___GFP_REPEAT		0x400u
#define ___GFP_NOFAIL		0x800u
#define ___GFP_NORETRY		0x1000u
#define ___GFP_MEMALLOC		0x2000u
#define ___GFP_COMP		0x4000u
#define ___GFP_ZERO		0x8000u
#define ___GFP_NOMEMALLOC	0x10000u
#define ___GFP_HARDWALL		0x20000u
#define ___GFP_THISNODE		0x40000u
#define ___GFP_ATOMIC		0x80000u
#define ___GFP_ACCOUNT		0x100000u
#define ___GFP_NOTRACK		0x200000u
#define ___GFP_DIRECT_RECLAIM	0x400000u
#define ___GFP_WRITE		0x800000u
#define ___GFP_KSWAPD_RECLAIM	0x1000000u
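/* If the above are modified, __GFP_BITS_SHIFT may need updating */

/*
 * Physical address zone modifiers (see linux/mmzone.h - low four bits)
 *
 * Do not put any conditional on these. If necessary modify the definitions
 * without the underscores and use them consistently. The definitions here may
 * be used in bit comparisons.
 */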
#define __GFP_DMA	((__force gfp_t)___GFP_DMA)
#define __GFP_HIGHMEM	((__force gfp_t)___GFP_HIGHMEM)
#define __GFP_DMA32	((__force gfp_t)___GFP_DMA32)
#define __GFP_MOVABLE	((__force gfp_t)___GFP_MOVABLE)
#define GFP_ZONEMASK	(__GFP_DMA|__GFP_HIGHMEM|__GFP_DMA32|__GFP_MOVABLE)
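
/*
 * Page mobility and placement hints
 *
 * These flags provide hints about how mobile the page is. Pages with similar
 * mobility are placed within the same pageblocks to minimise problems due
 * to external fragmentation.
 *
 * __GFP_MOVABLE (also a zone modifier) indicates that the page can be
 *   moved by page migration during memory compaction or can be reclaimed.
 *
 * __GFP_RECLAIMABLE is used for slab allocations that specify
 *   SLAB_RECLAIM_ACCOUNT and whose pages can be freed via shrinkers.
 *
 * __GFP_WRITE indicates the caller intends to dirty the page. Where possible,
 *   these pages will be spread between local zones to avoid all the dirty
 *   pages being in one zone (fair zone allocation policy).
 *
 * __GFP_HARDWALL enforces the cpuset memory allocation policy.
 *
 * __GFP_THISNODE forces the allocation to be satisfied from the requested
 *   node with no fallbacks or placement policy enforcements.
 *
 * __GFP_ACCOUNT causes the allocation to be accounted to kmemcg.
 */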
#define __GFP_RECLAIMABLE ((__force gfp_t)___GFP_RECLAIMABLE)
#define __GFP_WRITE	((__force gfp_t)___GFP_WRITE)
#define __GFP_HARDWALL	((__force gfp_t)___GFP_HARDWALL)
#define __GFP_THISNODE	((__force gfp_t)___GFP_THISNODE)
#define __GFP_ACCOUNT	((__force gfp_t)___GFP_ACCOUNT)
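
/*
 * Watermark modifiers -- controls access to emergency reserves
 *
 * __GFP_HIGH indicates that the caller is high-priority and that granting
 *   the request is necessary before the system can make forward progress.
 *   For example, creating an IO context to clean pages.
 *
 * __GFP_ATOMIC indicates that the caller cannot reclaim or sleep and is
 *   high priority. Users are typically interrupt handlers. This may be
 *   used in conjunction with __GFP_HIGH.
 *
 * __GFP_MEMALLOC allows access to all memory. This should only be used when
 *   the caller guarantees the allocation will allow more memory to be freed
 *   very shortly, e.g. process exiting or swapping.
 *
 * __GFP_NOMEMALLOC is used to explicitly forbid access to emergency reserves.
 *   This takes precedence over the __GFP_MEMALLOC flag if both are set.
 */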
#define __GFP_ATOMIC	((__force gfp_t)___GFP_ATOMIC)
#define __GFP_HIGH	((__force gfp_t)___GFP_HIGH)
#define __GFP_MEMALLOC	((__force gfp_t)___GFP_MEMALLOC)
#define __GFP_NOMEMALLOC ((__force gfp_t)___GFP_NOMEMALLOC)
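
/*
 * Reclaim modifiers
 *
 * __GFP_IO can start physical IO.
 *
 * __GFP_FS can call down to the low-level FS. Clearing the flag avoids the
 *   allocator recursing into the filesystem which might already be holding
 *   locks.
 *
 * __GFP_DIRECT_RECLAIM indicates that the caller may enter direct reclaim.
 *   This flag can be cleared to avoid unnecessary delays when a fallback
 *   option is available.
 *
 * __GFP_KSWAPD_RECLAIM indicates that the caller wants to wake kswapd when
 *   the low watermark is reached and have it reclaim pages until the high
 *   watermark is reached. A caller may wish to clear this flag when fallback
 *   options are available and the reclaim is likely to disrupt the system.
 *
 * __GFP_RECLAIM is shorthand to allow/forbid both direct and kswapd reclaim.
 *
 * __GFP_REPEAT: Try hard to allocate the memory, but the allocation attempt
 *   _might_ fail.
 *
 * __GFP_NOFAIL: The VM implementation _must_ retry infinitely: the caller
 *   cannot handle allocation failures.
 *
 * __GFP_NORETRY: The VM implementation must not retry indefinitely and will
 *   return NULL when direct reclaim and memory compaction have failed.
 */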
#define __GFP_IO	((__force gfp_t)___GFP_IO)
#define __GFP_FS	((__force gfp_t)___GFP_FS)
#define __GFP_DIRECT_RECLAIM	((__force gfp_t)___GFP_DIRECT_RECLAIM)
#define __GFP_KSWAPD_RECLAIM	((__force gfp_t)___GFP_KSWAPD_RECLAIM)
#define __GFP_RECLAIM ((__force gfp_t)(___GFP_DIRECT_RECLAIM|___GFP_KSWAPD_RECLAIM))
#define __GFP_REPEAT	((__force gfp_t)___GFP_REPEAT)
#define __GFP_NOFAIL	((__force gfp_t)___GFP_NOFAIL)
#define __GFP_NORETRY	((__force gfp_t)___GFP_NORETRY)
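
/*
 * Action modifiers
 *
 * __GFP_COLD indicates that the caller does not expect the page to be used
 *   in the near future. Where possible, a cache-cold page will be returned.
 *
 * __GFP_NOWARN suppresses allocation failure reports.
 *
 * __GFP_COMP requests compound page metadata.
 *
 * __GFP_ZERO returns a zeroed page on success.
 *
 * __GFP_NOTRACK avoids tracking with kmemcheck.
 *
 * __GFP_NOTRACK_FALSE_POSITIVE is an alias of __GFP_NOTRACK. It's a means of
 *   distinguishing in the source between false positives and allocations that
 *   cannot be supported (e.g. page tables).
 */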
#define __GFP_COLD	((__force gfp_t)___GFP_COLD)
#define __GFP_NOWARN	((__force gfp_t)___GFP_NOWARN)
#define __GFP_COMP	((__force gfp_t)___GFP_COMP)
#define __GFP_ZERO	((__force gfp_t)___GFP_ZERO)
#define __GFP_NOTRACK	((__force gfp_t)___GFP_NOTRACK)
#define __GFP_NOTRACK_FALSE_POSITIVE (__GFP_NOTRACK)
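
/* Room for N __GFP_FOO bits */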
#define __GFP_BITS_SHIFT 25
#define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1))
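
/*
 * Useful GFP flag combinations that are commonly used. It is recommended
 * that subsystems start with one of these combinations and then set/clear
 * __GFP_FOO flags as necessary.
 *
 * GFP_ATOMIC users can not sleep and need the allocation to succeed. A lower
 *   watermark is applied to allow access to "atomic reserves".
 *
 * GFP_KERNEL is typical for kernel-internal allocations. The caller requires
 *   ZONE_NORMAL or a lower zone for direct access but can direct reclaim.
 *
 * GFP_KERNEL_ACCOUNT is the same as GFP_KERNEL, except the allocation is
 *   accounted to kmemcg.
 *
 * GFP_NOWAIT is for kernel allocations that should not stall for direct
 *   reclaim, start physical IO or use any filesystem callback.
 *
 * GFP_NOIO will use direct reclaim to discard clean pages or slab pages
 *   that do not require the starting of any physical IO.
 *
 * GFP_NOFS will use direct reclaim but will not use any filesystem
 *   interfaces.
 *
 * GFP_TEMPORARY is used for allocations that are expected to be short-lived.
 *
 * GFP_USER is for userspace allocations that also need to be directly
 *   accessible by the kernel or hardware. cpuset limits are enforced for
 *   these allocations.
 *
 * GFP_DMA exists for historical reasons and should be avoided where possible.
 *   The flag indicates that the caller requires that the lowest zone be
 *   used (ZONE_DMA or 16M on x86-64).
 *
 * GFP_DMA32 is similar to GFP_DMA except that the caller requires a 32-bit
 *   address.
 *
 * GFP_HIGHUSER is for userspace allocations that may be mapped to userspace,
 *   do not need to be directly accessible by the kernel but that cannot
 *   move once in use.
 *
 * GFP_HIGHUSER_MOVABLE is for userspace allocations that the kernel does not
 *   need direct access to but can use kmap() when access is required. They
 *   are expected to be movable via page reclaim or page migration.
 *
 * GFP_TRANSHUGE and GFP_TRANSHUGE_LIGHT are used for THP allocations. They
 *   are compound allocations that will generally fail quickly if memory is
 *   not available and will not wake kswapd/kcompactd on failure. The _LIGHT
 *   version does not attempt reclaim/compaction at all.
 */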
#define GFP_ATOMIC	(__GFP_HIGH|__GFP_ATOMIC|__GFP_KSWAPD_RECLAIM)
#define GFP_KERNEL	(__GFP_RECLAIM | __GFP_IO | __GFP_FS)
#define GFP_KERNEL_ACCOUNT (GFP_KERNEL | __GFP_ACCOUNT)
#define GFP_NOWAIT	(__GFP_KSWAPD_RECLAIM)
#define GFP_NOIO	(__GFP_RECLAIM)
#define GFP_NOFS	(__GFP_RECLAIM | __GFP_IO)
#define GFP_TEMPORARY	(__GFP_RECLAIM | __GFP_IO | __GFP_FS | \
			 __GFP_RECLAIMABLE)
#define GFP_USER	(__GFP_RECLAIM | __GFP_IO | __GFP_FS | __GFP_HARDWALL)
#define GFP_DMA		__GFP_DMA
#define GFP_DMA32	__GFP_DMA32
#define GFP_HIGHUSER	(GFP_USER | __GFP_HIGHMEM)
#define GFP_HIGHUSER_MOVABLE	(GFP_HIGHUSER | __GFP_MOVABLE)
#define GFP_TRANSHUGE_LIGHT	((GFP_HIGHUSER_MOVABLE | __GFP_COMP | \
			 __GFP_NOMEMALLOC | __GFP_NOWARN) & ~__GFP_RECLAIM)
#define GFP_TRANSHUGE	(GFP_TRANSHUGE_LIGHT | __GFP_DIRECT_RECLAIM)
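
/* Convert GFP flags to their corresponding migrate type */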
#define GFP_MOVABLE_MASK (__GFP_RECLAIMABLE|__GFP_MOVABLE)
#define GFP_MOVABLE_SHIFT 3

static inline int gfpflags_to_migratetype(const gfp_t gfp_flags)
{
	VM_WARN_ON((gfp_flags & GFP_MOVABLE_MASK) == GFP_MOVABLE_MASK);
	BUILD_BUG_ON((1UL << GFP_MOVABLE_SHIFT) != ___GFP_MOVABLE);
	BUILD_BUG_ON((___GFP_MOVABLE >> GFP_MOVABLE_SHIFT) != MIGRATE_MOVABLE);

	if (unlikely(page_group_by_mobility_disabled))
		return MIGRATE_UNMOVABLE;
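
	/* Group based on mobility */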
	return (gfp_flags & GFP_MOVABLE_MASK) >> GFP_MOVABLE_SHIFT;
}
#undef GFP_MOVABLE_MASK
#undef GFP_MOVABLE_SHIFT

static inline bool gfpflags_allow_blocking(const gfp_t gfp_flags)
{
	return !!(gfp_flags & __GFP_DIRECT_RECLAIM);
}

#ifdef CONFIG_HIGHMEM
#define OPT_ZONE_HIGHMEM ZONE_HIGHMEM
#else
#define OPT_ZONE_HIGHMEM ZONE_NORMAL
#endif

#ifdef CONFIG_ZONE_DMA
#define OPT_ZONE_DMA ZONE_DMA
#else
#define OPT_ZONE_DMA ZONE_NORMAL
#endif

#ifdef CONFIG_ZONE_DMA32
#define OPT_ZONE_DMA32 ZONE_DMA32
#else
#define OPT_ZONE_DMA32 ZONE_NORMAL
#endif
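
/*
 * GFP_ZONE_TABLE is a word-size bitstring that is used for looking up the
 * zone to use given the lowest 4 bits of gfp_t. Entries are GFP_ZONES_SHIFT
 * bits long and there are 16 of them to cover all possible combinations of
 * the low bit mask.
 *
 * The zone fallback order is MOVABLE=>HIGHMEM=>NORMAL=>DMA32=>DMA.
 * __GFP_MOVABLE is not only a zone specifier but also an allocation policy,
 * so __GFP_MOVABLE plus one other zone selector is valid. Only 1 bit of the
 * lowest 3 bits (DMA, DMA32, HIGHMEM) can be set to "1".
 *
 *       bit       result
 *       =================
 *       0x0    => NORMAL
 *       0x1    => DMA or NORMAL
 *       0x2    => HIGHMEM or NORMAL
 *       0x3    => BAD (DMA+HIGHMEM)
 *       0x4    => DMA32 or NORMAL
 *       0x5    => BAD (DMA+DMA32)
 *       0x6    => BAD (HIGHMEM+DMA32)
 *       0x7    => BAD (HIGHMEM+DMA32+DMA)
 *       0x8    => NORMAL (MOVABLE+0)
 *       0x9    => DMA or NORMAL (MOVABLE+DMA)
 *       0xa    => MOVABLE (Movable is valid only if HIGHMEM is set too)
 *       0xb    => BAD (MOVABLE+HIGHMEM+DMA)
 *       0xc    => DMA32 or NORMAL (MOVABLE+DMA32)
 *       0xd    => BAD (MOVABLE+DMA32+DMA)
 *       0xe    => BAD (MOVABLE+DMA32+HIGHMEM)
 *       0xf    => BAD (MOVABLE+DMA32+HIGHMEM+DMA)
 *
 * GFP_ZONES_SHIFT must be <= 2 on 32 bit platforms.
 */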
#if defined(CONFIG_ZONE_DEVICE) && (MAX_NR_ZONES-1) <= 4
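/* ZONE_DEVICE is not a valid GFP zone specifier */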
#define GFP_ZONES_SHIFT 2
#else
#define GFP_ZONES_SHIFT ZONES_SHIFT
#endif

#if 16 * GFP_ZONES_SHIFT > BITS_PER_LONG
#error GFP_ZONES_SHIFT too large to create GFP_ZONE_TABLE integer
#endif

#define GFP_ZONE_TABLE ( \
	(ZONE_NORMAL << 0 * GFP_ZONES_SHIFT)				       \
	| (OPT_ZONE_DMA << ___GFP_DMA * GFP_ZONES_SHIFT)		       \
	| (OPT_ZONE_HIGHMEM << ___GFP_HIGHMEM * GFP_ZONES_SHIFT)	       \
	| (OPT_ZONE_DMA32 << ___GFP_DMA32 * GFP_ZONES_SHIFT)		       \
	| (ZONE_NORMAL << ___GFP_MOVABLE * GFP_ZONES_SHIFT)		       \
	| (OPT_ZONE_DMA << (___GFP_MOVABLE | ___GFP_DMA) * GFP_ZONES_SHIFT)    \
	| (ZONE_MOVABLE << (___GFP_MOVABLE | ___GFP_HIGHMEM) * GFP_ZONES_SHIFT)\
	| (OPT_ZONE_DMA32 << (___GFP_MOVABLE | ___GFP_DMA32) * GFP_ZONES_SHIFT)\
)
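
/*
 * GFP_ZONE_BAD is a bitmap for all combinations of __GFP_DMA, __GFP_DMA32,
 * __GFP_HIGHMEM and __GFP_MOVABLE that are not permitted. One flag per
 * entry starting with bit 0. Bit is set if the combination is not
 * allowed.
 */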
#define GFP_ZONE_BAD ( \
	1 << (___GFP_DMA | ___GFP_HIGHMEM)				      \
	| 1 << (___GFP_DMA | ___GFP_DMA32)				      \
	| 1 << (___GFP_DMA32 | ___GFP_HIGHMEM)				      \
	| 1 << (___GFP_DMA | ___GFP_DMA32 | ___GFP_HIGHMEM)		      \
	| 1 << (___GFP_MOVABLE | ___GFP_HIGHMEM | ___GFP_DMA)		      \
	| 1 << (___GFP_MOVABLE | ___GFP_DMA32 | ___GFP_DMA)		      \
	| 1 << (___GFP_MOVABLE | ___GFP_DMA32 | ___GFP_HIGHMEM)	      \
	| 1 << (___GFP_MOVABLE | ___GFP_DMA32 | ___GFP_DMA | ___GFP_HIGHMEM) \
)

static inline enum zone_type gfp_zone(gfp_t flags)
{
	enum zone_type z;
	int bit = (__force int) (flags & GFP_ZONEMASK);

	z = (GFP_ZONE_TABLE >> (bit * GFP_ZONES_SHIFT)) &
					 ((1 << GFP_ZONES_SHIFT) - 1);
	VM_BUG_ON((GFP_ZONE_BAD >> bit) & 1);
	return z;
}
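
/*
 * There is only one page-allocator function, and two main namespaces to
 * it. The alloc_page*() variants return 'struct page *' and as such
 * can allocate highmem pages, the *get*page*() variants return
 * virtual kernel addresses to the allocated page(s).
 */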
static inline int gfp_zonelist(gfp_t flags)
{
#ifdef CONFIG_NUMA
	if (unlikely(flags & __GFP_THISNODE))
		return ZONELIST_NOFALLBACK;
#endif
	return ZONELIST_FALLBACK;
}
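
/*
 * We get the zone list from the current node and the gfp_mask.
 * This zone list contains a maximum of MAX_NUMNODES*MAX_NR_ZONES zones.
 * There are two zonelists per node, one for all zones with memory and
 * one containing just zones from the node the zonelist belongs to.
 *
 * For the normal case of non-DISCONTIGMEM systems the NODE_DATA() gets
 * optimized to &contig_page_data at compile-time.
 */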
static inline struct zonelist *node_zonelist(int nid, gfp_t flags)
{
	return NODE_DATA(nid)->node_zonelists + gfp_zonelist(flags);
}

#ifndef HAVE_ARCH_FREE_PAGE
static inline void arch_free_page(struct page *page, int order) { }
#endif
#ifndef HAVE_ARCH_ALLOC_PAGE
static inline void arch_alloc_page(struct page *page, int order) { }
#endif

struct page *
__alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
		       struct zonelist *zonelist, nodemask_t *nodemask);

static inline struct page *
__alloc_pages(gfp_t gfp_mask, unsigned int order,
		struct zonelist *zonelist)
{
	return __alloc_pages_nodemask(gfp_mask, order, zonelist, NULL);
}
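
/*
 * Allocate pages, preferring the node given as nid. The node must be valid and
 * online. For more general interface, see alloc_pages_node().
 */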
static inline struct page *
__alloc_pages_node(int nid, gfp_t gfp_mask, unsigned int order)
{
	VM_BUG_ON(nid < 0 || nid >= MAX_NUMNODES);
	VM_WARN_ON(!node_online(nid));

	return __alloc_pages(gfp_mask, order, node_zonelist(nid, gfp_mask));
}
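
/*
 * Allocate pages, preferring the node given as nid. When nid == NUMA_NO_NODE,
 * prefer the current CPU's closest node. Otherwise node must be valid and
 * online.
 */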
static inline struct page *alloc_pages_node(int nid, gfp_t gfp_mask,
						unsigned int order)
{
	if (nid == NUMA_NO_NODE)
		nid = numa_mem_id();

	return __alloc_pages_node(nid, gfp_mask, order);
}

#ifdef CONFIG_NUMA
extern struct page *alloc_pages_current(gfp_t gfp_mask, unsigned order);

static inline struct page *
alloc_pages(gfp_t gfp_mask, unsigned int order)
{
	return alloc_pages_current(gfp_mask, order);
}
extern struct page *alloc_pages_vma(gfp_t gfp_mask, int order,
			struct vm_area_struct *vma, unsigned long addr,
			int node, bool hugepage);
#define alloc_hugepage_vma(gfp_mask, vma, addr, order)	\
	alloc_pages_vma(gfp_mask, order, vma, addr, numa_node_id(), true)
#else
#define alloc_pages(gfp_mask, order) \
		alloc_pages_node(numa_node_id(), gfp_mask, order)
#define alloc_pages_vma(gfp_mask, order, vma, addr, node, hugepage) \
	alloc_pages(gfp_mask, order)
#define alloc_hugepage_vma(gfp_mask, vma, addr, order)	\
	alloc_pages(gfp_mask, order)
#endif
#define alloc_page(gfp_mask) alloc_pages(gfp_mask, 0)
#define alloc_page_vma(gfp_mask, vma, addr)			\
	alloc_pages_vma(gfp_mask, 0, vma, addr, numa_node_id(), false)
#define alloc_page_vma_node(gfp_mask, vma, addr, node)		\
	alloc_pages_vma(gfp_mask, 0, vma, addr, node, false)
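
/*
 * Illustrative usage sketch: allocate and free a single zeroed page that
 * is directly addressable by the kernel:
 *
 *	struct page *page = alloc_page(GFP_KERNEL | __GFP_ZERO);
 *	if (!page)
 *		return -ENOMEM;
 *	...
 *	__free_page(page);
 */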

extern unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order);
extern unsigned long get_zeroed_page(gfp_t gfp_mask);

void *alloc_pages_exact(size_t size, gfp_t gfp_mask);
void free_pages_exact(void *virt, size_t size);
void * __meminit alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask);

#define __get_free_page(gfp_mask) \
		__get_free_pages((gfp_mask), 0)

#define __get_dma_pages(gfp_mask, order) \
		__get_free_pages((gfp_mask) | GFP_DMA, (order))
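
/*
 * Illustrative usage sketch: unlike alloc_page(), __get_free_page() returns
 * the kernel virtual address of the allocated page rather than a struct page:
 *
 *	unsigned long addr = __get_free_page(GFP_KERNEL);
 *	if (!addr)
 *		return -ENOMEM;
 *	...
 *	free_page(addr);
 */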

extern void __free_pages(struct page *page, unsigned int order);
extern void free_pages(unsigned long addr, unsigned int order);
extern void free_hot_cold_page(struct page *page, bool cold);
extern void free_hot_cold_page_list(struct list_head *list, bool cold);

struct page_frag_cache;
extern void __page_frag_cache_drain(struct page *page, unsigned int count);
extern void *page_frag_alloc(struct page_frag_cache *nc,
			     unsigned int fragsz, gfp_t gfp_mask);
extern void page_frag_free(void *addr);

#define __free_page(page) __free_pages((page), 0)
#define free_page(addr) free_pages((addr), 0)

void page_alloc_init(void);
void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp);
void drain_all_pages(struct zone *zone);
void drain_local_pages(struct zone *zone);

void page_alloc_init_late(void);
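
/*
 * gfp_allowed_mask is set to GFP_BOOT_MASK during early boot to restrict what
 * GFP flags are used before interrupts are enabled. Once interrupts are
 * enabled, it is set to __GFP_BITS_MASK while the system is running. During
 * hibernation, it is used by PM to avoid I/O during memory allocation while
 * devices are suspended.
 */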
extern gfp_t gfp_allowed_mask;
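
/* Returns true if the gfp_mask allows use of ALLOC_NO_WATERMARK */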
bool gfp_pfmemalloc_allowed(gfp_t gfp_mask);

extern void pm_restrict_gfp_mask(void);
extern void pm_restore_gfp_mask(void);

#ifdef CONFIG_PM_SLEEP
extern bool pm_suspended_storage(void);
#else
static inline bool pm_suspended_storage(void)
{
	return false;
}
#endif

#if (defined(CONFIG_MEMORY_ISOLATION) && defined(CONFIG_COMPACTION)) || defined(CONFIG_CMA)
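
/* The below functions must be run on a range from a single zone. */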
extern int alloc_contig_range(unsigned long start, unsigned long end,
			      unsigned migratetype);
extern void free_contig_range(unsigned long pfn, unsigned nr_pages);
#endif

#ifdef CONFIG_CMA
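/* CMA stuff */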
extern void init_cma_reserved_pageblock(struct page *page);
#endif

#endif /* __LINUX_GFP_H */