1#ifndef __LINUX_GFP_H
2#define __LINUX_GFP_H
3
4#include <linux/mmdebug.h>
5#include <linux/mmzone.h>
6#include <linux/stddef.h>
7#include <linux/linkage.h>
8#include <linux/topology.h>
9
10struct vm_area_struct;
11
12
13
14
15
16
17
/*
 * Plain integer GFP bitmasks, one bit per flag.  Do not use these in
 * allocator calls directly; use the typed __GFP_* wrappers defined
 * below, which carry the (__force gfp_t) cast for sparse checking.
 */
#define ___GFP_DMA 0x01u
#define ___GFP_HIGHMEM 0x02u
#define ___GFP_DMA32 0x04u
#define ___GFP_MOVABLE 0x08u
#define ___GFP_RECLAIMABLE 0x10u
#define ___GFP_HIGH 0x20u
#define ___GFP_IO 0x40u
#define ___GFP_FS 0x80u
#define ___GFP_COLD 0x100u
#define ___GFP_NOWARN 0x200u
#define ___GFP_REPEAT 0x400u
#define ___GFP_NOFAIL 0x800u
#define ___GFP_NORETRY 0x1000u
#define ___GFP_MEMALLOC 0x2000u
#define ___GFP_COMP 0x4000u
#define ___GFP_ZERO 0x8000u
#define ___GFP_NOMEMALLOC 0x10000u
#define ___GFP_HARDWALL 0x20000u
#define ___GFP_THISNODE 0x40000u
#define ___GFP_ATOMIC 0x80000u
#define ___GFP_ACCOUNT 0x100000u
#define ___GFP_NOTRACK 0x200000u
#define ___GFP_DIRECT_RECLAIM 0x400000u
#define ___GFP_OTHER_NODE 0x800000u
#define ___GFP_WRITE 0x1000000u
#define ___GFP_KSWAPD_RECLAIM 0x2000000u
44
45
46
47
48
49
50
51
52
/*
 * Physical address zone modifiers (the low four GFP bits): select the
 * zone an allocation may be satisfied from.  GFP_ZONEMASK extracts
 * them from a gfp_t; gfp_zone() below maps them to a zone_type.
 * __GFP_MOVABLE doubles as a mobility flag (see GFP_MOVABLE_MASK).
 */
#define __GFP_DMA ((__force gfp_t)___GFP_DMA)
#define __GFP_HIGHMEM ((__force gfp_t)___GFP_HIGHMEM)
#define __GFP_DMA32 ((__force gfp_t)___GFP_DMA32)
#define __GFP_MOVABLE ((__force gfp_t)___GFP_MOVABLE)
#define GFP_ZONEMASK (__GFP_DMA|__GFP_HIGHMEM|__GFP_DMA32|__GFP_MOVABLE)
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
/*
 * Page mobility and placement hints: these influence where a page is
 * placed (mobility grouping, node/cpuset restriction, kmemcg
 * accounting) rather than how hard the allocator tries.
 */
#define __GFP_RECLAIMABLE ((__force gfp_t)___GFP_RECLAIMABLE)	/* feeds gfpflags_to_migratetype() */
#define __GFP_WRITE ((__force gfp_t)___GFP_WRITE)	/* allocator hint: page will be written - TODO confirm */
#define __GFP_HARDWALL ((__force gfp_t)___GFP_HARDWALL)	/* enforce cpuset memory placement - see kernel docs */
#define __GFP_THISNODE ((__force gfp_t)___GFP_THISNODE)	/* no fallback to other nodes (see gfp_zonelist()) */
#define __GFP_ACCOUNT ((__force gfp_t)___GFP_ACCOUNT)	/* account allocation to kmemcg - see kernel docs */
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
/*
 * Watermark modifiers: control access to the allocator's memory
 * reserves (see the kernel memory-allocation documentation for the
 * exact interaction between MEMALLOC and NOMEMALLOC).
 */
#define __GFP_ATOMIC ((__force gfp_t)___GFP_ATOMIC)	/* high-priority, cannot sleep */
#define __GFP_HIGH ((__force gfp_t)___GFP_HIGH)	/* high priority - may use lower watermarks */
#define __GFP_MEMALLOC ((__force gfp_t)___GFP_MEMALLOC)	/* may use all reserves */
#define __GFP_NOMEMALLOC ((__force gfp_t)___GFP_NOMEMALLOC)	/* forbid use of reserves */
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
/*
 * Reclaim modifiers: what the allocator may do when memory is low.
 * __GFP_RECLAIM is shorthand for both direct and kswapd reclaim.
 */
#define __GFP_IO ((__force gfp_t)___GFP_IO)	/* can start physical I/O */
#define __GFP_FS ((__force gfp_t)___GFP_FS)	/* can call down into the filesystem */
#define __GFP_DIRECT_RECLAIM ((__force gfp_t)___GFP_DIRECT_RECLAIM)	/* caller may reclaim (may sleep) */
#define __GFP_KSWAPD_RECLAIM ((__force gfp_t)___GFP_KSWAPD_RECLAIM)	/* kswapd may be woken */
#define __GFP_RECLAIM ((__force gfp_t)(___GFP_DIRECT_RECLAIM|___GFP_KSWAPD_RECLAIM))
#define __GFP_REPEAT ((__force gfp_t)___GFP_REPEAT)	/* try harder before giving up */
#define __GFP_NOFAIL ((__force gfp_t)___GFP_NOFAIL)	/* caller cannot handle failure */
#define __GFP_NORETRY ((__force gfp_t)___GFP_NORETRY)	/* fail quickly rather than retry */
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
/*
 * Action modifiers.
 */
#define __GFP_COLD ((__force gfp_t)___GFP_COLD)	/* cache-cold page preferred */
#define __GFP_NOWARN ((__force gfp_t)___GFP_NOWARN)	/* suppress allocation-failure warnings */
#define __GFP_COMP ((__force gfp_t)___GFP_COMP)	/* compound (multi-order) page metadata */
#define __GFP_ZERO ((__force gfp_t)___GFP_ZERO)	/* return zeroed memory */
#define __GFP_NOTRACK ((__force gfp_t)___GFP_NOTRACK)	/* skip kmemcheck tracking - TODO confirm */
#define __GFP_NOTRACK_FALSE_POSITIVE (__GFP_NOTRACK)
#define __GFP_OTHER_NODE ((__force gfp_t)___GFP_OTHER_NODE)	/* on behalf of a different node - see kernel docs */

/* Number of GFP bits in use; covers the highest flag (bit 25, 0x2000000u). */
#define __GFP_BITS_SHIFT 26
#define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1))
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
/*
 * Useful GFP flag combinations.  Prefer these over raw __GFP_* unions.
 */
#define GFP_ATOMIC (__GFP_HIGH|__GFP_ATOMIC|__GFP_KSWAPD_RECLAIM)	/* caller cannot sleep */
#define GFP_KERNEL (__GFP_RECLAIM | __GFP_IO | __GFP_FS)	/* normal kernel allocation, may sleep */
#define GFP_KERNEL_ACCOUNT (GFP_KERNEL | __GFP_ACCOUNT)
#define GFP_NOWAIT (__GFP_KSWAPD_RECLAIM)	/* no direct reclaim, no reserves */
#define GFP_NOIO (__GFP_RECLAIM)	/* reclaim allowed, but no I/O */
#define GFP_NOFS (__GFP_RECLAIM | __GFP_IO)	/* I/O allowed, but no FS recursion */
#define GFP_TEMPORARY (__GFP_RECLAIM | __GFP_IO | __GFP_FS | \
 __GFP_RECLAIMABLE)
#define GFP_USER (__GFP_RECLAIM | __GFP_IO | __GFP_FS | __GFP_HARDWALL)
#define GFP_DMA __GFP_DMA
#define GFP_DMA32 __GFP_DMA32
#define GFP_HIGHUSER (GFP_USER | __GFP_HIGHMEM)
#define GFP_HIGHUSER_MOVABLE (GFP_HIGHUSER | __GFP_MOVABLE)
/* THP allocation: both reclaim bits masked out; callers opt back in */
#define GFP_TRANSHUGE ((GFP_HIGHUSER_MOVABLE | __GFP_COMP | \
 __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN) & \
 ~__GFP_RECLAIM)
261
262
/* Convert GFP flags to their corresponding migrate type */
#define GFP_MOVABLE_MASK (__GFP_RECLAIMABLE|__GFP_MOVABLE)
#define GFP_MOVABLE_SHIFT 3

/*
 * gfpflags_to_migratetype - map allocation flags to a MIGRATE_* type.
 *
 * The mobility bits sit so that shifting right by GFP_MOVABLE_SHIFT
 * yields the migrate type directly; the BUILD_BUG_ONs pin that bit
 * layout at compile time.  Setting both __GFP_RECLAIMABLE and
 * __GFP_MOVABLE at once is invalid (VM_WARN_ON).  When mobility
 * grouping is disabled, everything is treated as unmovable.
 */
static inline int gfpflags_to_migratetype(const gfp_t gfp_flags)
{
	VM_WARN_ON((gfp_flags & GFP_MOVABLE_MASK) == GFP_MOVABLE_MASK);
	BUILD_BUG_ON((1UL << GFP_MOVABLE_SHIFT) != ___GFP_MOVABLE);
	BUILD_BUG_ON((___GFP_MOVABLE >> GFP_MOVABLE_SHIFT) != MIGRATE_MOVABLE);

	if (unlikely(page_group_by_mobility_disabled))
		return MIGRATE_UNMOVABLE;

	/* Group based on mobility */
	return (gfp_flags & GFP_MOVABLE_MASK) >> GFP_MOVABLE_SHIFT;
}
/* The helper macros are private to the function above */
#undef GFP_MOVABLE_MASK
#undef GFP_MOVABLE_SHIFT
280
281static inline bool gfpflags_allow_blocking(const gfp_t gfp_flags)
282{
283 return !!(gfp_flags & __GFP_DIRECT_RECLAIM);
284}
285
/*
 * OPT_ZONE_* resolve to the real zone when the corresponding config
 * option is enabled and fall back to ZONE_NORMAL otherwise, so that
 * GFP_ZONE_TABLE below can be built unconditionally.
 */
#ifdef CONFIG_HIGHMEM
#define OPT_ZONE_HIGHMEM ZONE_HIGHMEM
#else
#define OPT_ZONE_HIGHMEM ZONE_NORMAL
#endif

#ifdef CONFIG_ZONE_DMA
#define OPT_ZONE_DMA ZONE_DMA
#else
#define OPT_ZONE_DMA ZONE_NORMAL
#endif

#ifdef CONFIG_ZONE_DMA32
#define OPT_ZONE_DMA32 ZONE_DMA32
#else
#define OPT_ZONE_DMA32 ZONE_NORMAL
#endif
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
/*
 * GFP_ZONE_TABLE packs one zone number per possible combination of the
 * GFP_ZONEMASK bits into a single compile-time integer.  Each entry is
 * GFP_ZONES_SHIFT bits wide; entry i (i = the zone-flag bits) is the
 * zone to allocate from for that combination.  gfp_zone() indexes it.
 */
#if defined(CONFIG_ZONE_DEVICE) && (MAX_NR_ZONES-1) <= 4
/* ZONE_DEVICE is not a valid GFP zone specifier; two bits suffice */
#define GFP_ZONES_SHIFT 2
#else
#define GFP_ZONES_SHIFT ZONES_SHIFT
#endif

/* 16 entries of GFP_ZONES_SHIFT bits each must fit in one long */
#if 16 * GFP_ZONES_SHIFT > BITS_PER_LONG
#error GFP_ZONES_SHIFT too large to create GFP_ZONE_TABLE integer
#endif

#define GFP_ZONE_TABLE ( \
	(ZONE_NORMAL << 0 * GFP_ZONES_SHIFT) \
	| (OPT_ZONE_DMA << ___GFP_DMA * GFP_ZONES_SHIFT) \
	| (OPT_ZONE_HIGHMEM << ___GFP_HIGHMEM * GFP_ZONES_SHIFT) \
	| (OPT_ZONE_DMA32 << ___GFP_DMA32 * GFP_ZONES_SHIFT) \
	| (ZONE_NORMAL << ___GFP_MOVABLE * GFP_ZONES_SHIFT) \
	| (OPT_ZONE_DMA << (___GFP_MOVABLE | ___GFP_DMA) * GFP_ZONES_SHIFT) \
	| (ZONE_MOVABLE << (___GFP_MOVABLE | ___GFP_HIGHMEM) * GFP_ZONES_SHIFT)\
	| (OPT_ZONE_DMA32 << (___GFP_MOVABLE | ___GFP_DMA32) * GFP_ZONES_SHIFT)\
)
358
359
360
361
362
363
364
/*
 * GFP_ZONE_BAD is a bitmap of the invalid zone-flag combinations: at
 * most one of DMA/DMA32/HIGHMEM may be set (MOVABLE may be combined
 * with a single one of them).  gfp_zone() VM_BUG_ONs when the
 * requested combination's bit is set here.
 */
#define GFP_ZONE_BAD ( \
	1 << (___GFP_DMA | ___GFP_HIGHMEM) \
	| 1 << (___GFP_DMA | ___GFP_DMA32) \
	| 1 << (___GFP_DMA32 | ___GFP_HIGHMEM) \
	| 1 << (___GFP_DMA | ___GFP_DMA32 | ___GFP_HIGHMEM) \
	| 1 << (___GFP_MOVABLE | ___GFP_HIGHMEM | ___GFP_DMA) \
	| 1 << (___GFP_MOVABLE | ___GFP_DMA32 | ___GFP_DMA) \
	| 1 << (___GFP_MOVABLE | ___GFP_DMA32 | ___GFP_HIGHMEM) \
	| 1 << (___GFP_MOVABLE | ___GFP_DMA32 | ___GFP_DMA | ___GFP_HIGHMEM) \
)
375
/*
 * gfp_zone - return the zone_type the given flags allocate from.
 *
 * Uses the low GFP_ZONEMASK bits of @flags as an index into
 * GFP_ZONE_TABLE (GFP_ZONES_SHIFT bits per entry).  Combinations
 * flagged in GFP_ZONE_BAD trip the VM_BUG_ON.
 */
static inline enum zone_type gfp_zone(gfp_t flags)
{
	enum zone_type z;
	int bit = (__force int) (flags & GFP_ZONEMASK);

	z = (GFP_ZONE_TABLE >> (bit * GFP_ZONES_SHIFT)) &
					 ((1 << GFP_ZONES_SHIFT) - 1);
	VM_BUG_ON((GFP_ZONE_BAD >> bit) & 1);
	return z;
}
386
387
388
389
390
391
392
393
394static inline int gfp_zonelist(gfp_t flags)
395{
396#ifdef CONFIG_NUMA
397 if (unlikely(flags & __GFP_THISNODE))
398 return ZONELIST_NOFALLBACK;
399#endif
400 return ZONELIST_FALLBACK;
401}
402
403
404
405
406
407
408
409
410
411
412static inline struct zonelist *node_zonelist(int nid, gfp_t flags)
413{
414 return NODE_DATA(nid)->node_zonelists + gfp_zonelist(flags);
415}
416
/* Optional per-architecture hooks invoked on page free/alloc */
#ifndef HAVE_ARCH_FREE_PAGE
static inline void arch_free_page(struct page *page, int order) { }
#endif
#ifndef HAVE_ARCH_ALLOC_PAGE
static inline void arch_alloc_page(struct page *page, int order) { }
#endif

/* Core allocator entry point; the inline wrappers below funnel here */
struct page *
__alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
		       struct zonelist *zonelist, nodemask_t *nodemask);
427
/* Allocate 2^@order pages from @zonelist with no nodemask restriction */
static inline struct page *
__alloc_pages(gfp_t gfp_mask, unsigned int order,
		struct zonelist *zonelist)
{
	return __alloc_pages_nodemask(gfp_mask, order, zonelist, NULL);
}
434
435
436
437
438
439static inline struct page *
440__alloc_pages_node(int nid, gfp_t gfp_mask, unsigned int order)
441{
442 VM_BUG_ON(nid < 0 || nid >= MAX_NUMNODES);
443 VM_WARN_ON(!node_online(nid));
444
445 return __alloc_pages(gfp_mask, order, node_zonelist(nid, gfp_mask));
446}
447
448
449
450
451
452
453static inline struct page *alloc_pages_node(int nid, gfp_t gfp_mask,
454 unsigned int order)
455{
456 if (nid == NUMA_NO_NODE)
457 nid = numa_mem_id();
458
459 return __alloc_pages_node(nid, gfp_mask, order);
460}
461
#ifdef CONFIG_NUMA
extern struct page *alloc_pages_current(gfp_t gfp_mask, unsigned order);

/* Allocate pages following the current task's NUMA mempolicy */
static inline struct page *
alloc_pages(gfp_t gfp_mask, unsigned int order)
{
	return alloc_pages_current(gfp_mask, order);
}
/* Mempolicy-aware allocation for a given VMA and faulting address */
extern struct page *alloc_pages_vma(gfp_t gfp_mask, int order,
			struct vm_area_struct *vma, unsigned long addr,
			int node, bool hugepage);
#define alloc_hugepage_vma(gfp_mask, vma, addr, order) \
	alloc_pages_vma(gfp_mask, order, vma, addr, numa_node_id(), true)
#else
/* !NUMA: no mempolicy, so the VMA/node hints degenerate to alloc_pages() */
#define alloc_pages(gfp_mask, order) \
		alloc_pages_node(numa_node_id(), gfp_mask, order)
/*
 * The unused trailing parameter was previously named "false", reusing
 * the name of the kernel's boolean constant and mismatching the
 * "hugepage" parameter of the NUMA prototype above.  Renamed for
 * consistency; macro arguments bind by position, so callers are
 * unaffected.
 */
#define alloc_pages_vma(gfp_mask, order, vma, addr, node, hugepage)\
	alloc_pages(gfp_mask, order)
#define alloc_hugepage_vma(gfp_mask, vma, addr, order) \
	alloc_pages(gfp_mask, order)
#endif
/* Order-0 convenience wrappers */
#define alloc_page(gfp_mask) alloc_pages(gfp_mask, 0)
#define alloc_page_vma(gfp_mask, vma, addr) \
	alloc_pages_vma(gfp_mask, 0, vma, addr, numa_node_id(), false)
#define alloc_page_vma_node(gfp_mask, vma, addr, node) \
	alloc_pages_vma(gfp_mask, 0, vma, addr, node, false)
488
/* kmem page allocators (paired with the free_kmem_pages helpers below) */
extern struct page *alloc_kmem_pages(gfp_t gfp_mask, unsigned int order);
extern struct page *alloc_kmem_pages_node(int nid, gfp_t gfp_mask,
 unsigned int order);

/* Return the kernel virtual address of freshly allocated pages */
extern unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order);
extern unsigned long get_zeroed_page(gfp_t gfp_mask);

/* Exact-byte-size allocation (not order-based); pair with free_pages_exact() */
void *alloc_pages_exact(size_t size, gfp_t gfp_mask);
void free_pages_exact(void *virt, size_t size);
void * __meminit alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask);

#define __get_free_page(gfp_mask) \
 __get_free_pages((gfp_mask), 0)

#define __get_dma_pages(gfp_mask, order) \
 __get_free_pages((gfp_mask) | GFP_DMA, (order))
505
extern void __free_pages(struct page *page, unsigned int order);
extern void free_pages(unsigned long addr, unsigned int order);
/* @cold: hint about the page's cache temperature - see mm/page_alloc.c */
extern void free_hot_cold_page(struct page *page, bool cold);
extern void free_hot_cold_page_list(struct list_head *list, bool cold);

/* page_frag allocator: carve small fragments out of a cached page */
struct page_frag_cache;
extern void *__alloc_page_frag(struct page_frag_cache *nc,
 unsigned int fragsz, gfp_t gfp_mask);
extern void __free_page_frag(void *addr);

extern void __free_kmem_pages(struct page *page, unsigned int order);
extern void free_kmem_pages(unsigned long addr, unsigned int order);

/* Order-0 convenience wrappers */
#define __free_page(page) __free_pages((page), 0)
#define free_page(addr) free_pages((addr), 0)

void page_alloc_init(void);
/* Per-cpu pagelist draining (zone semantics defined in mm/page_alloc.c) */
void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp);
void drain_all_pages(struct zone *zone);
void drain_local_pages(struct zone *zone);

void page_alloc_init_late(void);
528
529
530
531
532
533
534
535
/*
 * Mask of GFP flags currently permitted; presumably narrowed by the
 * pm_restrict/pm_restore helpers below around suspend - see the PM
 * core for the exact protocol.
 */
extern gfp_t gfp_allowed_mask;

/* May this allocation dip into pfmemalloc reserves? (see mm/page_alloc.c) */
bool gfp_pfmemalloc_allowed(gfp_t gfp_mask);

extern void pm_restrict_gfp_mask(void);
extern void pm_restore_gfp_mask(void);

#ifdef CONFIG_PM_SLEEP
extern bool pm_suspended_storage(void);
#else
/* Without CONFIG_PM_SLEEP storage is never suspended */
static inline bool pm_suspended_storage(void)
{
	return false;
}
#endif
552
#if (defined(CONFIG_MEMORY_ISOLATION) && defined(CONFIG_COMPACTION)) || defined(CONFIG_CMA)
/* Allocate/free a physically contiguous PFN range (CMA / compaction) */
extern int alloc_contig_range(unsigned long start, unsigned long end,
 unsigned migratetype);
extern void free_contig_range(unsigned long pfn, unsigned nr_pages);
#endif

#ifdef CONFIG_CMA
/* Hands a CMA-reserved pageblock over to the page allocator */
extern void init_cma_reserved_pageblock(struct page *page);
#endif
564
565#endif
566