#ifndef __LINUX_GFP_H
#define __LINUX_GFP_H

#include <linux/mmzone.h>
#include <linux/stddef.h>
#include <linux/linkage.h>
#include <linux/topology.h>
#include <linux/mmdebug.h>

struct vm_area_struct;

/* Plain integer GFP bitmasks. Do not use this directly. */
#define ___GFP_DMA		0x01u
#define ___GFP_HIGHMEM		0x02u
#define ___GFP_DMA32		0x04u
#define ___GFP_MOVABLE		0x08u
#define ___GFP_WAIT		0x10u
#define ___GFP_HIGH		0x20u
#define ___GFP_IO		0x40u
#define ___GFP_FS		0x80u
#define ___GFP_COLD		0x100u
#define ___GFP_NOWARN		0x200u
#define ___GFP_REPEAT		0x400u
#define ___GFP_NOFAIL		0x800u
#define ___GFP_NORETRY		0x1000u
#define ___GFP_COMP		0x4000u
#define ___GFP_ZERO		0x8000u
#define ___GFP_NOMEMALLOC	0x10000u
#define ___GFP_HARDWALL		0x20000u
#define ___GFP_THISNODE		0x40000u
#define ___GFP_RECLAIMABLE	0x80000u
#ifdef CONFIG_KMEMCHECK
#define ___GFP_NOTRACK		0x200000u
#else
#define ___GFP_NOTRACK		0
#endif
#define ___GFP_NO_KSWAPD	0x400000u
#define ___GFP_OTHER_NODE	0x800000u
#define ___GFP_WRITE		0x1000000u

/*
 * GFP bitmasks..
 *
 * Zone modifiers (see linux/mmzone.h - low three bits)
 *
 * Do not put any conditional on these. If necessary modify the definitions
 * without the underscores and use them consistently. The definitions here may
 * be used in bit comparisons.
 */
#define __GFP_DMA	((__force gfp_t)___GFP_DMA)
#define __GFP_HIGHMEM	((__force gfp_t)___GFP_HIGHMEM)
#define __GFP_DMA32	((__force gfp_t)___GFP_DMA32)
#define __GFP_MOVABLE	((__force gfp_t)___GFP_MOVABLE)
#define GFP_ZONEMASK	(__GFP_DMA|__GFP_HIGHMEM|__GFP_DMA32|__GFP_MOVABLE)

/*
 * Action modifiers - doesn't change the zoning
 *
 * __GFP_REPEAT: Try hard to allocate the memory, but the allocation attempt
 * _might_ fail.  This depends upon the particular VM implementation.
 *
 * __GFP_NOFAIL: The VM implementation _must_ retry infinitely: the caller
 * cannot handle allocation failures.  This modifier is deprecated and no new
 * users should be added.
 *
 * __GFP_NORETRY: The VM implementation must not retry indefinitely.
 *
 * __GFP_MOVABLE: Flag that this page will be movable by the page migration
 * mechanism or reclaimed
 */
#define __GFP_WAIT	((__force gfp_t)___GFP_WAIT)	/* Can wait and reschedule? */
#define __GFP_HIGH	((__force gfp_t)___GFP_HIGH)	/* Should access emergency pools? */
#define __GFP_IO	((__force gfp_t)___GFP_IO)	/* Can start physical IO? */
#define __GFP_FS	((__force gfp_t)___GFP_FS)	/* Can call down to low-level FS? */
#define __GFP_COLD	((__force gfp_t)___GFP_COLD)	/* Cache-cold page required */
#define __GFP_NOWARN	((__force gfp_t)___GFP_NOWARN)	/* Suppress page allocation failure warning */
#define __GFP_REPEAT	((__force gfp_t)___GFP_REPEAT)	/* See above */
#define __GFP_NOFAIL	((__force gfp_t)___GFP_NOFAIL)	/* See above */
#define __GFP_NORETRY	((__force gfp_t)___GFP_NORETRY) /* See above */
#define __GFP_COMP	((__force gfp_t)___GFP_COMP)	/* Add compound page metadata */
#define __GFP_ZERO	((__force gfp_t)___GFP_ZERO)	/* Return zeroed page on success */
#define __GFP_NOMEMALLOC ((__force gfp_t)___GFP_NOMEMALLOC) /* Don't use emergency reserves */
#define __GFP_HARDWALL	((__force gfp_t)___GFP_HARDWALL) /* Enforce hardwall cpuset memory allocs */
#define __GFP_THISNODE	((__force gfp_t)___GFP_THISNODE) /* No fallback, no policies */
#define __GFP_RECLAIMABLE ((__force gfp_t)___GFP_RECLAIMABLE) /* Page is reclaimable */
#define __GFP_NOTRACK	((__force gfp_t)___GFP_NOTRACK)	/* Don't track with kmemcheck */

#define __GFP_NO_KSWAPD	((__force gfp_t)___GFP_NO_KSWAPD)
#define __GFP_OTHER_NODE ((__force gfp_t)___GFP_OTHER_NODE) /* On behalf of other node */
#define __GFP_WRITE	((__force gfp_t)___GFP_WRITE)	/* Allocator intends to dirty page */

/*
 * This may seem redundant, but it's a way of annotating false positives vs.
 * allocations that simply cannot be supported (e.g. page tables).
 */
#define __GFP_NOTRACK_FALSE_POSITIVE (__GFP_NOTRACK)

#define __GFP_BITS_SHIFT 25	/* Room for N __GFP_FOO bits */
#define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1))

/* This equals 0, but use constants in case they ever change */
#define GFP_NOWAIT	(GFP_ATOMIC & ~__GFP_HIGH)
/* GFP_ATOMIC means both !wait (__GFP_WAIT not set) and use emergency pool */
#define GFP_ATOMIC	(__GFP_HIGH)
#define GFP_NOIO	(__GFP_WAIT)
#define GFP_NOFS	(__GFP_WAIT | __GFP_IO)
#define GFP_KERNEL	(__GFP_WAIT | __GFP_IO | __GFP_FS)
#define GFP_TEMPORARY	(__GFP_WAIT | __GFP_IO | __GFP_FS | \
			 __GFP_RECLAIMABLE)
#define GFP_USER	(__GFP_WAIT | __GFP_IO | __GFP_FS | __GFP_HARDWALL)
#define GFP_HIGHUSER	(__GFP_WAIT | __GFP_IO | __GFP_FS | __GFP_HARDWALL | \
			 __GFP_HIGHMEM)
#define GFP_HIGHUSER_MOVABLE	(__GFP_WAIT | __GFP_IO | __GFP_FS | \
				 __GFP_HARDWALL | __GFP_HIGHMEM | \
				 __GFP_MOVABLE)
#define GFP_IOFS	(__GFP_IO | __GFP_FS)
#define GFP_TRANSHUGE	(GFP_HIGHUSER_MOVABLE | __GFP_COMP | \
			 __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN | \
			 __GFP_NO_KSWAPD)
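
/*
 * Illustrative sketch, not part of this header: picking a mask at a call
 * site. GFP_KERNEL may sleep and start I/O or filesystem activity;
 * GFP_ATOMIC may not sleep and may dip into the emergency pool, so it is
 * the variant usable from interrupt context. The kmalloc() calls below
 * (declared in linux/slab.h) are the usual consumers of these masks:
 *
 *	buf = kmalloc(len, GFP_KERNEL);		// process context, may sleep
 *	buf = kmalloc(len, GFP_ATOMIC);		// IRQ context, no sleeping
 *	page = alloc_page(GFP_HIGHUSER_MOVABLE); // user page, migratable
 */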

#ifdef CONFIG_NUMA
#define GFP_THISNODE	(__GFP_THISNODE | __GFP_NOWARN | __GFP_NORETRY)
#else
#define GFP_THISNODE	((__force gfp_t)0)
#endif

/* This mask makes up all the page movable related flags */
#define GFP_MOVABLE_MASK (__GFP_RECLAIMABLE|__GFP_MOVABLE)

/* Control page allocator reclaim behavior */
#define GFP_RECLAIM_MASK (__GFP_WAIT|__GFP_HIGH|__GFP_IO|__GFP_FS|\
			__GFP_NOWARN|__GFP_REPEAT|__GFP_NOFAIL|\
			__GFP_NORETRY|__GFP_NOMEMALLOC)

/* Control slab gfp mask during early boot */
#define GFP_BOOT_MASK (__GFP_BITS_MASK & ~(__GFP_WAIT|__GFP_IO|__GFP_FS))

/* Control allocation constraints */
#define GFP_CONSTRAINT_MASK (__GFP_HARDWALL|__GFP_THISNODE)

/* Do not use these with a slab allocator */
#define GFP_SLAB_BUG_MASK (__GFP_DMA32|__GFP_HIGHMEM|~__GFP_BITS_MASK)

/* Flag - indicates that the buffer will be suitable for DMA.  Ignored on some
   platforms, used as appropriate on others */

#define GFP_DMA		__GFP_DMA

/* 4GB DMA on some platforms */
#define GFP_DMA32	__GFP_DMA32

/* Convert GFP flags to their corresponding migrate type */
static inline int allocflags_to_migratetype(gfp_t gfp_flags)
{
	WARN_ON((gfp_flags & GFP_MOVABLE_MASK) == GFP_MOVABLE_MASK);

	if (unlikely(page_group_by_mobility_disabled))
		return MIGRATE_UNMOVABLE;

	/* Group based on mobility */
	return (((gfp_flags & __GFP_MOVABLE) != 0) << 1) |
		((gfp_flags & __GFP_RECLAIMABLE) != 0);
}
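
/*
 * Worked example (illustrative): with the migrate types ordered as
 * MIGRATE_UNMOVABLE = 0, MIGRATE_RECLAIMABLE = 1, MIGRATE_MOVABLE = 2 in
 * linux/mmzone.h, the bit arithmetic above maps:
 *
 *	neither flag		-> (0 << 1) | 0 == 0 == MIGRATE_UNMOVABLE
 *	__GFP_RECLAIMABLE	-> (0 << 1) | 1 == 1 == MIGRATE_RECLAIMABLE
 *	__GFP_MOVABLE		-> (1 << 1) | 0 == 2 == MIGRATE_MOVABLE
 *
 * Both flags set together is the combination rejected by the WARN_ON().
 */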

#ifdef CONFIG_HIGHMEM
#define OPT_ZONE_HIGHMEM ZONE_HIGHMEM
#else
#define OPT_ZONE_HIGHMEM ZONE_NORMAL
#endif

#ifdef CONFIG_ZONE_DMA
#define OPT_ZONE_DMA ZONE_DMA
#else
#define OPT_ZONE_DMA ZONE_NORMAL
#endif

#ifdef CONFIG_ZONE_DMA32
#define OPT_ZONE_DMA32 ZONE_DMA32
#else
#define OPT_ZONE_DMA32 ZONE_NORMAL
#endif

/*
 * GFP_ZONE_TABLE is a word-size bitstring that is used for looking up the
 * zone to use given the lowest 4 bits of gfp_t. Entries are ZONES_SHIFT
 * bits long and there are 16 of them to cover all possible combinations of
 * __GFP_DMA, __GFP_DMA32, __GFP_MOVABLE and __GFP_HIGHMEM.
 *
 * The zone fallback order is MOVABLE=>HIGHMEM=>NORMAL=>DMA32=>DMA.
 * But GFP_MOVABLE is not only a zone specifier but also an allocation
 * policy. Therefore __GFP_MOVABLE plus another zone selector is valid.
 * Only 1 bit of the lowest 3 bits (DMA,DMA32,HIGHMEM) can be set to "1".
 *
 *       bit       result
 *       =================
 *       0x0    => NORMAL
 *       0x1    => DMA or NORMAL
 *       0x2    => HIGHMEM or NORMAL
 *       0x3    => BAD (DMA+HIGHMEM)
 *       0x4    => DMA32 or DMA or NORMAL
 *       0x5    => BAD (DMA+DMA32)
 *       0x6    => BAD (HIGHMEM+DMA32)
 *       0x7    => BAD (HIGHMEM+DMA32+DMA)
 *       0x8    => NORMAL (MOVABLE+0)
 *       0x9    => DMA or NORMAL (MOVABLE+DMA)
 *       0xa    => MOVABLE (Movable is valid only if HIGHMEM is set too)
 *       0xb    => BAD (MOVABLE+HIGHMEM+DMA)
 *       0xc    => DMA32 (MOVABLE+DMA32)
 *       0xd    => BAD (MOVABLE+DMA32+DMA)
 *       0xe    => BAD (MOVABLE+DMA32+HIGHMEM)
 *       0xf    => BAD (MOVABLE+DMA32+HIGHMEM+DMA)
 *
 * ZONES_SHIFT must be <= 2 on 32 bit platforms.
 */

#if 16 * ZONES_SHIFT > BITS_PER_LONG
#error ZONES_SHIFT too large to create GFP_ZONE_TABLE integer
#endif

#define GFP_ZONE_TABLE ( \
	(ZONE_NORMAL << 0 * ZONES_SHIFT) \
	| (OPT_ZONE_DMA << ___GFP_DMA * ZONES_SHIFT) \
	| (OPT_ZONE_HIGHMEM << ___GFP_HIGHMEM * ZONES_SHIFT) \
	| (OPT_ZONE_DMA32 << ___GFP_DMA32 * ZONES_SHIFT) \
	| (ZONE_NORMAL << ___GFP_MOVABLE * ZONES_SHIFT) \
	| (OPT_ZONE_DMA << (___GFP_MOVABLE | ___GFP_DMA) * ZONES_SHIFT) \
	| (ZONE_MOVABLE << (___GFP_MOVABLE | ___GFP_HIGHMEM) * ZONES_SHIFT) \
	| (OPT_ZONE_DMA32 << (___GFP_MOVABLE | ___GFP_DMA32) * ZONES_SHIFT) \
)
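
/*
 * Worked example (illustrative), assuming ZONES_SHIFT == 2: the entry for
 * the combination ___GFP_MOVABLE | ___GFP_HIGHMEM (0xa) lands at bits
 * 20..21 (0xa * 2) of the table word and holds ZONE_MOVABLE, so:
 *
 *	(GFP_ZONE_TABLE >> (0xa * ZONES_SHIFT)) & ((1 << ZONES_SHIFT) - 1)
 *		== ZONE_MOVABLE
 */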

/*
 * GFP_ZONE_BAD is a bitmap for all combinations of __GFP_DMA, __GFP_DMA32,
 * __GFP_HIGHMEM and __GFP_MOVABLE that are not permitted. One flag per
 * entry starting with bit 0. Bit is set if the combination is not allowed.
 */
#define GFP_ZONE_BAD ( \
	1 << (___GFP_DMA | ___GFP_HIGHMEM) \
	| 1 << (___GFP_DMA | ___GFP_DMA32) \
	| 1 << (___GFP_DMA32 | ___GFP_HIGHMEM) \
	| 1 << (___GFP_DMA | ___GFP_DMA32 | ___GFP_HIGHMEM) \
	| 1 << (___GFP_MOVABLE | ___GFP_HIGHMEM | ___GFP_DMA) \
	| 1 << (___GFP_MOVABLE | ___GFP_DMA32 | ___GFP_DMA) \
	| 1 << (___GFP_MOVABLE | ___GFP_DMA32 | ___GFP_HIGHMEM) \
	| 1 << (___GFP_MOVABLE | ___GFP_DMA32 | ___GFP_DMA | ___GFP_HIGHMEM) \
)

static inline enum zone_type gfp_zone(gfp_t flags)
{
	enum zone_type z;
	int bit = (__force int) (flags & GFP_ZONEMASK);

	z = (GFP_ZONE_TABLE >> (bit * ZONES_SHIFT)) &
					((1 << ZONES_SHIFT) - 1);
	VM_BUG_ON((GFP_ZONE_BAD >> bit) & 1);
	return z;
}
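
/*
 * Example lookup (illustrative): for GFP_HIGHUSER_MOVABLE the zone bits
 * are __GFP_HIGHMEM | __GFP_MOVABLE, so bit == 0xa and, per the table
 * above, gfp_zone() returns ZONE_MOVABLE. For plain GFP_KERNEL no zone
 * modifier is set, bit == 0, and the result is ZONE_NORMAL.
 */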

/*
 * There is only one page-allocator function, and two main namespaces to
 * it. The alloc_page*() variants return 'struct page *' and as such
 * can allocate highmem pages, the *get*page*() variants return
 * virtual kernel addresses to the allocated page(s).
 */

static inline int gfp_zonelist(gfp_t flags)
{
	if (NUMA_BUILD && unlikely(flags & __GFP_THISNODE))
		return 1;

	return 0;
}

/*
 * We get the zone list from the current node and the gfp_mask.
 * This zone list contains a maximum of MAXNODES*MAX_NR_ZONES zones.
 * There are two zonelists per node, one for all zones with memory and
 * one containing just zones from the node the zonelist belongs to.
 *
 * For the normal case of non-DISCONTIGMEM systems the NODE_DATA() gets
 * optimized to &contig_page_data at compile-time.
 */
static inline struct zonelist *node_zonelist(int nid, gfp_t flags)
{
	return NODE_DATA(nid)->node_zonelists + gfp_zonelist(flags);
}

#ifndef HAVE_ARCH_FREE_PAGE
static inline void arch_free_page(struct page *page, int order) { }
#endif
#ifndef HAVE_ARCH_ALLOC_PAGE
static inline void arch_alloc_page(struct page *page, int order) { }
#endif

struct page *
__alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
		       struct zonelist *zonelist, nodemask_t *nodemask);

static inline struct page *
__alloc_pages(gfp_t gfp_mask, unsigned int order,
		struct zonelist *zonelist)
{
	return __alloc_pages_nodemask(gfp_mask, order, zonelist, NULL);
}

static inline struct page *alloc_pages_node(int nid, gfp_t gfp_mask,
						unsigned int order)
{
	/* Unknown node is current node */
	if (nid < 0)
		nid = numa_node_id();

	return __alloc_pages(gfp_mask, order, node_zonelist(nid, gfp_mask));
}

static inline struct page *alloc_pages_exact_node(int nid, gfp_t gfp_mask,
						unsigned int order)
{
	VM_BUG_ON(nid < 0 || nid >= MAX_NUMNODES || !node_online(nid));

	return __alloc_pages(gfp_mask, order, node_zonelist(nid, gfp_mask));
}
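
/*
 * Usage note (illustrative): alloc_pages_node() treats a negative nid as
 * "current node", while alloc_pages_exact_node() requires a valid, online
 * node and traps otherwise:
 *
 *	page = alloc_pages_node(-1, GFP_KERNEL, 0);   // uses numa_node_id()
 *	page = alloc_pages_exact_node(nid, GFP_KERNEL, 0); // nid must be
 *							   // online
 */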

#ifdef CONFIG_NUMA
extern struct page *alloc_pages_current(gfp_t gfp_mask, unsigned order);

static inline struct page *
alloc_pages(gfp_t gfp_mask, unsigned int order)
{
	return alloc_pages_current(gfp_mask, order);
}
extern struct page *alloc_pages_vma(gfp_t gfp_mask, int order,
			struct vm_area_struct *vma, unsigned long addr,
			int node);
#else
#define alloc_pages(gfp_mask, order) \
		alloc_pages_node(numa_node_id(), gfp_mask, order)
#define alloc_pages_vma(gfp_mask, order, vma, addr, node) \
	alloc_pages(gfp_mask, order)
#endif
#define alloc_page(gfp_mask) alloc_pages(gfp_mask, 0)
#define alloc_page_vma(gfp_mask, vma, addr) \
	alloc_pages_vma(gfp_mask, 0, vma, addr, numa_node_id())
#define alloc_page_vma_node(gfp_mask, vma, addr, node) \
	alloc_pages_vma(gfp_mask, 0, vma, addr, node)
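
/*
 * Typical usage (illustrative): allocate and free an order-2 block, i.e.
 * four physically contiguous pages, via the struct page interface:
 *
 *	struct page *page = alloc_pages(GFP_KERNEL, 2);
 *	if (!page)
 *		return -ENOMEM;
 *	...
 *	__free_pages(page, 2);
 */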

extern unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order);
extern unsigned long get_zeroed_page(gfp_t gfp_mask);

void *alloc_pages_exact(size_t size, gfp_t gfp_mask);
void free_pages_exact(void *virt, size_t size);

/* This is different from alloc_pages_exact_node !!! */
void *alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask);

#define __get_free_page(gfp_mask) \
		__get_free_pages((gfp_mask), 0)

#define __get_dma_pages(gfp_mask, order) \
		__get_free_pages((gfp_mask) | GFP_DMA, (order))
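
/*
 * Usage sketch (illustrative): the __get_free_pages() family returns a
 * kernel virtual address rather than a struct page, so it cannot be used
 * with __GFP_HIGHMEM (highmem pages have no permanent kernel mapping):
 *
 *	unsigned long addr = __get_free_page(GFP_KERNEL);
 *	unsigned long zeroed = get_zeroed_page(GFP_KERNEL);
 *	...
 *	free_page(addr);
 *	free_page(zeroed);
 */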

extern void __free_pages(struct page *page, unsigned int order);
extern void free_pages(unsigned long addr, unsigned int order);
extern void free_hot_cold_page(struct page *page, int cold);
extern void free_hot_cold_page_list(struct list_head *list, int cold);

#define __free_page(page) __free_pages((page), 0)
#define free_page(addr) free_pages((addr), 0)

void page_alloc_init(void);
void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp);
void drain_all_pages(void);
void drain_local_pages(void *dummy);

/*
 * gfp_allowed_mask is set to GFP_BOOT_MASK during early boot to restrict
 * what GFP flags are used before interrupts are enabled. Once interrupts
 * are enabled, it is set to __GFP_BITS_MASK while the system is running.
 * During hibernation, it is used by PM to avoid I/O during memory
 * allocation while devices are suspended.
 */
extern gfp_t gfp_allowed_mask;

extern void pm_restrict_gfp_mask(void);
extern void pm_restore_gfp_mask(void);

#ifdef CONFIG_PM_SLEEP
extern bool pm_suspended_storage(void);
#else
static inline bool pm_suspended_storage(void)
{
	return false;
}
#endif

#ifdef CONFIG_CMA

/* The below functions must be run on a range from a single zone. */
extern int alloc_contig_range(unsigned long start, unsigned long end,
			      unsigned migratetype);
extern void free_contig_range(unsigned long pfn, unsigned nr_pages);

/* CMA stuff */
extern void init_cma_reserved_pageblock(struct page *page);

#endif

#endif /* __LINUX_GFP_H */