#ifndef __LINUX_GFP_H
#define __LINUX_GFP_H

#include <linux/mmzone.h>
#include <linux/stddef.h>
#include <linux/linkage.h>
#include <linux/topology.h>
#include <linux/mmdebug.h>

struct vm_area_struct;
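
/* Plain integer GFP bitmasks. Do not use this directly. */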
#define ___GFP_DMA		0x01u
#define ___GFP_HIGHMEM		0x02u
#define ___GFP_DMA32		0x04u
#define ___GFP_MOVABLE		0x08u
#define ___GFP_WAIT		0x10u
#define ___GFP_HIGH		0x20u
#define ___GFP_IO		0x40u
#define ___GFP_FS		0x80u
#define ___GFP_COLD		0x100u
#define ___GFP_NOWARN		0x200u
#define ___GFP_REPEAT		0x400u
#define ___GFP_NOFAIL		0x800u
#define ___GFP_NORETRY		0x1000u
#define ___GFP_COMP		0x4000u
#define ___GFP_ZERO		0x8000u
#define ___GFP_NOMEMALLOC	0x10000u
#define ___GFP_HARDWALL		0x20000u
#define ___GFP_THISNODE		0x40000u
#define ___GFP_RECLAIMABLE	0x80000u
#ifdef CONFIG_KMEMCHECK
#define ___GFP_NOTRACK		0x200000u
#else
#define ___GFP_NOTRACK		0
#endif
#define ___GFP_NO_KSWAPD	0x400000u
#define ___GFP_OTHER_NODE	0x800000u
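
/*
 * GFP bitmasks.
 *
 * Zone modifiers (see linux/mmzone.h - the low four bits, GFP_ZONEMASK)
 *
 * Do not put any conditional on these. If necessary modify the definitions
 * without the underscores and use them consistently. The definitions here may
 * be used in bit comparisons.
 */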
#define __GFP_DMA	((__force gfp_t)___GFP_DMA)
#define __GFP_HIGHMEM	((__force gfp_t)___GFP_HIGHMEM)
#define __GFP_DMA32	((__force gfp_t)___GFP_DMA32)
#define __GFP_MOVABLE	((__force gfp_t)___GFP_MOVABLE)
#define GFP_ZONEMASK	(__GFP_DMA|__GFP_HIGHMEM|__GFP_DMA32|__GFP_MOVABLE)
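
/*
 * Action modifiers - doesn't change the zoning
 *
 * __GFP_REPEAT: Try hard to allocate the memory, but the allocation attempt
 * _might_ fail.  This depends upon the particular VM implementation.
 *
 * __GFP_NOFAIL: The VM implementation _must_ retry infinitely: the caller
 * cannot handle allocation failures.  This modifier is deprecated and no new
 * users should be added.
 *
 * __GFP_NORETRY: The VM implementation must not retry indefinitely.
 *
 * __GFP_MOVABLE: Flag that this page will be movable by the page migration
 * mechanism or reclaimed.
 */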
#define __GFP_WAIT	((__force gfp_t)___GFP_WAIT)	/* Can wait and reschedule? */
#define __GFP_HIGH	((__force gfp_t)___GFP_HIGH)	/* Should access emergency pools? */
#define __GFP_IO	((__force gfp_t)___GFP_IO)	/* Can start physical IO? */
#define __GFP_FS	((__force gfp_t)___GFP_FS)	/* Can call down to low-level FS? */
#define __GFP_COLD	((__force gfp_t)___GFP_COLD)	/* Cache-cold page required */
#define __GFP_NOWARN	((__force gfp_t)___GFP_NOWARN)	/* Suppress page allocation failure warning */
#define __GFP_REPEAT	((__force gfp_t)___GFP_REPEAT)	/* See above */
#define __GFP_NOFAIL	((__force gfp_t)___GFP_NOFAIL)	/* See above */
#define __GFP_NORETRY	((__force gfp_t)___GFP_NORETRY)	/* See above */
#define __GFP_COMP	((__force gfp_t)___GFP_COMP)	/* Add compound page metadata */
#define __GFP_ZERO	((__force gfp_t)___GFP_ZERO)	/* Return zeroed page on success */
#define __GFP_NOMEMALLOC ((__force gfp_t)___GFP_NOMEMALLOC) /* Don't use emergency reserves */
#define __GFP_HARDWALL   ((__force gfp_t)___GFP_HARDWALL) /* Enforce hardwall cpuset memory allocs */
#define __GFP_THISNODE	((__force gfp_t)___GFP_THISNODE) /* No fallback, no policies */
#define __GFP_RECLAIMABLE ((__force gfp_t)___GFP_RECLAIMABLE) /* Page is reclaimable */
#define __GFP_NOTRACK	((__force gfp_t)___GFP_NOTRACK)	/* Don't track with kmemcheck */

#define __GFP_NO_KSWAPD	((__force gfp_t)___GFP_NO_KSWAPD)
#define __GFP_OTHER_NODE ((__force gfp_t)___GFP_OTHER_NODE) /* On behalf of other node */
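
/*
 * This may seem redundant, but it's a way of annotating false positives vs.
 * allocations that simply cannot be supported (e.g. page tables).
 */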
#define __GFP_NOTRACK_FALSE_POSITIVE (__GFP_NOTRACK)

#define __GFP_BITS_SHIFT 24	/* Room for 24 __GFP_FOO bits */
#define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1))
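
/* This equals 0, but use constants in case they ever change */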
#define GFP_NOWAIT	(GFP_ATOMIC & ~__GFP_HIGH)
/* GFP_ATOMIC means both !wait (__GFP_WAIT not set) and use emergency pool */
#define GFP_ATOMIC	(__GFP_HIGH)
#define GFP_NOIO	(__GFP_WAIT)
#define GFP_NOFS	(__GFP_WAIT | __GFP_IO)
#define GFP_KERNEL	(__GFP_WAIT | __GFP_IO | __GFP_FS)
#define GFP_TEMPORARY	(__GFP_WAIT | __GFP_IO | __GFP_FS | \
			 __GFP_RECLAIMABLE)
#define GFP_USER	(__GFP_WAIT | __GFP_IO | __GFP_FS | __GFP_HARDWALL)
#define GFP_HIGHUSER	(__GFP_WAIT | __GFP_IO | __GFP_FS | __GFP_HARDWALL | \
			 __GFP_HIGHMEM)
#define GFP_HIGHUSER_MOVABLE	(__GFP_WAIT | __GFP_IO | __GFP_FS | \
				 __GFP_HARDWALL | __GFP_HIGHMEM | \
				 __GFP_MOVABLE)
#define GFP_IOFS	(__GFP_IO | __GFP_FS)
#define GFP_TRANSHUGE	(GFP_HIGHUSER_MOVABLE | __GFP_COMP | \
			 __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN | \
			 __GFP_NO_KSWAPD)

#ifdef CONFIG_NUMA
#define GFP_THISNODE	(__GFP_THISNODE | __GFP_NOWARN | __GFP_NORETRY)
#else
#define GFP_THISNODE	((__force gfp_t)0)
#endif
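
/* This mask makes up all the page movable related flags */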
#define GFP_MOVABLE_MASK (__GFP_RECLAIMABLE|__GFP_MOVABLE)
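
/* Control page allocator reclaim behavior */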
#define GFP_RECLAIM_MASK (__GFP_WAIT|__GFP_HIGH|__GFP_IO|__GFP_FS|\
			__GFP_NOWARN|__GFP_REPEAT|__GFP_NOFAIL|\
			__GFP_NORETRY|__GFP_NOMEMALLOC)
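
/* Control slab gfp mask during early boot */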
#define GFP_BOOT_MASK (__GFP_BITS_MASK & ~(__GFP_WAIT|__GFP_IO|__GFP_FS))
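
/* Control allocation constraints */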
#define GFP_CONSTRAINT_MASK (__GFP_HARDWALL|__GFP_THISNODE)
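
/* Do not use these with a slab allocator */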
#define GFP_SLAB_BUG_MASK (__GFP_DMA32|__GFP_HIGHMEM|~__GFP_BITS_MASK)
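
/* Flag - indicates that the buffer will be suitable for DMA.  Ignored on some
   platforms, used as appropriate on others */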
#define GFP_DMA		__GFP_DMA
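
/* 4GB DMA on some platforms */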
#define GFP_DMA32	__GFP_DMA32
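
/* Convert GFP flags to their corresponding migrate type */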
static inline int allocflags_to_migratetype(gfp_t gfp_flags)
{
	WARN_ON((gfp_flags & GFP_MOVABLE_MASK) == GFP_MOVABLE_MASK);

	if (unlikely(page_group_by_mobility_disabled))
		return MIGRATE_UNMOVABLE;

	/* Group based on mobility */
	return (((gfp_flags & __GFP_MOVABLE) != 0) << 1) |
		((gfp_flags & __GFP_RECLAIMABLE) != 0);
}

#ifdef CONFIG_HIGHMEM
#define OPT_ZONE_HIGHMEM ZONE_HIGHMEM
#else
#define OPT_ZONE_HIGHMEM ZONE_NORMAL
#endif

#ifdef CONFIG_ZONE_DMA
#define OPT_ZONE_DMA ZONE_DMA
#else
#define OPT_ZONE_DMA ZONE_NORMAL
#endif

#ifdef CONFIG_ZONE_DMA32
#define OPT_ZONE_DMA32 ZONE_DMA32
#else
#define OPT_ZONE_DMA32 ZONE_NORMAL
#endif
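
/*
 * GFP_ZONE_TABLE is a word-size bitstring used to look up the zone for the
 * lowest 4 bits of a gfp_t. Entries are ZONES_SHIFT bits long and there are
 * 16 of them to cover all possible combinations of __GFP_DMA, __GFP_DMA32,
 * __GFP_HIGHMEM and __GFP_MOVABLE.
 *
 * The zone fallback order is MOVABLE=>HIGHMEM=>NORMAL=>DMA32=>DMA.
 * __GFP_MOVABLE is not only a zone specifier but also an allocation policy,
 * so __GFP_MOVABLE plus one other zone selector is valid. Only one of the
 * lowest three bits (DMA, DMA32, HIGHMEM) may be set.
 *
 *       bit       result
 *       =================
 *       0x0    => NORMAL
 *       0x1    => DMA or NORMAL
 *       0x2    => HIGHMEM or NORMAL
 *       0x3    => BAD (DMA+HIGHMEM)
 *       0x4    => DMA32 or NORMAL
 *       0x5    => BAD (DMA+DMA32)
 *       0x6    => BAD (HIGHMEM+DMA32)
 *       0x7    => BAD (HIGHMEM+DMA32+DMA)
 *       0x8    => NORMAL (MOVABLE+0)
 *       0x9    => DMA or NORMAL (MOVABLE+DMA)
 *       0xa    => MOVABLE (movable is valid only if HIGHMEM is set too)
 *       0xb    => BAD (MOVABLE+HIGHMEM+DMA)
 *       0xc    => DMA32 or NORMAL (MOVABLE+DMA32)
 *       0xd    => BAD (MOVABLE+DMA32+DMA)
 *       0xe    => BAD (MOVABLE+DMA32+HIGHMEM)
 *       0xf    => BAD (MOVABLE+DMA32+HIGHMEM+DMA)
 *
 * ZONES_SHIFT must be <= 2 on 32 bit platforms.
 */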
#if 16 * ZONES_SHIFT > BITS_PER_LONG
#error ZONES_SHIFT too large to create GFP_ZONE_TABLE integer
#endif

#define GFP_ZONE_TABLE ( \
	(ZONE_NORMAL << 0 * ZONES_SHIFT) \
	| (OPT_ZONE_DMA << ___GFP_DMA * ZONES_SHIFT) \
	| (OPT_ZONE_HIGHMEM << ___GFP_HIGHMEM * ZONES_SHIFT) \
	| (OPT_ZONE_DMA32 << ___GFP_DMA32 * ZONES_SHIFT) \
	| (ZONE_NORMAL << ___GFP_MOVABLE * ZONES_SHIFT) \
	| (OPT_ZONE_DMA << (___GFP_MOVABLE | ___GFP_DMA) * ZONES_SHIFT) \
	| (ZONE_MOVABLE << (___GFP_MOVABLE | ___GFP_HIGHMEM) * ZONES_SHIFT) \
	| (OPT_ZONE_DMA32 << (___GFP_MOVABLE | ___GFP_DMA32) * ZONES_SHIFT) \
)
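
/*
 * GFP_ZONE_BAD is a bitmap for all combinations of __GFP_DMA, __GFP_DMA32,
 * __GFP_HIGHMEM and __GFP_MOVABLE that are not permitted. One flag per
 * entry starting with bit 0. Bit is set if the combination is not allowed.
 */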
#define GFP_ZONE_BAD ( \
	1 << (___GFP_DMA | ___GFP_HIGHMEM) \
	| 1 << (___GFP_DMA | ___GFP_DMA32) \
	| 1 << (___GFP_DMA32 | ___GFP_HIGHMEM) \
	| 1 << (___GFP_DMA | ___GFP_DMA32 | ___GFP_HIGHMEM) \
	| 1 << (___GFP_MOVABLE | ___GFP_HIGHMEM | ___GFP_DMA) \
	| 1 << (___GFP_MOVABLE | ___GFP_DMA32 | ___GFP_DMA) \
	| 1 << (___GFP_MOVABLE | ___GFP_DMA32 | ___GFP_HIGHMEM) \
	| 1 << (___GFP_MOVABLE | ___GFP_DMA32 | ___GFP_DMA | ___GFP_HIGHMEM) \
)

static inline enum zone_type gfp_zone(gfp_t flags)
{
	enum zone_type z;
	int bit = (__force int) (flags & GFP_ZONEMASK);

	z = (GFP_ZONE_TABLE >> (bit * ZONES_SHIFT)) &
					 ((1 << ZONES_SHIFT) - 1);
	VM_BUG_ON((GFP_ZONE_BAD >> bit) & 1);
	return z;
}
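
/*
 * There are two zonelists per node: index 0 contains all zones and is used
 * for normal fallback, index 1 contains only zones from the local node and
 * is selected when __GFP_THISNODE is set on a NUMA build.
 */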
static inline int gfp_zonelist(gfp_t flags)
{
	if (NUMA_BUILD && unlikely(flags & __GFP_THISNODE))
		return 1;

	return 0;
}
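
/*
 * We get the zone list from the node and the gfp_mask; the zonelist gives
 * the preferred zone ordering used by the page allocator for this
 * allocation.
 *
 * For the normal case of non-DISCONTIGMEM systems the NODE_DATA() lookup
 * is optimized to &contig_page_data at compile-time.
 */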
static inline struct zonelist *node_zonelist(int nid, gfp_t flags)
{
	return NODE_DATA(nid)->node_zonelists + gfp_zonelist(flags);
}

#ifndef HAVE_ARCH_FREE_PAGE
static inline void arch_free_page(struct page *page, int order) { }
#endif
#ifndef HAVE_ARCH_ALLOC_PAGE
static inline void arch_alloc_page(struct page *page, int order) { }
#endif

struct page *
__alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
		       struct zonelist *zonelist, nodemask_t *nodemask);

static inline struct page *
__alloc_pages(gfp_t gfp_mask, unsigned int order,
		struct zonelist *zonelist)
{
	return __alloc_pages_nodemask(gfp_mask, order, zonelist, NULL);
}

static inline struct page *alloc_pages_node(int nid, gfp_t gfp_mask,
						unsigned int order)
{
	/* Unknown node is current node */
	if (nid < 0)
		nid = numa_node_id();

	return __alloc_pages(gfp_mask, order, node_zonelist(nid, gfp_mask));
}

static inline struct page *alloc_pages_exact_node(int nid, gfp_t gfp_mask,
						unsigned int order)
{
	VM_BUG_ON(nid < 0 || nid >= MAX_NUMNODES);

	return __alloc_pages(gfp_mask, order, node_zonelist(nid, gfp_mask));
}

#ifdef CONFIG_NUMA
extern struct page *alloc_pages_current(gfp_t gfp_mask, unsigned order);

static inline struct page *
alloc_pages(gfp_t gfp_mask, unsigned int order)
{
	return alloc_pages_current(gfp_mask, order);
}
extern struct page *alloc_pages_vma(gfp_t gfp_mask, int order,
			struct vm_area_struct *vma, unsigned long addr,
			int node);
#else
#define alloc_pages(gfp_mask, order) \
		alloc_pages_node(numa_node_id(), gfp_mask, order)
#define alloc_pages_vma(gfp_mask, order, vma, addr, node)	\
	alloc_pages(gfp_mask, order)
#endif
#define alloc_page(gfp_mask) alloc_pages(gfp_mask, 0)
#define alloc_page_vma(gfp_mask, vma, addr)			\
	alloc_pages_vma(gfp_mask, 0, vma, addr, numa_node_id())
#define alloc_page_vma_node(gfp_mask, vma, addr, node)		\
	alloc_pages_vma(gfp_mask, 0, vma, addr, node)
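
/*
 * There is only one page-allocator function, and two main namespaces to it.
 * The alloc_page*() variants return 'struct page *' and as such can allocate
 * highmem pages, the *get*page*() variants return virtual kernel addresses
 * to the allocated page(s).
 */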
extern unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order);
extern unsigned long get_zeroed_page(gfp_t gfp_mask);

void *alloc_pages_exact(size_t size, gfp_t gfp_mask);
void free_pages_exact(void *virt, size_t size);

void *alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask);

#define __get_free_page(gfp_mask) \
		__get_free_pages((gfp_mask), 0)

#define __get_dma_pages(gfp_mask, order) \
		__get_free_pages((gfp_mask) | GFP_DMA, (order))

extern void __free_pages(struct page *page, unsigned int order);
extern void free_pages(unsigned long addr, unsigned int order);
extern void free_hot_cold_page(struct page *page, int cold);

#define __free_page(page) __free_pages((page), 0)
#define free_page(addr) free_pages((addr), 0)

void page_alloc_init(void);
void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp);
void drain_all_pages(void);
void drain_local_pages(void *dummy);

extern gfp_t gfp_allowed_mask;

extern void pm_restrict_gfp_mask(void);
extern void pm_restore_gfp_mask(void);

#endif /* __LINUX_GFP_H */