1#ifndef __LINUX_GFP_H
2#define __LINUX_GFP_H
3
4#include <linux/mmzone.h>
5#include <linux/stddef.h>
6#include <linux/linkage.h>
7#include <linux/topology.h>
8#include <linux/mmdebug.h>
9
10struct vm_area_struct;
11
12
13
14
15
16
17
18
19
20
/*
 * GFP bitmasks.
 *
 * Zone modifiers (see linux/mmzone.h - low four bits).  These select which
 * zone the allocation may come from; GFP_ZONEMASK collects them and is used
 * by gfp_zone() below to index GFP_ZONE_TABLE.
 */
#define __GFP_DMA ((__force gfp_t)0x01u)
#define __GFP_HIGHMEM ((__force gfp_t)0x02u)
#define __GFP_DMA32 ((__force gfp_t)0x04u)
#define __GFP_MOVABLE ((__force gfp_t)0x08u)	/* Page is movable */
#define GFP_ZONEMASK (__GFP_DMA|__GFP_HIGHMEM|__GFP_DMA32|__GFP_MOVABLE)
26
27
28
29
30
31
32
33
34
35
36
37
38
39
/*
 * Action modifiers - don't change the zoning.
 *
 * __GFP_REPEAT: Try hard to allocate the memory, but the allocation attempt
 * _might_ fail.  This depends upon the particular VM implementation.
 *
 * __GFP_NOFAIL: The VM implementation _has_ to retry infinitely: the caller
 * cannot handle allocation failures.
 *
 * __GFP_NORETRY: The VM implementation must not retry indefinitely.
 */
#define __GFP_WAIT ((__force gfp_t)0x10u)	/* Can wait and reschedule? */
#define __GFP_HIGH ((__force gfp_t)0x20u)	/* Should access emergency pools? */
#define __GFP_IO ((__force gfp_t)0x40u)	/* Can start physical IO? */
#define __GFP_FS ((__force gfp_t)0x80u)	/* Can call down to low-level FS? */
#define __GFP_COLD ((__force gfp_t)0x100u)	/* Cache-cold page required */
#define __GFP_NOWARN ((__force gfp_t)0x200u)	/* Suppress page allocation failure warning */
#define __GFP_REPEAT ((__force gfp_t)0x400u)	/* See above */
#define __GFP_NOFAIL ((__force gfp_t)0x800u)	/* See above */
#define __GFP_NORETRY ((__force gfp_t)0x1000u)	/* See above */
/* NOTE(review): 0x2000u is deliberately unused here — do not reassign without
 * checking other users of the gfp bit space. */
#define __GFP_COMP ((__force gfp_t)0x4000u)	/* Add compound page metadata */
#define __GFP_ZERO ((__force gfp_t)0x8000u)	/* Return zeroed page on success */
#define __GFP_NOMEMALLOC ((__force gfp_t)0x10000u) /* Don't use emergency reserves */
#define __GFP_HARDWALL ((__force gfp_t)0x20000u) /* Enforce hardwall cpuset memory allocs */
#define __GFP_THISNODE ((__force gfp_t)0x40000u) /* No fallback, no policies */
#define __GFP_RECLAIMABLE ((__force gfp_t)0x80000u) /* Page is reclaimable */

#ifdef CONFIG_KMEMCHECK
#define __GFP_NOTRACK ((__force gfp_t)0x200000u)  /* Don't track with kmemcheck */
#else
/* kmemcheck disabled: the flag collapses to 0 so it can always be OR-ed in */
#define __GFP_NOTRACK ((__force gfp_t)0)
#endif

/*
 * This may seem redundant, but it is distinct from __GFP_NOTRACK in that it
 * annotates known kmemcheck false positives rather than untrackable pages.
 */
#define __GFP_NOTRACK_FALSE_POSITIVE (__GFP_NOTRACK)

/* 0x200000u above (bit 21) is the highest flag, hence 22 bits in the mask */
#define __GFP_BITS_SHIFT 22
#define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1))
70
71
/*
 * Common GFP flag combinations.
 *
 * GFP_NOWAIT references GFP_ATOMIC before GFP_ATOMIC is defined below; that
 * is harmless because macros expand at the point of use, not of definition.
 */
#define GFP_NOWAIT (GFP_ATOMIC & ~__GFP_HIGH)
/* GFP_ATOMIC means both !wait (__GFP_WAIT not set) and use emergency pool */
#define GFP_ATOMIC (__GFP_HIGH)
#define GFP_NOIO (__GFP_WAIT)
#define GFP_NOFS (__GFP_WAIT | __GFP_IO)
#define GFP_KERNEL (__GFP_WAIT | __GFP_IO | __GFP_FS)
/* Kernel allocations expected to be short-lived, hence reclaimable */
#define GFP_TEMPORARY (__GFP_WAIT | __GFP_IO | __GFP_FS | \
			__GFP_RECLAIMABLE)
/* Userspace-driven allocations obey cpuset hardwall limits */
#define GFP_USER (__GFP_WAIT | __GFP_IO | __GFP_FS | __GFP_HARDWALL)
#define GFP_HIGHUSER (__GFP_WAIT | __GFP_IO | __GFP_FS | __GFP_HARDWALL | \
			__GFP_HIGHMEM)
#define GFP_HIGHUSER_MOVABLE (__GFP_WAIT | __GFP_IO | __GFP_FS | \
				__GFP_HARDWALL | __GFP_HIGHMEM | \
				__GFP_MOVABLE)

#ifdef CONFIG_NUMA
/* Allocate strictly from this node, quietly, without retrying */
#define GFP_THISNODE (__GFP_THISNODE | __GFP_NOWARN | __GFP_NORETRY)
#else
/* Single-node build: no node restriction is meaningful */
#define GFP_THISNODE ((__force gfp_t)0)
#endif
92
93
/* This mask makes up all the page-mobility-related flags */
#define GFP_MOVABLE_MASK (__GFP_RECLAIMABLE|__GFP_MOVABLE)

/* Flags that control page allocator reclaim behavior */
#define GFP_RECLAIM_MASK (__GFP_WAIT|__GFP_HIGH|__GFP_IO|__GFP_FS|\
			__GFP_NOWARN|__GFP_REPEAT|__GFP_NOFAIL|\
			__GFP_NORETRY|__GFP_NOMEMALLOC)
100
101
/*
 * Restrict the allowed gfp flags during early boot, when blocking, IO and
 * the filesystems are not yet available.
 *
 * Fully parenthesized so the expansion stays a single operand in any caller
 * expression: the previous unparenthesized form (`__GFP_BITS_MASK & ~(...)`)
 * could mis-bind `&` against adjacent operators at the point of use.
 */
#define GFP_BOOT_MASK (__GFP_BITS_MASK & ~(__GFP_WAIT|__GFP_IO|__GFP_FS))
103
104
/* Flags that constrain where an allocation may be placed */
#define GFP_CONSTRAINT_MASK (__GFP_HARDWALL|__GFP_THISNODE)

/* Flags that must never reach the slab allocators */
#define GFP_SLAB_BUG_MASK (__GFP_DMA32|__GFP_HIGHMEM|~__GFP_BITS_MASK)

/* Flag - indicates that the buffer will be suitable for DMA.
   Ignored on some platforms, used as appropriate on others */
#define GFP_DMA __GFP_DMA

/* 4GB DMA on some platforms */
#define GFP_DMA32 __GFP_DMA32
117
118
119static inline int allocflags_to_migratetype(gfp_t gfp_flags)
120{
121 WARN_ON((gfp_flags & GFP_MOVABLE_MASK) == GFP_MOVABLE_MASK);
122
123 if (unlikely(page_group_by_mobility_disabled))
124 return MIGRATE_UNMOVABLE;
125
126
127 return (((gfp_flags & __GFP_MOVABLE) != 0) << 1) |
128 ((gfp_flags & __GFP_RECLAIMABLE) != 0);
129}
130
/*
 * OPT_ZONE_* resolve to the optional zone when the kernel is configured
 * with it, and fall back to ZONE_NORMAL otherwise, so that GFP_ZONE_TABLE
 * below can be written without per-config conditionals.
 */
#ifdef CONFIG_HIGHMEM
#define OPT_ZONE_HIGHMEM ZONE_HIGHMEM
#else
#define OPT_ZONE_HIGHMEM ZONE_NORMAL
#endif

#ifdef CONFIG_ZONE_DMA
#define OPT_ZONE_DMA ZONE_DMA
#else
#define OPT_ZONE_DMA ZONE_NORMAL
#endif

#ifdef CONFIG_ZONE_DMA32
#define OPT_ZONE_DMA32 ZONE_DMA32
#else
#define OPT_ZONE_DMA32 ZONE_NORMAL
#endif
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
/*
 * GFP_ZONE_TABLE is a word-sized lookup table: for each combination of the
 * four zone-modifier bits (__GFP_DMA, __GFP_HIGHMEM, __GFP_DMA32,
 * __GFP_MOVABLE) it encodes the zone to allocate from, ZONES_SHIFT bits
 * per entry, indexed by the raw bit value of the combination (hence the
 * `flag * ZONES_SHIFT` shift amounts).  gfp_zone() extracts an entry by
 * shifting and masking.
 *
 * 16 combinations times ZONES_SHIFT bits each must fit in one long.
 */
#if 16 * ZONES_SHIFT > BITS_PER_LONG
#error ZONES_SHIFT too large to create GFP_ZONE_TABLE integer
#endif

#define GFP_ZONE_TABLE ( \
	(ZONE_NORMAL << 0 * ZONES_SHIFT)				\
	| (OPT_ZONE_DMA << __GFP_DMA * ZONES_SHIFT)			\
	| (OPT_ZONE_HIGHMEM << __GFP_HIGHMEM * ZONES_SHIFT)		\
	| (OPT_ZONE_DMA32 << __GFP_DMA32 * ZONES_SHIFT)			\
	| (ZONE_NORMAL << __GFP_MOVABLE * ZONES_SHIFT)			\
	| (OPT_ZONE_DMA << (__GFP_MOVABLE | __GFP_DMA) * ZONES_SHIFT)	\
	| (ZONE_MOVABLE << (__GFP_MOVABLE | __GFP_HIGHMEM) * ZONES_SHIFT)\
	| (OPT_ZONE_DMA32 << (__GFP_MOVABLE | __GFP_DMA32) * ZONES_SHIFT)\
)
196
197
198
199
200
201
202
/*
 * GFP_ZONE_BAD is a bitmap (indexed like GFP_ZONE_TABLE) of the
 * zone-modifier combinations that make no sense: more than one of
 * __GFP_DMA / __GFP_DMA32 / __GFP_HIGHMEM at once, with or without
 * __GFP_MOVABLE.  gfp_zone() rejects flags that hit this mask.
 */
#define GFP_ZONE_BAD ( \
	1 << (__GFP_DMA | __GFP_HIGHMEM)				\
	| 1 << (__GFP_DMA | __GFP_DMA32)				\
	| 1 << (__GFP_DMA32 | __GFP_HIGHMEM)				\
	| 1 << (__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM)		\
	| 1 << (__GFP_MOVABLE | __GFP_HIGHMEM | __GFP_DMA)		\
	| 1 << (__GFP_MOVABLE | __GFP_DMA32 | __GFP_DMA)		\
	| 1 << (__GFP_MOVABLE | __GFP_DMA32 | __GFP_HIGHMEM)		\
	| 1 << (__GFP_MOVABLE | __GFP_DMA32 | __GFP_DMA | __GFP_HIGHMEM)\
)
213
/*
 * Return the zone_type to allocate from for @flags, by looking up the
 * zone-modifier bits in GFP_ZONE_TABLE.  An invalid bit combination is
 * caught at build time when the flags are compile-time constant, or at
 * run time under CONFIG_DEBUG_VM otherwise.
 */
static inline enum zone_type gfp_zone(gfp_t flags)
{
	enum zone_type z;
	/* only the zone-selection bits index the table */
	int bit = flags & GFP_ZONEMASK;

	z = (GFP_ZONE_TABLE >> (bit * ZONES_SHIFT)) &
					 ((1 << ZONES_SHIFT) - 1);

	if (__builtin_constant_p(bit))
		MAYBE_BUILD_BUG_ON((GFP_ZONE_BAD >> bit) & 1);
	else {
#ifdef CONFIG_DEBUG_VM
		BUG_ON((GFP_ZONE_BAD >> bit) & 1);
#endif
	}
	return z;
}
231
232
233
234
235
236
237
238
239static inline int gfp_zonelist(gfp_t flags)
240{
241 if (NUMA_BUILD && unlikely(flags & __GFP_THISNODE))
242 return 1;
243
244 return 0;
245}
246
247
248
249
250
251
252
253
254
255
/*
 * Return the zonelist of node @nid that matches @flags: the node's
 * node_zonelists array indexed by gfp_zonelist() (fallback list vs.
 * __GFP_THISNODE no-fallback list).
 */
static inline struct zonelist *node_zonelist(int nid, gfp_t flags)
{
	return NODE_DATA(nid)->node_zonelists + gfp_zonelist(flags);
}
260
#ifndef HAVE_ARCH_FREE_PAGE
/* Arch hook invoked when a page is freed; no-op unless the arch overrides. */
static inline void arch_free_page(struct page *page, int order) { }
#endif
#ifndef HAVE_ARCH_ALLOC_PAGE
/* Arch hook invoked when a page is allocated; no-op unless overridden. */
static inline void arch_alloc_page(struct page *page, int order) { }
#endif
267
/* Core page allocator entry point (defined elsewhere). */
struct page *
__alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
		       struct zonelist *zonelist, nodemask_t *nodemask);

/* Allocate 2^@order pages from @zonelist with no nodemask restriction. */
static inline struct page *
__alloc_pages(gfp_t gfp_mask, unsigned int order,
		struct zonelist *zonelist)
{
	return __alloc_pages_nodemask(gfp_mask, order, zonelist, NULL);
}
278
279static inline struct page *alloc_pages_node(int nid, gfp_t gfp_mask,
280 unsigned int order)
281{
282
283 if (nid < 0)
284 nid = numa_node_id();
285
286 return __alloc_pages(gfp_mask, order, node_zonelist(nid, gfp_mask));
287}
288
/*
 * Like alloc_pages_node(), but @nid must already be a valid node id —
 * no negative-nid fixup is performed; validity is asserted under
 * CONFIG_DEBUG_VM via VM_BUG_ON.
 */
static inline struct page *alloc_pages_exact_node(int nid, gfp_t gfp_mask,
						unsigned int order)
{
	VM_BUG_ON(nid < 0 || nid >= MAX_NUMNODES);

	return __alloc_pages(gfp_mask, order, node_zonelist(nid, gfp_mask));
}
296
#ifdef CONFIG_NUMA
/* NUMA: allocation obeys the current task's memory policy (defined elsewhere) */
extern struct page *alloc_pages_current(gfp_t gfp_mask, unsigned order);

static inline struct page *
alloc_pages(gfp_t gfp_mask, unsigned int order)
{
	return alloc_pages_current(gfp_mask, order);
}
/* Allocate a page for @vma at @addr, honouring the VMA's policy */
extern struct page *alloc_page_vma(gfp_t gfp_mask,
			struct vm_area_struct *vma, unsigned long addr);
#else
/* !NUMA: only one node, so vma/policy variants collapse to plain allocation */
#define alloc_pages(gfp_mask, order) \
		alloc_pages_node(numa_node_id(), gfp_mask, order)
#define alloc_page_vma(gfp_mask, vma, addr) alloc_pages(gfp_mask, 0)
#endif
#define alloc_page(gfp_mask) alloc_pages(gfp_mask, 0)
313
/* As alloc_pages(), but return the kernel virtual address of the pages */
extern unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order);
extern unsigned long get_zeroed_page(gfp_t gfp_mask);

/* Allocate/free a region sized in bytes rather than in page orders */
void *alloc_pages_exact(size_t size, gfp_t gfp_mask);
void free_pages_exact(void *virt, size_t size);

#define __get_free_page(gfp_mask) \
		__get_free_pages((gfp_mask),0)

#define __get_dma_pages(gfp_mask, order) \
		__get_free_pages((gfp_mask) | GFP_DMA,(order))

extern void __free_pages(struct page *page, unsigned int order);
extern void free_pages(unsigned long addr, unsigned int order);
extern void free_hot_page(struct page *page);

#define __free_page(page) __free_pages((page), 0)
#define free_page(addr) free_pages((addr),0)

void page_alloc_init(void);
/* Drain per-cpu page lists: one pcp, all cpus, or the local cpu only */
void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp);
void drain_all_pages(void);
void drain_local_pages(void *dummy);
337
/* The set of gfp flags the allocator currently honours (cf. GFP_BOOT_MASK) */
extern gfp_t gfp_allowed_mask;

/* Replace the allowed-gfp mask; caller is responsible for any ordering —
 * no locking or memory barrier is provided here. */
static inline void set_gfp_allowed_mask(gfp_t mask)
{
	gfp_allowed_mask = mask;
}
344
345#endif
346