#ifndef __LINUX_GFP_H
#define __LINUX_GFP_H

#include <linux/mmzone.h>
#include <linux/stddef.h>
#include <linux/linkage.h>
#include <linux/topology.h>
#include <linux/mmdebug.h>

struct vm_area_struct;

/* Plain integer GFP bitmasks. Do not use this directly. */
#define ___GFP_DMA		0x01u
#define ___GFP_HIGHMEM		0x02u
#define ___GFP_DMA32		0x04u
#define ___GFP_MOVABLE		0x08u
#define ___GFP_WAIT		0x10u
#define ___GFP_HIGH		0x20u
#define ___GFP_IO		0x40u
#define ___GFP_FS		0x80u
#define ___GFP_COLD		0x100u
#define ___GFP_NOWARN		0x200u
#define ___GFP_REPEAT		0x400u
#define ___GFP_NOFAIL		0x800u
#define ___GFP_NORETRY		0x1000u
#define ___GFP_MEMALLOC		0x2000u
#define ___GFP_COMP		0x4000u
#define ___GFP_ZERO		0x8000u
#define ___GFP_NOMEMALLOC	0x10000u
#define ___GFP_HARDWALL		0x20000u
#define ___GFP_THISNODE		0x40000u
#define ___GFP_RECLAIMABLE	0x80000u
#define ___GFP_KMEMCG		0x100000u
#define ___GFP_NOTRACK		0x200000u
#define ___GFP_NO_KSWAPD	0x400000u
#define ___GFP_OTHER_NODE	0x800000u
#define ___GFP_WRITE		0x1000000u

/*
 * GFP bitmasks.
 *
 * Zone modifiers (see linux/mmzone.h - low three bits)
 *
 * Do not put any conditional on these. If necessary modify the definitions
 * without the underscores and use them consistently. The definitions here may
 * be used in bit comparisons.
 */
#define __GFP_DMA	((__force gfp_t)___GFP_DMA)
#define __GFP_HIGHMEM	((__force gfp_t)___GFP_HIGHMEM)
#define __GFP_DMA32	((__force gfp_t)___GFP_DMA32)
#define __GFP_MOVABLE	((__force gfp_t)___GFP_MOVABLE)	/* Page is movable */
#define GFP_ZONEMASK	(__GFP_DMA|__GFP_HIGHMEM|__GFP_DMA32|__GFP_MOVABLE)

/*
 * Action modifiers - doesn't change the zoning
 *
 * __GFP_REPEAT: Try hard to allocate the memory, but the allocation attempt
 * _might_ fail.  This depends upon the particular VM implementation.
 *
 * __GFP_NOFAIL: The VM implementation _has_ to retry infinitely: the caller
 * cannot handle allocation failures.  This modifier is deprecated and no new
 * users should be added.
 *
 * __GFP_NORETRY: The VM implementation must not retry indefinitely.
 *
 * __GFP_MOVABLE: Flag that this page will be movable by the page migration
 * mechanism or reclaimed
 */
#define __GFP_WAIT	((__force gfp_t)___GFP_WAIT)	/* Can wait and reschedule? */
#define __GFP_HIGH	((__force gfp_t)___GFP_HIGH)	/* Should access emergency pools? */
#define __GFP_IO	((__force gfp_t)___GFP_IO)	/* Can start physical IO? */
#define __GFP_FS	((__force gfp_t)___GFP_FS)	/* Can call down to low-level FS? */
#define __GFP_COLD	((__force gfp_t)___GFP_COLD)	/* Cache-cold page required */
#define __GFP_NOWARN	((__force gfp_t)___GFP_NOWARN)	/* Suppress page allocation failure warning */
#define __GFP_REPEAT	((__force gfp_t)___GFP_REPEAT)	/* See above */
#define __GFP_NOFAIL	((__force gfp_t)___GFP_NOFAIL)	/* See above */
#define __GFP_NORETRY	((__force gfp_t)___GFP_NORETRY)	/* See above */
#define __GFP_MEMALLOC	((__force gfp_t)___GFP_MEMALLOC)	/* Allow access to emergency reserves */
#define __GFP_COMP	((__force gfp_t)___GFP_COMP)	/* Add compound page metadata */
#define __GFP_ZERO	((__force gfp_t)___GFP_ZERO)	/* Return zeroed page on success */
#define __GFP_NOMEMALLOC ((__force gfp_t)___GFP_NOMEMALLOC) /* Don't use emergency reserves.
							 * This takes precedence over the
							 * __GFP_MEMALLOC flag if both are
							 * set
							 */
#define __GFP_HARDWALL	((__force gfp_t)___GFP_HARDWALL) /* Enforce hardwall cpuset memory allocs */
#define __GFP_THISNODE	((__force gfp_t)___GFP_THISNODE) /* No fallback, no policies */
#define __GFP_RECLAIMABLE ((__force gfp_t)___GFP_RECLAIMABLE) /* Page is reclaimable */
#define __GFP_NOTRACK	((__force gfp_t)___GFP_NOTRACK)	/* Don't track with kmemcheck */

#define __GFP_NO_KSWAPD	((__force gfp_t)___GFP_NO_KSWAPD)
#define __GFP_OTHER_NODE ((__force gfp_t)___GFP_OTHER_NODE) /* On behalf of other node */
#define __GFP_KMEMCG	((__force gfp_t)___GFP_KMEMCG)	/* Allocation comes from a memcg-accounted resource */
#define __GFP_WRITE	((__force gfp_t)___GFP_WRITE)	/* Allocator intends to dirty page */

/*
 * This may seem redundant, but it's a way of annotating false positives vs.
 * allocations that simply cannot be supported (e.g. page tables).
 */
#define __GFP_NOTRACK_FALSE_POSITIVE (__GFP_NOTRACK)

#define __GFP_BITS_SHIFT 25	/* Room for N __GFP_FOO bits */
#define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1))

/* This equals 0, but use constants in case they ever change */
#define GFP_NOWAIT	(GFP_ATOMIC & ~__GFP_HIGH)
/* GFP_ATOMIC means both !wait (__GFP_WAIT not set) and use emergency pool */
#define GFP_ATOMIC	(__GFP_HIGH)
#define GFP_NOIO	(__GFP_WAIT)
#define GFP_NOFS	(__GFP_WAIT | __GFP_IO)
#define GFP_KERNEL	(__GFP_WAIT | __GFP_IO | __GFP_FS)
#define GFP_TEMPORARY	(__GFP_WAIT | __GFP_IO | __GFP_FS | \
			 __GFP_RECLAIMABLE)
#define GFP_USER	(__GFP_WAIT | __GFP_IO | __GFP_FS | __GFP_HARDWALL)
#define GFP_HIGHUSER	(__GFP_WAIT | __GFP_IO | __GFP_FS | __GFP_HARDWALL | \
			 __GFP_HIGHMEM)
#define GFP_HIGHUSER_MOVABLE	(__GFP_WAIT | __GFP_IO | __GFP_FS | \
				 __GFP_HARDWALL | __GFP_HIGHMEM | \
				 __GFP_MOVABLE)
#define GFP_IOFS	(__GFP_IO | __GFP_FS)
#define GFP_TRANSHUGE	(GFP_HIGHUSER_MOVABLE | __GFP_COMP | \
			 __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN | \
			 __GFP_NO_KSWAPD)
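
/*
 * Illustrative sketch (not part of this header's API): choosing between
 * the composite masks above. GFP_KERNEL may sleep and start I/O, so it
 * suits process context; GFP_ATOMIC never sleeps and may dip into the
 * emergency pools, so it suits interrupt/atomic context. The helper name
 * below is hypothetical; kmalloc() comes from linux/slab.h.
 *
 *	static void *example_alloc(size_t len, bool atomic_ctx)
 *	{
 *		return kmalloc(len, atomic_ctx ? GFP_ATOMIC : GFP_KERNEL);
 *	}
 */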

#ifdef CONFIG_NUMA
#define GFP_THISNODE	(__GFP_THISNODE | __GFP_NOWARN | __GFP_NORETRY)
#else
#define GFP_THISNODE	((__force gfp_t)0)
#endif

/* This mask makes up all the page movable related flags */
#define GFP_MOVABLE_MASK (__GFP_RECLAIMABLE|__GFP_MOVABLE)

/* Control page allocator reclaim behavior */
#define GFP_RECLAIM_MASK (__GFP_WAIT|__GFP_HIGH|__GFP_IO|__GFP_FS|\
			__GFP_NOWARN|__GFP_REPEAT|__GFP_NOFAIL|\
			__GFP_NORETRY|__GFP_MEMALLOC|__GFP_NOMEMALLOC)

/* Control slab gfp mask during early boot */
#define GFP_BOOT_MASK (__GFP_BITS_MASK & ~(__GFP_WAIT|__GFP_IO|__GFP_FS))

/* Control allocation constraints */
#define GFP_CONSTRAINT_MASK (__GFP_HARDWALL|__GFP_THISNODE)

/* Do not use these with a slab allocator */
#define GFP_SLAB_BUG_MASK (__GFP_DMA32|__GFP_HIGHMEM|~__GFP_BITS_MASK)

/* Flag - indicates that the buffer will be suitable for DMA.  Ignored on some
   platforms, used as appropriate on others */
#define GFP_DMA		__GFP_DMA

/* 4GB DMA on some platforms */
#define GFP_DMA32	__GFP_DMA32

/* Convert GFP flags to their corresponding migrate type */
static inline int allocflags_to_migratetype(gfp_t gfp_flags)
{
	WARN_ON((gfp_flags & GFP_MOVABLE_MASK) == GFP_MOVABLE_MASK);

	if (unlikely(page_group_by_mobility_disabled))
		return MIGRATE_UNMOVABLE;

	/* Group based on mobility */
	return (((gfp_flags & __GFP_MOVABLE) != 0) << 1) |
		((gfp_flags & __GFP_RECLAIMABLE) != 0);
}
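
/*
 * Illustrative results, assuming the usual MIGRATE_* ordering from
 * linux/mmzone.h (UNMOVABLE=0, RECLAIMABLE=1, MOVABLE=2):
 *
 *	allocflags_to_migratetype(GFP_KERNEL)			  == MIGRATE_UNMOVABLE
 *	allocflags_to_migratetype(GFP_KERNEL | __GFP_RECLAIMABLE) == MIGRATE_RECLAIMABLE
 *	allocflags_to_migratetype(GFP_HIGHUSER_MOVABLE)		  == MIGRATE_MOVABLE
 */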

#ifdef CONFIG_HIGHMEM
#define OPT_ZONE_HIGHMEM ZONE_HIGHMEM
#else
#define OPT_ZONE_HIGHMEM ZONE_NORMAL
#endif

#ifdef CONFIG_ZONE_DMA
#define OPT_ZONE_DMA ZONE_DMA
#else
#define OPT_ZONE_DMA ZONE_NORMAL
#endif

#ifdef CONFIG_ZONE_DMA32
#define OPT_ZONE_DMA32 ZONE_DMA32
#else
#define OPT_ZONE_DMA32 ZONE_NORMAL
#endif

/*
 * GFP_ZONE_TABLE is a word size bitstring that is used for looking up the
 * zone to use given the lowest 4 bits of gfp_t. Entries are ZONES_SHIFT
 * long and there are 16 of them to cover all possible combinations of
 * __GFP_DMA, __GFP_DMA32, __GFP_HIGHMEM and __GFP_MOVABLE.
 *
 * The zone fallback order is MOVABLE=>HIGHMEM=>NORMAL=>DMA32=>DMA.
 * But GFP_MOVABLE is not only a zone specifier but also an allocation
 * policy. Therefore __GFP_MOVABLE plus another zone selector is valid.
 * Only 1 bit of the lowest 3 bits (DMA,DMA32,HIGHMEM) can be set to "1".
 *
 *       bit       result
 *       =================
 *       0x0    => NORMAL
 *       0x1    => DMA or NORMAL
 *       0x2    => HIGHMEM or NORMAL
 *       0x3    => BAD (DMA+HIGHMEM)
 *       0x4    => DMA32 or DMA or NORMAL
 *       0x5    => BAD (DMA+DMA32)
 *       0x6    => BAD (HIGHMEM+DMA32)
 *       0x7    => BAD (HIGHMEM+DMA32+DMA)
 *       0x8    => NORMAL (MOVABLE+0)
 *       0x9    => DMA or NORMAL (MOVABLE+DMA)
 *       0xa    => MOVABLE (Movable is valid only if HIGHMEM is set too)
 *       0xb    => BAD (MOVABLE+HIGHMEM+DMA)
 *       0xc    => DMA32 (MOVABLE+DMA32)
 *       0xd    => BAD (MOVABLE+DMA32+DMA)
 *       0xe    => BAD (MOVABLE+DMA32+HIGHMEM)
 *       0xf    => BAD (MOVABLE+DMA32+HIGHMEM+DMA)
 *
 * ZONES_SHIFT must be <= 2 on 32 bit platforms.
 */

#if 16 * ZONES_SHIFT > BITS_PER_LONG
#error ZONES_SHIFT too large to create GFP_ZONE_TABLE integer
#endif

#define GFP_ZONE_TABLE ( \
	(ZONE_NORMAL << 0 * ZONES_SHIFT)				      \
	| (OPT_ZONE_DMA << ___GFP_DMA * ZONES_SHIFT)			      \
	| (OPT_ZONE_HIGHMEM << ___GFP_HIGHMEM * ZONES_SHIFT)		      \
	| (OPT_ZONE_DMA32 << ___GFP_DMA32 * ZONES_SHIFT)		      \
	| (ZONE_NORMAL << ___GFP_MOVABLE * ZONES_SHIFT)			      \
	| (OPT_ZONE_DMA << (___GFP_MOVABLE | ___GFP_DMA) * ZONES_SHIFT)	      \
	| (ZONE_MOVABLE << (___GFP_MOVABLE | ___GFP_HIGHMEM) * ZONES_SHIFT)  \
	| (OPT_ZONE_DMA32 << (___GFP_MOVABLE | ___GFP_DMA32) * ZONES_SHIFT)  \
)
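
/*
 * Worked example of the table lookup: __GFP_HIGHMEM alone is bit pattern
 * 0x2, so gfp_zone() below extracts entry 2 of the table,
 *
 *	(GFP_ZONE_TABLE >> (2 * ZONES_SHIFT)) & ((1 << ZONES_SHIFT) - 1)
 *
 * which is OPT_ZONE_HIGHMEM: ZONE_HIGHMEM with CONFIG_HIGHMEM, otherwise
 * ZONE_NORMAL.
 */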

/*
 * GFP_ZONE_BAD is a bitmap for all combinations of __GFP_DMA, __GFP_DMA32
 * __GFP_HIGHMEM and __GFP_MOVABLE that are not permitted. One flag per
 * entry starting with bit 0. Bit is set if the combination is not
 * allowed.
 */
#define GFP_ZONE_BAD ( \
	1 << (___GFP_DMA | ___GFP_HIGHMEM)				      \
	| 1 << (___GFP_DMA | ___GFP_DMA32)				      \
	| 1 << (___GFP_DMA32 | ___GFP_HIGHMEM)				      \
	| 1 << (___GFP_DMA | ___GFP_DMA32 | ___GFP_HIGHMEM)		      \
	| 1 << (___GFP_MOVABLE | ___GFP_HIGHMEM | ___GFP_DMA)		      \
	| 1 << (___GFP_MOVABLE | ___GFP_DMA32 | ___GFP_DMA)		      \
	| 1 << (___GFP_MOVABLE | ___GFP_DMA32 | ___GFP_HIGHMEM)	      \
	| 1 << (___GFP_MOVABLE | ___GFP_DMA32 | ___GFP_DMA | ___GFP_HIGHMEM) \
)

static inline enum zone_type gfp_zone(gfp_t flags)
{
	enum zone_type z;
	int bit = (__force int) (flags & GFP_ZONEMASK);

	z = (GFP_ZONE_TABLE >> (bit * ZONES_SHIFT)) &
					 ((1 << ZONES_SHIFT) - 1);
	VM_BUG_ON((GFP_ZONE_BAD >> bit) & 1);
	return z;
}
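
/*
 * Illustrative results (configuration dependent):
 *
 *	gfp_zone(GFP_KERNEL)		== ZONE_NORMAL
 *	gfp_zone(GFP_KERNEL | GFP_DMA)	== OPT_ZONE_DMA
 *	gfp_zone(GFP_HIGHUSER_MOVABLE)	== ZONE_MOVABLE
 */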

/*
 * There is only one page-allocator function, and two main namespaces to
 * it. The alloc_page*() variants return 'struct page *' and as such
 * can allocate highmem pages, the *get*page*() variants return
 * virtual kernel addresses to the allocated page(s).
 */

static inline int gfp_zonelist(gfp_t flags)
{
	if (IS_ENABLED(CONFIG_NUMA) && unlikely(flags & __GFP_THISNODE))
		return 1;

	return 0;
}

/*
 * We get the zone list from the current node and the gfp_mask.
 * This zone list contains a maximum of MAXNODES*MAX_NR_ZONES zones.
 * There are two zonelists per node, one for all zones with memory and
 * one containing just zones from the node the zonelist belongs to.
 *
 * For the normal case of non-DISCONTIGMEM systems the NODE_DATA() gets
 * optimized to &contig_page_data at compile-time.
 */
static inline struct zonelist *node_zonelist(int nid, gfp_t flags)
{
	return NODE_DATA(nid)->node_zonelists + gfp_zonelist(flags);
}

#ifndef HAVE_ARCH_FREE_PAGE
static inline void arch_free_page(struct page *page, int order) { }
#endif
#ifndef HAVE_ARCH_ALLOC_PAGE
static inline void arch_alloc_page(struct page *page, int order) { }
#endif

struct page *
__alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
		       struct zonelist *zonelist, nodemask_t *nodemask);

static inline struct page *
__alloc_pages(gfp_t gfp_mask, unsigned int order,
		struct zonelist *zonelist)
{
	return __alloc_pages_nodemask(gfp_mask, order, zonelist, NULL);
}

static inline struct page *alloc_pages_node(int nid, gfp_t gfp_mask,
						unsigned int order)
{
	/* Unknown node is current node */
	if (nid < 0)
		nid = numa_node_id();

	return __alloc_pages(gfp_mask, order, node_zonelist(nid, gfp_mask));
}
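
/*
 * Illustrative sketch: allocating an order-0 page on (or near) a given
 * node, falling back to the current node when nid is negative (e.g.
 * NUMA_NO_NODE). The function name is hypothetical.
 *
 *	static struct page *example_node_page(int nid)
 *	{
 *		return alloc_pages_node(nid, GFP_KERNEL, 0);
 *	}
 */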

static inline struct page *alloc_pages_exact_node(int nid, gfp_t gfp_mask,
						unsigned int order)
{
	VM_BUG_ON(nid < 0 || nid >= MAX_NUMNODES || !node_online(nid));

	return __alloc_pages(gfp_mask, order, node_zonelist(nid, gfp_mask));
}

#ifdef CONFIG_NUMA
extern struct page *alloc_pages_current(gfp_t gfp_mask, unsigned order);

static inline struct page *
alloc_pages(gfp_t gfp_mask, unsigned int order)
{
	return alloc_pages_current(gfp_mask, order);
}
extern struct page *alloc_pages_vma(gfp_t gfp_mask, int order,
			struct vm_area_struct *vma, unsigned long addr,
			int node);
#else
#define alloc_pages(gfp_mask, order) \
		alloc_pages_node(numa_node_id(), gfp_mask, order)
#define alloc_pages_vma(gfp_mask, order, vma, addr, node)	\
	alloc_pages(gfp_mask, order)
#endif
#define alloc_page(gfp_mask) alloc_pages(gfp_mask, 0)
#define alloc_page_vma(gfp_mask, vma, addr)			\
	alloc_pages_vma(gfp_mask, 0, vma, addr, numa_node_id())
#define alloc_page_vma_node(gfp_mask, vma, addr, node)		\
	alloc_pages_vma(gfp_mask, 0, vma, addr, node)

extern unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order);
extern unsigned long get_zeroed_page(gfp_t gfp_mask);

void *alloc_pages_exact(size_t size, gfp_t gfp_mask);
void free_pages_exact(void *virt, size_t size);

void *alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask);

#define __get_free_page(gfp_mask) \
		__get_free_pages((gfp_mask), 0)

#define __get_dma_pages(gfp_mask, order) \
		__get_free_pages((gfp_mask) | GFP_DMA, (order))
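
/*
 * Illustrative sketch: the *get*page* variants return kernel virtual
 * addresses, so they must not be used with __GFP_HIGHMEM. A zeroed
 * buffer of 2^order pages might be handled like this:
 *
 *	unsigned long buf = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
 *	if (!buf)
 *		return -ENOMEM;
 *	...
 *	free_pages(buf, order);
 */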

extern void __free_pages(struct page *page, unsigned int order);
extern void free_pages(unsigned long addr, unsigned int order);
extern void free_hot_cold_page(struct page *page, int cold);
extern void free_hot_cold_page_list(struct list_head *list, int cold);

extern void __free_memcg_kmem_pages(struct page *page, unsigned int order);
extern void free_memcg_kmem_pages(unsigned long addr, unsigned int order);

#define __free_page(page) __free_pages((page), 0)
#define free_page(addr) free_pages((addr), 0)

void page_alloc_init(void);
void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp);
void drain_all_pages(void);
void drain_local_pages(void *dummy);

/*
 * gfp_allowed_mask is set to GFP_BOOT_MASK during early boot to restrict what
 * GFP flags are used before interrupts are enabled. Once interrupts are
 * enabled, it is set to __GFP_BITS_MASK while the system is running. During
 * hibernation, it is used by PM to avoid I/O during memory allocation while
 * devices are suspended.
 */
extern gfp_t gfp_allowed_mask;

/* Returns true if the gfp_mask allows use of ALLOC_NO_WATERMARK */
bool gfp_pfmemalloc_allowed(gfp_t gfp_mask);

extern void pm_restrict_gfp_mask(void);
extern void pm_restore_gfp_mask(void);

#ifdef CONFIG_PM_SLEEP
extern bool pm_suspended_storage(void);
#else
static inline bool pm_suspended_storage(void)
{
	return false;
}
#endif

#ifdef CONFIG_CMA

/* The below functions must be run on a range from a single zone. */
extern int alloc_contig_range(unsigned long start, unsigned long end,
			      unsigned migratetype);
extern void free_contig_range(unsigned long pfn, unsigned nr_pages);
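
/*
 * Illustrative sketch (assumes CONFIG_CMA): callers pass a PFN range that
 * lies within one zone, plus the pageblock migratetype, e.g. MIGRATE_CMA.
 * start_pfn/end_pfn are hypothetical variables.
 *
 *	int ret = alloc_contig_range(start_pfn, end_pfn, MIGRATE_CMA);
 *	if (!ret)
 *		free_contig_range(start_pfn, end_pfn - start_pfn);
 */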

/* CMA stuff */
extern void init_cma_reserved_pageblock(struct page *page);

#endif

#endif /* __LINUX_GFP_H */