/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef _LINUX_MEMBLOCK_H
#define _LINUX_MEMBLOCK_H
#ifdef __KERNEL__
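
/*
 * Logical memory blocks.
 *
 * Copyright (C) 2001 Peter Bergner, IBM Corp.
 */
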
#include <linux/init.h>
#include <linux/mm.h>
#include <asm/dma.h>

extern unsigned long max_low_pfn;
extern unsigned long min_low_pfn;

/*
 * The highest page frame number spanned by memory in the system.
 */
extern unsigned long max_pfn;
/*
 * The highest possible page frame number, including hot-added memory.
 */
extern unsigned long long max_possible_pfn;
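
/**
 * enum memblock_flags - attributes of a memory region
 * @MEMBLOCK_NONE: no special request
 * @MEMBLOCK_HOTPLUG: region is hotpluggable
 * @MEMBLOCK_MIRROR: region is mirrored
 * @MEMBLOCK_NOMAP: region must not be added to the kernel direct mapping
 */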
enum memblock_flags {
        MEMBLOCK_NONE           = 0x0,  /* no special request */
        MEMBLOCK_HOTPLUG        = 0x1,  /* hotpluggable region */
        MEMBLOCK_MIRROR         = 0x2,  /* mirrored region */
        MEMBLOCK_NOMAP          = 0x4,  /* don't add to kernel direct mapping */
};
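
/**
 * struct memblock_region - represents a memory region
 * @base: base address of the region
 * @size: size of the region
 * @flags: memory region attributes
 * @nid: NUMA node id
 */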
struct memblock_region {
        phys_addr_t base;
        phys_addr_t size;
        enum memblock_flags flags;
#ifdef CONFIG_NUMA
        int nid;
#endif
};
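
/**
 * struct memblock_type - collection of memory regions of certain type
 * @cnt: number of regions
 * @max: size of the allocated array
 * @total_size: size of all regions
 * @regions: array of regions
 * @name: the memory type symbolic name
 */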
struct memblock_type {
        unsigned long cnt;
        unsigned long max;
        phys_addr_t total_size;
        struct memblock_region *regions;
        char *name;
};
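
/**
 * struct memblock - memblock allocator metadata
 * @bottom_up: is bottom up direction?
 * @current_limit: physical address of the current allocation limit
 * @memory: usable memory regions
 * @reserved: reserved memory regions
 */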
struct memblock {
        bool bottom_up;
        phys_addr_t current_limit;
        struct memblock_type memory;
        struct memblock_type reserved;
};

extern struct memblock memblock;

#ifndef CONFIG_ARCH_KEEP_MEMBLOCK
#define __init_memblock __meminit
#define __initdata_memblock __meminitdata
void memblock_discard(void);
#else
#define __init_memblock
#define __initdata_memblock
static inline void memblock_discard(void) {}
#endif

void memblock_allow_resize(void);
int memblock_add_node(phys_addr_t base, phys_addr_t size, int nid);
int memblock_add(phys_addr_t base, phys_addr_t size);
int memblock_remove(phys_addr_t base, phys_addr_t size);
int memblock_free(phys_addr_t base, phys_addr_t size);
int memblock_reserve(phys_addr_t base, phys_addr_t size);
#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
int memblock_physmem_add(phys_addr_t base, phys_addr_t size);
#endif
void memblock_trim_memory(phys_addr_t align);
bool memblock_overlaps_region(struct memblock_type *type,
                              phys_addr_t base, phys_addr_t size);
int memblock_mark_hotplug(phys_addr_t base, phys_addr_t size);
int memblock_clear_hotplug(phys_addr_t base, phys_addr_t size);
int memblock_mark_mirror(phys_addr_t base, phys_addr_t size);
int memblock_mark_nomap(phys_addr_t base, phys_addr_t size);
int memblock_clear_nomap(phys_addr_t base, phys_addr_t size);

void memblock_free_all(void);
void memblock_free_ptr(void *ptr, size_t size);
void reset_node_managed_pages(pg_data_t *pgdat);
void reset_all_zones_managed_pages(void);

/* Low level functions */
void __next_mem_range(u64 *idx, int nid, enum memblock_flags flags,
                      struct memblock_type *type_a,
                      struct memblock_type *type_b, phys_addr_t *out_start,
                      phys_addr_t *out_end, int *out_nid);

void __next_mem_range_rev(u64 *idx, int nid, enum memblock_flags flags,
                          struct memblock_type *type_a,
                          struct memblock_type *type_b, phys_addr_t *out_start,
                          phys_addr_t *out_end, int *out_nid);

void __memblock_free_late(phys_addr_t base, phys_addr_t size);

#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
static inline void __next_physmem_range(u64 *idx, struct memblock_type *type,
                                        phys_addr_t *out_start,
                                        phys_addr_t *out_end)
{
        extern struct memblock_type physmem;

        __next_mem_range(idx, NUMA_NO_NODE, MEMBLOCK_NONE, &physmem, type,
                         out_start, out_end, NULL);
}
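
/**
 * for_each_physmem_range - iterate through physmem areas not included in type.
 * @i: u64 used as loop variable
 * @type: ptr to memblock_type which excludes from the iteration, can be %NULL
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 */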
#define for_each_physmem_range(i, type, p_start, p_end)                 \
        for (i = 0, __next_physmem_range(&i, type, p_start, p_end);     \
             i != (u64)ULLONG_MAX;                                      \
             __next_physmem_range(&i, type, p_start, p_end))
#endif
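
/**
 * __for_each_mem_range - iterate through memblock areas from type_a and not
 * included in type_b. Or just type_a if type_b is NULL.
 * @i: u64 used as loop variable
 * @type_a: ptr to memblock_type to iterate
 * @type_b: ptr to memblock_type which excludes from the iteration
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @p_nid: ptr to int for nid of the range, can be %NULL
 */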
#define __for_each_mem_range(i, type_a, type_b, nid, flags,             \
                             p_start, p_end, p_nid)                     \
        for (i = 0, __next_mem_range(&i, nid, flags, type_a, type_b,    \
                                     p_start, p_end, p_nid);            \
             i != (u64)ULLONG_MAX;                                      \
             __next_mem_range(&i, nid, flags, type_a, type_b,           \
                              p_start, p_end, p_nid))
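
/**
 * __for_each_mem_range_rev - reverse iterate through memblock areas from
 * type_a and not included in type_b. Or just type_a if type_b is NULL.
 * @i: u64 used as loop variable
 * @type_a: ptr to memblock_type to iterate
 * @type_b: ptr to memblock_type which excludes from the iteration
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @p_nid: ptr to int for nid of the range, can be %NULL
 */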
#define __for_each_mem_range_rev(i, type_a, type_b, nid, flags,         \
                                 p_start, p_end, p_nid)                 \
        for (i = (u64)ULLONG_MAX,                                       \
                __next_mem_range_rev(&i, nid, flags, type_a, type_b,    \
                                     p_start, p_end, p_nid);            \
             i != (u64)ULLONG_MAX;                                      \
             __next_mem_range_rev(&i, nid, flags, type_a, type_b,       \
                                  p_start, p_end, p_nid))
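
/**
 * for_each_mem_range - iterate through memory areas.
 * @i: u64 used as loop variable
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 */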
#define for_each_mem_range(i, p_start, p_end)                           \
        __for_each_mem_range(i, &memblock.memory, NULL, NUMA_NO_NODE,   \
                             MEMBLOCK_HOTPLUG, p_start, p_end, NULL)
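
/*
 * Example: a typical early-boot caller might walk every usable memory
 * range like this:
 *
 *      phys_addr_t start, end;
 *      u64 i;
 *
 *      for_each_mem_range(i, &start, &end)
 *              pr_info("memory range: [%pa-%pa]\n", &start, &end);
 */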
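
/**
 * for_each_mem_range_rev - reverse iterate through memory areas.
 * @i: u64 used as loop variable
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 */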
#define for_each_mem_range_rev(i, p_start, p_end)                       \
        __for_each_mem_range_rev(i, &memblock.memory, NULL, NUMA_NO_NODE, \
                                 MEMBLOCK_HOTPLUG, p_start, p_end, NULL)
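
/**
 * for_each_reserved_mem_range - iterate over all reserved memblock areas
 * @i: u64 used as loop variable
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 *
 * Walks over reserved areas of memblock. Available as soon as memblock
 * is initialized.
 */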
#define for_each_reserved_mem_range(i, p_start, p_end)                  \
        __for_each_mem_range(i, &memblock.reserved, NULL, NUMA_NO_NODE, \
                             MEMBLOCK_NONE, p_start, p_end, NULL)

static inline bool memblock_is_hotpluggable(struct memblock_region *m)
{
        return m->flags & MEMBLOCK_HOTPLUG;
}

static inline bool memblock_is_mirror(struct memblock_region *m)
{
        return m->flags & MEMBLOCK_MIRROR;
}

static inline bool memblock_is_nomap(struct memblock_region *m)
{
        return m->flags & MEMBLOCK_NOMAP;
}

int memblock_search_pfn_nid(unsigned long pfn, unsigned long *start_pfn,
                            unsigned long *end_pfn);
void __next_mem_pfn_range(int *idx, int nid, unsigned long *out_start_pfn,
                          unsigned long *out_end_pfn, int *out_nid);
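
/**
 * for_each_mem_pfn_range - early memory pfn range iterator
 * @i: an integer used as loop variable
 * @nid: node selector, %MAX_NUMNODES for all nodes
 * @p_start: ptr to ulong for start pfn of the range, can be %NULL
 * @p_end: ptr to ulong for end pfn of the range, can be %NULL
 * @p_nid: ptr to int for nid of the range, can be %NULL
 *
 * Walks over configured memory ranges.
 */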
#define for_each_mem_pfn_range(i, nid, p_start, p_end, p_nid)           \
        for (i = -1, __next_mem_pfn_range(&i, nid, p_start, p_end, p_nid); \
             i >= 0; __next_mem_pfn_range(&i, nid, p_start, p_end, p_nid))

#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
void __next_mem_pfn_range_in_zone(u64 *idx, struct zone *zone,
                                  unsigned long *out_spfn,
                                  unsigned long *out_epfn);
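
/**
 * for_each_free_mem_pfn_range_in_zone - iterate through zone specific free
 * memblock areas
 * @i: u64 used as loop variable
 * @zone: zone in which all of the memory blocks reside
 * @p_start: ptr to ulong for start pfn of the range, can be %NULL
 * @p_end: ptr to ulong for end pfn of the range, can be %NULL
 *
 * Walks over free (memory && !reserved) areas of memblock in a specific
 * zone.
 */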
#define for_each_free_mem_pfn_range_in_zone(i, zone, p_start, p_end)    \
        for (i = 0,                                                     \
             __next_mem_pfn_range_in_zone(&i, zone, p_start, p_end);    \
             i != U64_MAX;                                              \
             __next_mem_pfn_range_in_zone(&i, zone, p_start, p_end))
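
/**
 * for_each_free_mem_pfn_range_in_zone_from - iterate through zone specific
 * free memblock areas from a given point
 * @i: u64 used as loop variable
 * @zone: zone in which all of the memory blocks reside
 * @p_start: ptr to ulong for start pfn of the range, can be %NULL
 * @p_end: ptr to ulong for end pfn of the range, can be %NULL
 *
 * Walks over free (memory && !reserved) areas of memblock in a specific
 * zone, continuing from the current position of @i.
 */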
#define for_each_free_mem_pfn_range_in_zone_from(i, zone, p_start, p_end) \
        for (; i != U64_MAX;                                            \
             __next_mem_pfn_range_in_zone(&i, zone, p_start, p_end))

int __init deferred_page_init_max_threads(const struct cpumask *node_cpumask);

#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */
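
/**
 * for_each_free_mem_range - iterate through free memblock areas
 * @i: u64 used as loop variable
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @p_nid: ptr to int for nid of the range, can be %NULL
 *
 * Walks over free (memory && !reserved) areas of memblock. Available as
 * soon as memblock is initialized.
 */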
#define for_each_free_mem_range(i, nid, flags, p_start, p_end, p_nid)   \
        __for_each_mem_range(i, &memblock.memory, &memblock.reserved,   \
                             nid, flags, p_start, p_end, p_nid)
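
/**
 * for_each_free_mem_range_reverse - rev-iterate through free memblock areas
 * @i: u64 used as loop variable
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @p_nid: ptr to int for nid of the range, can be %NULL
 *
 * Walks over free (memory && !reserved) areas of memblock in reverse
 * order. Available as soon as memblock is initialized.
 */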
#define for_each_free_mem_range_reverse(i, nid, flags, p_start, p_end,  \
                                        p_nid)                          \
        __for_each_mem_range_rev(i, &memblock.memory, &memblock.reserved, \
                                 nid, flags, p_start, p_end, p_nid)

int memblock_set_node(phys_addr_t base, phys_addr_t size,
                      struct memblock_type *type, int nid);

#ifdef CONFIG_NUMA
static inline void memblock_set_region_node(struct memblock_region *r, int nid)
{
        r->nid = nid;
}

static inline int memblock_get_region_node(const struct memblock_region *r)
{
        return r->nid;
}
#else
static inline void memblock_set_region_node(struct memblock_region *r, int nid)
{
}

static inline int memblock_get_region_node(const struct memblock_region *r)
{
        return 0;
}
#endif

/* Flags for memblock allocation APIs */
#define MEMBLOCK_ALLOC_ANYWHERE   (~(phys_addr_t)0)
#define MEMBLOCK_ALLOC_ACCESSIBLE 0
#define MEMBLOCK_ALLOC_KASAN      1

/* We are using top down, so it is safe to use 0 here */
#define MEMBLOCK_LOW_LIMIT 0

#ifndef ARCH_LOW_ADDRESS_LIMIT
#define ARCH_LOW_ADDRESS_LIMIT 0xffffffffUL
#endif

phys_addr_t memblock_phys_alloc_range(phys_addr_t size, phys_addr_t align,
                                      phys_addr_t start, phys_addr_t end);
phys_addr_t memblock_alloc_range_nid(phys_addr_t size,
                                     phys_addr_t align, phys_addr_t start,
                                     phys_addr_t end, int nid, bool exact_nid);
phys_addr_t memblock_phys_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid);

static inline phys_addr_t memblock_phys_alloc(phys_addr_t size,
                                              phys_addr_t align)
{
        return memblock_phys_alloc_range(size, align, 0,
                                         MEMBLOCK_ALLOC_ACCESSIBLE);
}
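
/*
 * Example: a caller that needs a physical range (rather than a mapped
 * pointer) might do something like the following; a zero return value
 * indicates failure:
 *
 *      phys_addr_t pa = memblock_phys_alloc(SZ_1M, SZ_1M);
 *
 *      if (!pa)
 *              panic("failed to reserve 1MiB\n");
 */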

void *memblock_alloc_exact_nid_raw(phys_addr_t size, phys_addr_t align,
                                   phys_addr_t min_addr, phys_addr_t max_addr,
                                   int nid);
void *memblock_alloc_try_nid_raw(phys_addr_t size, phys_addr_t align,
                                 phys_addr_t min_addr, phys_addr_t max_addr,
                                 int nid);
void *memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align,
                             phys_addr_t min_addr, phys_addr_t max_addr,
                             int nid);

static __always_inline void *memblock_alloc(phys_addr_t size, phys_addr_t align)
{
        return memblock_alloc_try_nid(size, align, MEMBLOCK_LOW_LIMIT,
                                      MEMBLOCK_ALLOC_ACCESSIBLE, NUMA_NO_NODE);
}
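
/*
 * Example: an early caller that needs a zeroed, cache-line aligned table
 * before the page allocator is up might do (struct foo and nr are
 * illustrative placeholders):
 *
 *      struct foo *table;
 *
 *      table = memblock_alloc(nr * sizeof(*table), SMP_CACHE_BYTES);
 *      if (!table)
 *              panic("%s: failed to allocate table\n", __func__);
 */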

static inline void *memblock_alloc_raw(phys_addr_t size,
                                       phys_addr_t align)
{
        return memblock_alloc_try_nid_raw(size, align, MEMBLOCK_LOW_LIMIT,
                                          MEMBLOCK_ALLOC_ACCESSIBLE,
                                          NUMA_NO_NODE);
}

static inline void *memblock_alloc_from(phys_addr_t size,
                                        phys_addr_t align,
                                        phys_addr_t min_addr)
{
        return memblock_alloc_try_nid(size, align, min_addr,
                                      MEMBLOCK_ALLOC_ACCESSIBLE, NUMA_NO_NODE);
}

static inline void *memblock_alloc_low(phys_addr_t size,
                                       phys_addr_t align)
{
        return memblock_alloc_try_nid(size, align, MEMBLOCK_LOW_LIMIT,
                                      ARCH_LOW_ADDRESS_LIMIT, NUMA_NO_NODE);
}

static inline void *memblock_alloc_node(phys_addr_t size,
                                        phys_addr_t align, int nid)
{
        return memblock_alloc_try_nid(size, align, MEMBLOCK_LOW_LIMIT,
                                      MEMBLOCK_ALLOC_ACCESSIBLE, nid);
}

static inline void memblock_free_early(phys_addr_t base,
                                       phys_addr_t size)
{
        memblock_free(base, size);
}

static inline void memblock_free_early_nid(phys_addr_t base,
                                           phys_addr_t size, int nid)
{
        memblock_free(base, size);
}

static inline void memblock_free_late(phys_addr_t base, phys_addr_t size)
{
        __memblock_free_late(base, size);
}

/*
 * Set the allocation direction to bottom-up or top-down.
 */
static inline __init_memblock void memblock_set_bottom_up(bool enable)
{
        memblock.bottom_up = enable;
}

/*
 * Check whether the allocation direction is bottom-up. If true, memblock
 * allocates from the bottom of memory.
 */
static inline __init_memblock bool memblock_bottom_up(void)
{
        return memblock.bottom_up;
}

phys_addr_t memblock_phys_mem_size(void);
phys_addr_t memblock_reserved_size(void);
phys_addr_t memblock_start_of_DRAM(void);
phys_addr_t memblock_end_of_DRAM(void);
void memblock_enforce_memory_limit(phys_addr_t memory_limit);
void memblock_cap_memory_range(phys_addr_t base, phys_addr_t size);
void memblock_mem_limit_remove_map(phys_addr_t limit);
bool memblock_is_memory(phys_addr_t addr);
bool memblock_is_map_memory(phys_addr_t addr);
bool memblock_is_region_memory(phys_addr_t base, phys_addr_t size);
bool memblock_is_reserved(phys_addr_t addr);
bool memblock_is_region_reserved(phys_addr_t base, phys_addr_t size);

void memblock_dump_all(void);

/**
 * memblock_set_current_limit - Set the current allocation limit to allow
 *                              limiting allocations to what is currently
 *                              accessible during boot
 * @limit: New limit value (physical address)
 */
void memblock_set_current_limit(phys_addr_t limit);

phys_addr_t memblock_get_current_limit(void);

/*
 * pfn conversion functions
 *
 * While the memory MEMBLOCKs should always be page aligned, the reserved
 * MEMBLOCKs may not be. These accessors attempt to provide a clear idea
 * of what they return for such unaligned MEMBLOCKs.
 */

/**
 * memblock_region_memory_base_pfn - get the lowest pfn of the memory region
 * @reg: memblock_region structure
 *
 * Return: the lowest pfn intersecting with the memory region
 */
static inline unsigned long memblock_region_memory_base_pfn(const struct memblock_region *reg)
{
        return PFN_UP(reg->base);
}

/**
 * memblock_region_memory_end_pfn - get the end pfn of the memory region
 * @reg: memblock_region structure
 *
 * Return: the end pfn of the memory region
 */
static inline unsigned long memblock_region_memory_end_pfn(const struct memblock_region *reg)
{
        return PFN_DOWN(reg->base + reg->size);
}

/**
 * memblock_region_reserved_base_pfn - get the lowest pfn of the reserved region
 * @reg: memblock_region structure
 *
 * Return: the lowest pfn intersecting with the reserved region
 */
static inline unsigned long memblock_region_reserved_base_pfn(const struct memblock_region *reg)
{
        return PFN_DOWN(reg->base);
}

/**
 * memblock_region_reserved_end_pfn - get the end pfn of the reserved region
 * @reg: memblock_region structure
 *
 * Return: the end pfn of the reserved region
 */
static inline unsigned long memblock_region_reserved_end_pfn(const struct memblock_region *reg)
{
        return PFN_UP(reg->base + reg->size);
}

/**
 * for_each_mem_region - iterate over registered memory regions
 * @region: loop variable
 */
#define for_each_mem_region(region)                                     \
        for (region = memblock.memory.regions;                          \
             region < (memblock.memory.regions + memblock.memory.cnt);  \
             region++)

/**
 * for_each_reserved_mem_region - iterate over reserved memory regions
 * @region: loop variable
 */
#define for_each_reserved_mem_region(region)                            \
        for (region = memblock.reserved.regions;                        \
             region < (memblock.reserved.regions + memblock.reserved.cnt); \
             region++)

extern void *alloc_large_system_hash(const char *tablename,
                                     unsigned long bucketsize,
                                     unsigned long numentries,
                                     int scale,
                                     int flags,
                                     unsigned int *_hash_shift,
                                     unsigned int *_hash_mask,
                                     unsigned long low_limit,
                                     unsigned long high_limit);

#define HASH_EARLY      0x00000001      /* Allocating during early boot? */
#define HASH_SMALL      0x00000002      /* sub-page allocation allowed, min
                                         * shift passed via *_hash_shift */
#define HASH_ZERO       0x00000004      /* Zero allocated hash table */

/*
 * Only NUMA needs hash distribution. 64bit NUMA architectures have
 * sufficient vmalloc space.
 */
#ifdef CONFIG_NUMA
#define HASHDIST_DEFAULT IS_ENABLED(CONFIG_64BIT)
extern int hashdist;            /* Distribute hashes across NUMA nodes? */
#else
#define hashdist (0)
#endif

#ifdef CONFIG_MEMTEST
extern void early_memtest(phys_addr_t start, phys_addr_t end);
#else
static inline void early_memtest(phys_addr_t start, phys_addr_t end)
{
}
#endif

#endif /* __KERNEL__ */

#endif /* _LINUX_MEMBLOCK_H */