/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef _LINUX_MEMBLOCK_H
#define _LINUX_MEMBLOCK_H
#ifdef __KERNEL__

/*
 * Logical memory blocks.
 *
 * Copyright (C) 2001 Peter Bergner, IBM Corp.
 */

#include <linux/init.h>
#include <linux/mm.h>
#include <asm/dma.h>

extern unsigned long max_low_pfn;
extern unsigned long min_low_pfn;

/*
 * highest page
 */
extern unsigned long max_pfn;
/*
 * highest possible page
 */
extern unsigned long long max_possible_pfn;

/**
 * enum memblock_flags - definition of memory region attributes
 * @MEMBLOCK_NONE: no special request
 * @MEMBLOCK_HOTPLUG: hotpluggable region
 * @MEMBLOCK_MIRROR: mirrored region
 * @MEMBLOCK_NOMAP: don't add to kernel direct mapping
 */
enum memblock_flags {
	MEMBLOCK_NONE = 0x0,		/* no special request */
	MEMBLOCK_HOTPLUG = 0x1,		/* hotpluggable region */
	MEMBLOCK_MIRROR = 0x2,		/* mirrored region */
	MEMBLOCK_NOMAP = 0x4,		/* don't add to kernel direct mapping */
};

/**
 * struct memblock_region - represents a memory region
 * @base: base address of the region
 * @size: size of the region
 * @flags: memory region attributes
 * @nid: NUMA node id
 */
struct memblock_region {
	phys_addr_t base;
	phys_addr_t size;
	enum memblock_flags flags;
#ifdef CONFIG_NUMA
	int nid;
#endif
};

/**
 * struct memblock_type - collection of memory regions of certain type
 * @cnt: number of regions
 * @max: size of the allocated array
 * @total_size: size of all regions
 * @regions: array of regions
 * @name: the memory type symbolic name
 */
struct memblock_type {
	unsigned long cnt;
	unsigned long max;
	phys_addr_t total_size;
	struct memblock_region *regions;
	char *name;
};

/**
 * struct memblock - memblock allocator metadata
 * @bottom_up: is bottom up direction?
 * @current_limit: physical address of the current allocation limit
 * @memory: usable memory regions
 * @reserved: reserved memory regions
 */
struct memblock {
	bool bottom_up;		/* is bottom up direction? */
	phys_addr_t current_limit;
	struct memblock_type memory;
	struct memblock_type reserved;
};

extern struct memblock memblock;

#ifndef CONFIG_ARCH_KEEP_MEMBLOCK
#define __init_memblock __meminit
#define __initdata_memblock __meminitdata
void memblock_discard(void);
#else
#define __init_memblock
#define __initdata_memblock
static inline void memblock_discard(void) {}
#endif

phys_addr_t memblock_find_in_range(phys_addr_t start, phys_addr_t end,
				   phys_addr_t size, phys_addr_t align);
void memblock_allow_resize(void);
int memblock_add_node(phys_addr_t base, phys_addr_t size, int nid);
int memblock_add(phys_addr_t base, phys_addr_t size);
int memblock_remove(phys_addr_t base, phys_addr_t size);
int memblock_free(phys_addr_t base, phys_addr_t size);
int memblock_reserve(phys_addr_t base, phys_addr_t size);
#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
int memblock_physmem_add(phys_addr_t base, phys_addr_t size);
#endif
void memblock_trim_memory(phys_addr_t align);
bool memblock_overlaps_region(struct memblock_type *type,
			      phys_addr_t base, phys_addr_t size);
int memblock_mark_hotplug(phys_addr_t base, phys_addr_t size);
int memblock_clear_hotplug(phys_addr_t base, phys_addr_t size);
int memblock_mark_mirror(phys_addr_t base, phys_addr_t size);
int memblock_mark_nomap(phys_addr_t base, phys_addr_t size);
int memblock_clear_nomap(phys_addr_t base, phys_addr_t size);
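
/*
 * Illustrative sketch (not part of the original header): early
 * architecture code typically registers RAM with memblock_add() and
 * then carves out firmware and kernel ranges with memblock_reserve().
 * All addresses and sizes below are hypothetical.
 *
 *	memblock_add(0x80000000, 0x40000000);	     register 1 GiB of RAM
 *	memblock_reserve(0x80000000, 0x00100000);    keep first 1 MiB out
 *	memblock_mark_nomap(0x80100000, 0x00010000); leave 64 KiB unmapped
 */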

void memblock_free_all(void);
void reset_node_managed_pages(pg_data_t *pgdat);
void reset_all_zones_managed_pages(void);

void __next_mem_range(u64 *idx, int nid, enum memblock_flags flags,
		      struct memblock_type *type_a,
		      struct memblock_type *type_b, phys_addr_t *out_start,
		      phys_addr_t *out_end, int *out_nid);

void __next_mem_range_rev(u64 *idx, int nid, enum memblock_flags flags,
			  struct memblock_type *type_a,
			  struct memblock_type *type_b, phys_addr_t *out_start,
			  phys_addr_t *out_end, int *out_nid);

void __memblock_free_late(phys_addr_t base, phys_addr_t size);

#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
static inline void __next_physmem_range(u64 *idx, struct memblock_type *type,
					phys_addr_t *out_start,
					phys_addr_t *out_end)
{
	extern struct memblock_type physmem;

	__next_mem_range(idx, NUMA_NO_NODE, MEMBLOCK_NONE, &physmem, type,
			 out_start, out_end, NULL);
}

/**
 * for_each_physmem_range - iterate through physmem areas not included in type.
 * @i: u64 used as loop variable
 * @type: ptr to memblock_type which excludes from the iteration, can be %NULL
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 */
#define for_each_physmem_range(i, type, p_start, p_end)			\
	for (i = 0, __next_physmem_range(&i, type, p_start, p_end);	\
	     i != (u64)ULLONG_MAX;					\
	     __next_physmem_range(&i, type, p_start, p_end))
#endif

/**
 * __for_each_mem_range - iterate through memblock areas from type_a and not
 * included in type_b. Or just type_a if type_b is NULL.
 * @i: u64 used as loop variable
 * @type_a: ptr to memblock_type to iterate
 * @type_b: ptr to memblock_type which excludes from the iteration
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @p_nid: ptr to int for nid of the range, can be %NULL
 */
#define __for_each_mem_range(i, type_a, type_b, nid, flags,		\
			     p_start, p_end, p_nid)			\
	for (i = 0, __next_mem_range(&i, nid, flags, type_a, type_b,	\
				     p_start, p_end, p_nid);		\
	     i != (u64)ULLONG_MAX;					\
	     __next_mem_range(&i, nid, flags, type_a, type_b,		\
			      p_start, p_end, p_nid))

/**
 * __for_each_mem_range_rev - reverse iterate through memblock areas from
 * type_a and not included in type_b. Or just type_a if type_b is NULL.
 * @i: u64 used as loop variable
 * @type_a: ptr to memblock_type to iterate
 * @type_b: ptr to memblock_type which excludes from the iteration
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @p_nid: ptr to int for nid of the range, can be %NULL
 */
#define __for_each_mem_range_rev(i, type_a, type_b, nid, flags,		\
				 p_start, p_end, p_nid)			\
	for (i = (u64)ULLONG_MAX,					\
	     __next_mem_range_rev(&i, nid, flags, type_a, type_b,	\
				  p_start, p_end, p_nid);		\
	     i != (u64)ULLONG_MAX;					\
	     __next_mem_range_rev(&i, nid, flags, type_a, type_b,	\
				  p_start, p_end, p_nid))

/**
 * for_each_mem_range - iterate through memory areas.
 * @i: u64 used as loop variable
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 */
#define for_each_mem_range(i, p_start, p_end)				\
	__for_each_mem_range(i, &memblock.memory, NULL, NUMA_NO_NODE,	\
			     MEMBLOCK_HOTPLUG, p_start, p_end, NULL)
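
/*
 * Illustrative sketch: walking all registered memory and printing each
 * range with the %pa printk format for phys_addr_t.
 *
 *	phys_addr_t start, end;
 *	u64 i;
 *
 *	for_each_mem_range(i, &start, &end)
 *		pr_info("memory: [%pa-%pa]\n", &start, &end);
 */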

/**
 * for_each_mem_range_rev - reverse iterate through memory areas.
 * @i: u64 used as loop variable
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 */
#define for_each_mem_range_rev(i, p_start, p_end)			\
	__for_each_mem_range_rev(i, &memblock.memory, NULL, NUMA_NO_NODE, \
				 MEMBLOCK_HOTPLUG, p_start, p_end, NULL)

/**
 * for_each_reserved_mem_range - iterate over all reserved memblock areas
 * @i: u64 used as loop variable
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 *
 * Walks over reserved areas of memblock. Available as soon as memblock
 * is initialized.
 */
#define for_each_reserved_mem_range(i, p_start, p_end)			\
	__for_each_mem_range(i, &memblock.reserved, NULL, NUMA_NO_NODE,	\
			     MEMBLOCK_NONE, p_start, p_end, NULL)

static inline bool memblock_is_hotpluggable(struct memblock_region *m)
{
	return m->flags & MEMBLOCK_HOTPLUG;
}

static inline bool memblock_is_mirror(struct memblock_region *m)
{
	return m->flags & MEMBLOCK_MIRROR;
}

static inline bool memblock_is_nomap(struct memblock_region *m)
{
	return m->flags & MEMBLOCK_NOMAP;
}

int memblock_search_pfn_nid(unsigned long pfn, unsigned long *start_pfn,
			    unsigned long *end_pfn);
void __next_mem_pfn_range(int *idx, int nid, unsigned long *out_start_pfn,
			  unsigned long *out_end_pfn, int *out_nid);

/**
 * for_each_mem_pfn_range - early memory pfn range iterator
 * @i: an integer used as loop variable
 * @nid: node selector, %MAX_NUMNODES for all nodes
 * @p_start: ptr to ulong for start pfn of the range, can be %NULL
 * @p_end: ptr to ulong for end pfn of the range, can be %NULL
 * @p_nid: ptr to int for nid of the range, can be %NULL
 *
 * Walks over configured memory ranges.
 */
#define for_each_mem_pfn_range(i, nid, p_start, p_end, p_nid)		\
	for (i = -1, __next_mem_pfn_range(&i, nid, p_start, p_end, p_nid); \
	     i >= 0; __next_mem_pfn_range(&i, nid, p_start, p_end, p_nid))
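
/*
 * Illustrative sketch: summing the pages that memblock knows about on
 * one node; "nid" is assumed to hold a valid node id.
 *
 *	unsigned long start_pfn, end_pfn, pages = 0;
 *	int i;
 *
 *	for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL)
 *		pages += end_pfn - start_pfn;
 */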

#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
void __next_mem_pfn_range_in_zone(u64 *idx, struct zone *zone,
				  unsigned long *out_spfn,
				  unsigned long *out_epfn);

/**
 * for_each_free_mem_pfn_range_in_zone - iterate through zone specific free
 * memblock areas
 * @i: u64 used as loop variable
 * @zone: zone in which all of the memory blocks reside
 * @p_start: ptr to ulong for start pfn of the range, can be %NULL
 * @p_end: ptr to ulong for end pfn of the range, can be %NULL
 *
 * Walks over free (memory && !reserved) areas of memblock in a specific
 * zone. Available once memblock and an empty zone is initialized. The main
 * assumption is that the zone start, end, and pgdat have been associated.
 * This way we can use the zone to determine NUMA node, and if a given part
 * of the memblock is valid for the zone.
 */
#define for_each_free_mem_pfn_range_in_zone(i, zone, p_start, p_end)	\
	for (i = 0,							\
	     __next_mem_pfn_range_in_zone(&i, zone, p_start, p_end);	\
	     i != U64_MAX;						\
	     __next_mem_pfn_range_in_zone(&i, zone, p_start, p_end))

/**
 * for_each_free_mem_pfn_range_in_zone_from - iterate through zone specific
 * free memblock areas from a given point
 * @i: u64 used as loop variable
 * @zone: zone in which all of the memory blocks reside
 * @p_start: ptr to ulong for start pfn of the range, can be %NULL
 * @p_end: ptr to ulong for end pfn of the range, can be %NULL
 *
 * Walks over free (memory && !reserved) areas of memblock in a specific
 * zone, continuing from the current position. Available as soon as memblock
 * is initialized.
 */
#define for_each_free_mem_pfn_range_in_zone_from(i, zone, p_start, p_end) \
	for (; i != U64_MAX;						\
	     __next_mem_pfn_range_in_zone(&i, zone, p_start, p_end))

int __init deferred_page_init_max_threads(const struct cpumask *node_cpumask);

#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */

/**
 * for_each_free_mem_range - iterate through free memblock areas
 * @i: u64 used as loop variable
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @p_nid: ptr to int for nid of the range, can be %NULL
 *
 * Walks over free (memory && !reserved) areas of memblock. Available as
 * soon as memblock is initialized.
 */
#define for_each_free_mem_range(i, nid, flags, p_start, p_end, p_nid)	\
	__for_each_mem_range(i, &memblock.memory, &memblock.reserved,	\
			     nid, flags, p_start, p_end, p_nid)
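
/*
 * Illustrative sketch: scanning free (memory && !reserved) ranges on
 * any node with no attribute filtering.
 *
 *	phys_addr_t start, end;
 *	u64 i;
 *
 *	for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE,
 *				&start, &end, NULL)
 *		pr_info("free: [%pa-%pa]\n", &start, &end);
 */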

/**
 * for_each_free_mem_range_reverse - rev-iterate through free memblock areas
 * @i: u64 used as loop variable
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @p_nid: ptr to int for nid of the range, can be %NULL
 *
 * Walks over free (memory && !reserved) areas of memblock in reverse
 * order. Available as soon as memblock is initialized.
 */
#define for_each_free_mem_range_reverse(i, nid, flags, p_start, p_end,	\
					p_nid)				\
	__for_each_mem_range_rev(i, &memblock.memory, &memblock.reserved, \
				 nid, flags, p_start, p_end, p_nid)

int memblock_set_node(phys_addr_t base, phys_addr_t size,
		      struct memblock_type *type, int nid);

#ifdef CONFIG_NUMA
static inline void memblock_set_region_node(struct memblock_region *r, int nid)
{
	r->nid = nid;
}

static inline int memblock_get_region_node(const struct memblock_region *r)
{
	return r->nid;
}
#else
static inline void memblock_set_region_node(struct memblock_region *r, int nid)
{
}

static inline int memblock_get_region_node(const struct memblock_region *r)
{
	return 0;
}
#endif /* CONFIG_NUMA */

/* Flags for memblock allocation APIs */
#define MEMBLOCK_ALLOC_ANYWHERE	(~(phys_addr_t)0)
#define MEMBLOCK_ALLOC_ACCESSIBLE	0
#define MEMBLOCK_ALLOC_KASAN		1

/* We are using top down, so it is safe to use 0 here */
#define MEMBLOCK_LOW_LIMIT 0

#ifndef ARCH_LOW_ADDRESS_LIMIT
#define ARCH_LOW_ADDRESS_LIMIT	0xffffffffUL
#endif

phys_addr_t memblock_phys_alloc_range(phys_addr_t size, phys_addr_t align,
				      phys_addr_t start, phys_addr_t end);
phys_addr_t memblock_alloc_range_nid(phys_addr_t size,
				     phys_addr_t align, phys_addr_t start,
				     phys_addr_t end, int nid, bool exact_nid);
phys_addr_t memblock_phys_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid);

static inline phys_addr_t memblock_phys_alloc(phys_addr_t size,
					      phys_addr_t align)
{
	return memblock_phys_alloc_range(size, align, 0,
					 MEMBLOCK_ALLOC_ACCESSIBLE);
}
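
/*
 * Illustrative sketch: reserving one page-aligned physical page before
 * the page allocator exists. A return value of 0 means failure; the
 * panic() reaction is the caller's choice, not a requirement.
 *
 *	phys_addr_t pa = memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE);
 *	if (!pa)
 *		panic("%s: out of early memory\n", __func__);
 */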

void *memblock_alloc_exact_nid_raw(phys_addr_t size, phys_addr_t align,
				   phys_addr_t min_addr, phys_addr_t max_addr,
				   int nid);
void *memblock_alloc_try_nid_raw(phys_addr_t size, phys_addr_t align,
				 phys_addr_t min_addr, phys_addr_t max_addr,
				 int nid);
void *memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align,
			     phys_addr_t min_addr, phys_addr_t max_addr,
			     int nid);

static __always_inline void *memblock_alloc(phys_addr_t size, phys_addr_t align)
{
	return memblock_alloc_try_nid(size, align, MEMBLOCK_LOW_LIMIT,
				      MEMBLOCK_ALLOC_ACCESSIBLE, NUMA_NO_NODE);
}
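
/*
 * Illustrative sketch: memblock_alloc() returns zeroed, directly mapped
 * memory, so it can stand in for kzalloc() during early boot. The
 * "nr_entries" variable and "struct my_entry" are hypothetical.
 *
 *	struct my_entry *tbl;
 *
 *	tbl = memblock_alloc(nr_entries * sizeof(*tbl), SMP_CACHE_BYTES);
 *	if (!tbl)
 *		panic("%s: failed to allocate table\n", __func__);
 */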

static inline void *memblock_alloc_raw(phys_addr_t size,
				       phys_addr_t align)
{
	return memblock_alloc_try_nid_raw(size, align, MEMBLOCK_LOW_LIMIT,
					  MEMBLOCK_ALLOC_ACCESSIBLE,
					  NUMA_NO_NODE);
}

static inline void *memblock_alloc_from(phys_addr_t size,
					phys_addr_t align,
					phys_addr_t min_addr)
{
	return memblock_alloc_try_nid(size, align, min_addr,
				      MEMBLOCK_ALLOC_ACCESSIBLE, NUMA_NO_NODE);
}

static inline void *memblock_alloc_low(phys_addr_t size,
				       phys_addr_t align)
{
	return memblock_alloc_try_nid(size, align, MEMBLOCK_LOW_LIMIT,
				      ARCH_LOW_ADDRESS_LIMIT, NUMA_NO_NODE);
}

static inline void *memblock_alloc_node(phys_addr_t size,
					phys_addr_t align, int nid)
{
	return memblock_alloc_try_nid(size, align, MEMBLOCK_LOW_LIMIT,
				      MEMBLOCK_ALLOC_ACCESSIBLE, nid);
}

static inline void memblock_free_early(phys_addr_t base,
				       phys_addr_t size)
{
	memblock_free(base, size);
}

static inline void memblock_free_early_nid(phys_addr_t base,
					   phys_addr_t size, int nid)
{
	memblock_free(base, size);
}

static inline void memblock_free_late(phys_addr_t base, phys_addr_t size)
{
	__memblock_free_late(base, size);
}

/*
 * Set the allocation direction to bottom-up or top-down.
 */
static inline __init_memblock void memblock_set_bottom_up(bool enable)
{
	memblock.bottom_up = enable;
}

/*
 * Check the allocation direction: when this returns true, memblock
 * allocates memory bottom-up, otherwise top-down.
 */
static inline __init_memblock bool memblock_bottom_up(void)
{
	return memblock.bottom_up;
}

phys_addr_t memblock_phys_mem_size(void);
phys_addr_t memblock_reserved_size(void);
phys_addr_t memblock_start_of_DRAM(void);
phys_addr_t memblock_end_of_DRAM(void);
void memblock_enforce_memory_limit(phys_addr_t memory_limit);
void memblock_cap_memory_range(phys_addr_t base, phys_addr_t size);
void memblock_mem_limit_remove_map(phys_addr_t limit);
bool memblock_is_memory(phys_addr_t addr);
bool memblock_is_map_memory(phys_addr_t addr);
bool memblock_is_region_memory(phys_addr_t base, phys_addr_t size);
bool memblock_is_reserved(phys_addr_t addr);
bool memblock_is_region_reserved(phys_addr_t base, phys_addr_t size);

void memblock_dump_all(void);

/**
 * memblock_set_current_limit - Set the current allocation limit to allow
 *                              limiting allocations to what is currently
 *                              accessible during boot
 * @limit: New limit value (physical address)
 */
void memblock_set_current_limit(phys_addr_t limit);

phys_addr_t memblock_get_current_limit(void);

/*
 * pfn conversion functions
 *
 * While the memory MEMBLOCKs should always be page aligned, the reserved
 * MEMBLOCKs may not be. These accessors attempt to provide a very clear
 * idea of what they return for such non-aligned MEMBLOCKs.
 */

/**
 * memblock_region_memory_base_pfn - get the lowest pfn of the memory region
 * @reg: memblock_region structure
 *
 * Return: the lowest pfn intersecting with the memory region
 */
static inline unsigned long memblock_region_memory_base_pfn(const struct memblock_region *reg)
{
	return PFN_UP(reg->base);
}

/**
 * memblock_region_memory_end_pfn - get the end pfn of the memory region
 * @reg: memblock_region structure
 *
 * Return: the end_pfn of the memory region
 */
static inline unsigned long memblock_region_memory_end_pfn(const struct memblock_region *reg)
{
	return PFN_DOWN(reg->base + reg->size);
}

/**
 * memblock_region_reserved_base_pfn - get the lowest pfn of the reserved region
 * @reg: memblock_region structure
 *
 * Return: the lowest pfn intersecting with the reserved region
 */
static inline unsigned long memblock_region_reserved_base_pfn(const struct memblock_region *reg)
{
	return PFN_DOWN(reg->base);
}

/**
 * memblock_region_reserved_end_pfn - get the end pfn of the reserved region
 * @reg: memblock_region structure
 *
 * Return: the end_pfn of the reserved region
 */
static inline unsigned long memblock_region_reserved_end_pfn(const struct memblock_region *reg)
{
	return PFN_UP(reg->base + reg->size);
}

/**
 * for_each_mem_region - iterate over memory regions
 * @region: loop variable
 */
#define for_each_mem_region(region)					\
	for (region = memblock.memory.regions;				\
	     region < (memblock.memory.regions + memblock.memory.cnt);	\
	     region++)
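
/*
 * Illustrative sketch: direct iteration over the region array, e.g. to
 * check per-region attributes.
 *
 *	struct memblock_region *r;
 *
 *	for_each_mem_region(r)
 *		if (memblock_is_hotpluggable(r))
 *			pr_info("hotpluggable region at %pa\n", &r->base);
 */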

/**
 * for_each_reserved_mem_region - iterate over reserved memory regions
 * @region: loop variable
 */
#define for_each_reserved_mem_region(region)				\
	for (region = memblock.reserved.regions;			\
	     region < (memblock.reserved.regions + memblock.reserved.cnt); \
	     region++)

extern void *alloc_large_system_hash(const char *tablename,
				     unsigned long bucketsize,
				     unsigned long numentries,
				     int scale,
				     int flags,
				     unsigned int *_hash_shift,
				     unsigned int *_hash_mask,
				     unsigned long low_limit,
				     unsigned long high_limit);

#define HASH_EARLY	0x00000001	/* Allocating during early boot? */
#define HASH_SMALL	0x00000002	/* sub-page allocation allowed, min
					 * shift passed via *_hash_shift */
#define HASH_ZERO	0x00000004	/* Zero allocated hash table */
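
/*
 * Illustrative sketch (the table name and sizing parameters are
 * hypothetical): allocating a zeroed, boot-time hash table whose size
 * is scaled from the amount of memory.
 *
 *	static unsigned int hash_shift;
 *	static unsigned int hash_mask;
 *	void *table;
 *
 *	table = alloc_large_system_hash("Example-hash",
 *					sizeof(struct hlist_head),
 *					0, 17, HASH_EARLY | HASH_ZERO,
 *					&hash_shift, &hash_mask, 0, 0);
 */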

/*
 * Only NUMA needs hash distribution. 64bit NUMA architectures have
 * sufficient vmalloc space.
 */
#ifdef CONFIG_NUMA
#define HASHDIST_DEFAULT IS_ENABLED(CONFIG_64BIT)
extern int hashdist;		/* Distribute hashes across NUMA nodes? */
#else
#define hashdist (0)
#endif

#ifdef CONFIG_MEMTEST
extern void early_memtest(phys_addr_t start, phys_addr_t end);
#else
static inline void early_memtest(phys_addr_t start, phys_addr_t end)
{
}
#endif

#endif /* __KERNEL__ */

#endif /* _LINUX_MEMBLOCK_H */