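/*
 * memblock - boot-time management of physically contiguous memory regions.
 *
 * Provides the early allocator used before the page allocator is up:
 * architectures register available memory and reservations here, and early
 * init code allocates from (and iterates over) these regions.
 */
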
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/poison.h>
#include <linux/pfn.h>
#include <linux/debugfs.h>
#include <linux/kmemleak.h>
#include <linux/seq_file.h>
#include <linux/memblock.h>

#include <asm/sections.h>
#include <linux/io.h>

#include "internal.h"

#define INIT_MEMBLOCK_REGIONS		128
#define INIT_PHYSMEM_REGIONS		4

#ifndef INIT_MEMBLOCK_RESERVED_REGIONS
# define INIT_MEMBLOCK_RESERVED_REGIONS	INIT_MEMBLOCK_REGIONS
#endif
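/*
 * Memblock views system memory as collections of contiguous regions:
 *
 *   * memory   - physical memory available to the kernel
 *   * reserved - ranges that were allocated or must not be touched
 *   * physmem  - the map of all physical memory, on architectures that
 *                select CONFIG_HAVE_MEMBLOCK_PHYS_MAP
 *
 * Each collection is a memblock_type holding an array of memblock_region
 * structures.  The arrays start out in static storage and are doubled on
 * demand once resizing is allowed (see memblock_double_array()).
 */
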
#ifndef CONFIG_NEED_MULTIPLE_NODES
struct pglist_data __refdata contig_page_data;
EXPORT_SYMBOL(contig_page_data);
#endif

unsigned long max_low_pfn;
unsigned long min_low_pfn;
unsigned long max_pfn;
unsigned long long max_possible_pfn;

static struct memblock_region memblock_memory_init_regions[INIT_MEMBLOCK_REGIONS] __initdata_memblock;
static struct memblock_region memblock_reserved_init_regions[INIT_MEMBLOCK_RESERVED_REGIONS] __initdata_memblock;
#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
static struct memblock_region memblock_physmem_init_regions[INIT_PHYSMEM_REGIONS] __initdata_memblock;
#endif

struct memblock memblock __initdata_memblock = {
	.memory.regions		= memblock_memory_init_regions,
	.memory.cnt		= 1,
	.memory.max		= INIT_MEMBLOCK_REGIONS,
	.memory.name		= "memory",

	.reserved.regions	= memblock_reserved_init_regions,
	.reserved.cnt		= 1,
	.reserved.max		= INIT_MEMBLOCK_RESERVED_REGIONS,
	.reserved.name		= "reserved",

#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
	.physmem.regions	= memblock_physmem_init_regions,
	.physmem.cnt		= 1,
	.physmem.max		= INIT_PHYSMEM_REGIONS,
	.physmem.name		= "physmem",
#endif

	.bottom_up		= false,
	.current_limit		= MEMBLOCK_ALLOC_ANYWHERE,
};

int memblock_debug __initdata_memblock;
static bool system_has_some_mirror __initdata_memblock = false;
static int memblock_can_resize __initdata_memblock;
static int memblock_memory_in_slab __initdata_memblock = 0;
static int memblock_reserved_in_slab __initdata_memblock = 0;

static enum memblock_flags __init_memblock choose_memblock_flags(void)
{
	return system_has_some_mirror ? MEMBLOCK_MIRROR : MEMBLOCK_NONE;
}

static inline phys_addr_t memblock_cap_size(phys_addr_t base, phys_addr_t *size)
{
	return *size = min(*size, PHYS_ADDR_MAX - base);
}

static unsigned long __init_memblock memblock_addrs_overlap(phys_addr_t base1, phys_addr_t size1,
				       phys_addr_t base2, phys_addr_t size2)
{
	return ((base1 < (base2 + size2)) && (base2 < (base1 + size1)));
}

bool __init_memblock memblock_overlaps_region(struct memblock_type *type,
					phys_addr_t base, phys_addr_t size)
{
	unsigned long i;

	for (i = 0; i < type->cnt; i++)
		if (memblock_addrs_overlap(base, size, type->regions[i].base,
					   type->regions[i].size))
			break;
	return i < type->cnt;
}

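/*
 * __memblock_find_range_bottom_up - find a free area, scanning upward
 *
 * Walks the free (memory minus reserved) ranges from low to high addresses,
 * clamps each to [@start, @end) and returns the first properly aligned
 * candidate that can hold @size bytes, or 0 on failure.
 */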
static phys_addr_t __init_memblock
__memblock_find_range_bottom_up(phys_addr_t start, phys_addr_t end,
				phys_addr_t size, phys_addr_t align, int nid,
				enum memblock_flags flags)
{
	phys_addr_t this_start, this_end, cand;
	u64 i;

	for_each_free_mem_range(i, nid, flags, &this_start, &this_end, NULL) {
		this_start = clamp(this_start, start, end);
		this_end = clamp(this_end, start, end);

		cand = round_up(this_start, align);
		if (cand < this_end && this_end - cand >= size)
			return cand;
	}

	return 0;
}

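/*
 * __memblock_find_range_top_down - find a free area, scanning downward
 *
 * Mirror image of the bottom-up search: walks the free ranges from high to
 * low addresses and returns the highest aligned candidate that fits @size
 * within [@start, @end), or 0 on failure.
 */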
static phys_addr_t __init_memblock
__memblock_find_range_top_down(phys_addr_t start, phys_addr_t end,
			       phys_addr_t size, phys_addr_t align, int nid,
			       enum memblock_flags flags)
{
	phys_addr_t this_start, this_end, cand;
	u64 i;

	for_each_free_mem_range_reverse(i, nid, flags, &this_start, &this_end,
					NULL) {
		this_start = clamp(this_start, start, end);
		this_end = clamp(this_end, start, end);

		if (this_end < size)
			continue;

		cand = round_down(this_end - size, align);
		if (cand >= this_start)
			return cand;
	}

	return 0;
}

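/*
 * memblock_find_in_range_node - find a free area in the given range and node
 *
 * Top-down search is the default.  When bottom-up mode is enabled, the
 * search first tries the area above the kernel image (so memory near the
 * top of RAM, which may be hot-removable, is left alone) and only then
 * falls back to the top-down search.
 */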
static phys_addr_t __init_memblock memblock_find_in_range_node(phys_addr_t size,
					phys_addr_t align, phys_addr_t start,
					phys_addr_t end, int nid,
					enum memblock_flags flags)
{
	phys_addr_t kernel_end, ret;

	if (end == MEMBLOCK_ALLOC_ACCESSIBLE ||
	    end == MEMBLOCK_ALLOC_KASAN)
		end = memblock.current_limit;

	start = max_t(phys_addr_t, start, PAGE_SIZE);
	end = max(start, end);
	kernel_end = __pa_symbol(_end);

	if (memblock_bottom_up() && end > kernel_end) {
		phys_addr_t bottom_up_start;

		bottom_up_start = max(start, kernel_end);

		ret = __memblock_find_range_bottom_up(bottom_up_start, end,
						      size, align, nid, flags);
		if (ret)
			return ret;

		WARN_ONCE(IS_ENABLED(CONFIG_MEMORY_HOTREMOVE),
			  "memblock: bottom-up allocation failed, memory hotremove may be affected\n");
	}

	return __memblock_find_range_top_down(start, end, size, align, nid,
					      flags);
}

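/*
 * memblock_find_in_range - find a free area within [@start, @end)
 *
 * Wrapper around memblock_find_in_range_node() for callers that do not care
 * about the node.  If mirrored memory was requested but the search fails,
 * the MIRROR flag is dropped and the search is retried.
 */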
phys_addr_t __init_memblock memblock_find_in_range(phys_addr_t start,
					phys_addr_t end, phys_addr_t size,
					phys_addr_t align)
{
	phys_addr_t ret;
	enum memblock_flags flags = choose_memblock_flags();

again:
	ret = memblock_find_in_range_node(size, align, start, end,
					  NUMA_NO_NODE, flags);

	if (!ret && (flags & MEMBLOCK_MIRROR)) {
		pr_warn("Could not allocate %pap bytes of mirrored memory\n",
			&size);
		flags &= ~MEMBLOCK_MIRROR;
		goto again;
	}

	return ret;
}

static void __init_memblock memblock_remove_region(struct memblock_type *type, unsigned long r)
{
	type->total_size -= type->regions[r].size;
	memmove(&type->regions[r], &type->regions[r + 1],
		(type->cnt - (r + 1)) * sizeof(type->regions[r]));
	type->cnt--;

	if (type->cnt == 0) {
		WARN_ON(type->total_size != 0);
		type->cnt = 1;
		type->regions[0].base = 0;
		type->regions[0].size = 0;
		type->regions[0].flags = 0;
		memblock_set_region_node(&type->regions[0], MAX_NUMNODES);
	}
}

#ifndef CONFIG_ARCH_KEEP_MEMBLOCK
void __init memblock_discard(void)
{
	phys_addr_t addr, size;

	if (memblock.reserved.regions != memblock_reserved_init_regions) {
		addr = __pa(memblock.reserved.regions);
		size = PAGE_ALIGN(sizeof(struct memblock_region) *
				  memblock.reserved.max);
		__memblock_free_late(addr, size);
	}

	if (memblock.memory.regions != memblock_memory_init_regions) {
		addr = __pa(memblock.memory.regions);
		size = PAGE_ALIGN(sizeof(struct memblock_region) *
				  memblock.memory.max);
		__memblock_free_late(addr, size);
	}
}
#endif

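/*
 * memblock_double_array - double the size of a region array
 *
 * Grows @type->regions to hold twice as many entries.  Uses the slab once it
 * is available, otherwise grabs space from memblock itself while avoiding
 * the range [@new_area_start, @new_area_start + @new_area_size) that the
 * caller is about to insert.  Returns 0 on success, -1 on failure (for
 * example when resizing has not been allowed yet).
 */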
static int __init_memblock memblock_double_array(struct memblock_type *type,
						phys_addr_t new_area_start,
						phys_addr_t new_area_size)
{
	struct memblock_region *new_array, *old_array;
	phys_addr_t old_alloc_size, new_alloc_size;
	phys_addr_t old_size, new_size, addr, new_end;
	int use_slab = slab_is_available();
	int *in_slab;

	if (!memblock_can_resize)
		return -1;

	old_size = type->max * sizeof(struct memblock_region);
	new_size = old_size << 1;

	old_alloc_size = PAGE_ALIGN(old_size);
	new_alloc_size = PAGE_ALIGN(new_size);

	if (type == &memblock.memory)
		in_slab = &memblock_memory_in_slab;
	else
		in_slab = &memblock_reserved_in_slab;

	if (use_slab) {
		new_array = kmalloc(new_size, GFP_KERNEL);
		addr = new_array ? __pa(new_array) : 0;
	} else {
		if (type != &memblock.reserved)
			new_area_start = new_area_size = 0;

		addr = memblock_find_in_range(new_area_start + new_area_size,
						memblock.current_limit,
						new_alloc_size, PAGE_SIZE);
		if (!addr && new_area_size)
			addr = memblock_find_in_range(0,
				min(new_area_start, memblock.current_limit),
				new_alloc_size, PAGE_SIZE);

		new_array = addr ? __va(addr) : NULL;
	}
	if (!addr) {
		pr_err("memblock: Failed to double %s array from %ld to %ld entries !\n",
		       type->name, type->max, type->max * 2);
		return -1;
	}

	new_end = addr + new_size - 1;
	memblock_dbg("memblock: %s is doubled to %ld at [%pa-%pa]",
			type->name, type->max * 2, &addr, &new_end);

	memcpy(new_array, type->regions, old_size);
	memset(new_array + type->max, 0, old_size);
	old_array = type->regions;
	type->regions = new_array;
	type->max <<= 1;

	if (*in_slab)
		kfree(old_array);
	else if (old_array != memblock_memory_init_regions &&
		 old_array != memblock_reserved_init_regions)
		memblock_free(__pa(old_array), old_alloc_size);

	if (!use_slab)
		BUG_ON(memblock_reserve(addr, new_alloc_size));

	*in_slab = use_slab;

	return 0;
}

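/*
 * memblock_merge_regions - merge neighboring compatible regions
 *
 * Scans @type and joins adjacent regions that abut exactly and share the
 * same node id and flags.
 */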
static void __init_memblock memblock_merge_regions(struct memblock_type *type)
{
	int i = 0;

	while (i < type->cnt - 1) {
		struct memblock_region *this = &type->regions[i];
		struct memblock_region *next = &type->regions[i + 1];

		if (this->base + this->size != next->base ||
		    memblock_get_region_node(this) !=
		    memblock_get_region_node(next) ||
		    this->flags != next->flags) {
			BUG_ON(this->base + this->size > next->base);
			i++;
			continue;
		}

		this->size += next->size;

		memmove(next, next + 1, (type->cnt - (i + 2)) * sizeof(*next));
		type->cnt--;
	}
}

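/*
 * memblock_insert_region - insert a new region at index @idx
 *
 * Shifts the tail of the array up by one slot and fills in the new entry.
 * The caller must ensure there is room (type->cnt < type->max).
 */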
static void __init_memblock memblock_insert_region(struct memblock_type *type,
						   int idx, phys_addr_t base,
						   phys_addr_t size,
						   int nid,
						   enum memblock_flags flags)
{
	struct memblock_region *rgn = &type->regions[idx];

	BUG_ON(type->cnt >= type->max);
	memmove(rgn + 1, rgn, (type->cnt - idx) * sizeof(*rgn));
	rgn->base = base;
	rgn->size = size;
	rgn->flags = flags;
	memblock_set_region_node(rgn, nid);
	type->cnt++;
	type->total_size += size;
}

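/*
 * memblock_add_range - add a new memblock region
 *
 * Adds [@base, @base + @size) to @type.  Parts that overlap existing regions
 * are skipped so only the non-overlapping pieces are inserted, and
 * compatible neighbours are merged afterwards.  The work is done in two
 * passes: the first pass only counts how many new regions are needed so the
 * array can be grown up front, the second pass actually inserts them.
 * Returns 0 on success, -errno on failure.
 */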
int __init_memblock memblock_add_range(struct memblock_type *type,
				phys_addr_t base, phys_addr_t size,
				int nid, enum memblock_flags flags)
{
	bool insert = false;
	phys_addr_t obase = base;
	phys_addr_t end = base + memblock_cap_size(base, &size);
	int idx, nr_new;
	struct memblock_region *rgn;

	if (!size)
		return 0;

	if (type->regions[0].size == 0) {
		WARN_ON(type->cnt != 1 || type->total_size);
		type->regions[0].base = base;
		type->regions[0].size = size;
		type->regions[0].flags = flags;
		memblock_set_region_node(&type->regions[0], nid);
		type->total_size = size;
		return 0;
	}
repeat:
	base = obase;
	nr_new = 0;

	for_each_memblock_type(idx, type, rgn) {
		phys_addr_t rbase = rgn->base;
		phys_addr_t rend = rbase + rgn->size;

		if (rbase >= end)
			break;
		if (rend <= base)
			continue;

		if (rbase > base) {
#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
			WARN_ON(nid != memblock_get_region_node(rgn));
#endif
			WARN_ON(flags != rgn->flags);
			nr_new++;
			if (insert)
				memblock_insert_region(type, idx++, base,
						       rbase - base, nid,
						       flags);
		}

		base = min(rend, end);
	}

	if (base < end) {
		nr_new++;
		if (insert)
			memblock_insert_region(type, idx, base, end - base,
					       nid, flags);
	}

	if (!nr_new)
		return 0;

	if (!insert) {
		while (type->cnt + nr_new > type->max)
			if (memblock_double_array(type, obase, size) < 0)
				return -ENOMEM;
		insert = true;
		goto repeat;
	} else {
		memblock_merge_regions(type);
		return 0;
	}
}

int __init_memblock memblock_add_node(phys_addr_t base, phys_addr_t size,
				      int nid)
{
	return memblock_add_range(&memblock.memory, base, size, nid, 0);
}

int __init_memblock memblock_add(phys_addr_t base, phys_addr_t size)
{
	phys_addr_t end = base + size - 1;

	memblock_dbg("memblock_add: [%pa-%pa] %pS\n",
		     &base, &end, (void *)_RET_IP_);

	return memblock_add_range(&memblock.memory, base, size, MAX_NUMNODES, 0);
}

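/*
 * memblock_isolate_range - isolate [@base, @base + @size) in @type
 *
 * Splits regions so that the boundaries of the given range fall on region
 * boundaries, and reports the resulting index range through @start_rgn
 * (first isolated region) and @end_rgn (one past the last).  Returns 0 on
 * success, -errno on failure.
 */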
static int __init_memblock memblock_isolate_range(struct memblock_type *type,
					phys_addr_t base, phys_addr_t size,
					int *start_rgn, int *end_rgn)
{
	phys_addr_t end = base + memblock_cap_size(base, &size);
	int idx;
	struct memblock_region *rgn;

	*start_rgn = *end_rgn = 0;

	if (!size)
		return 0;

	while (type->cnt + 2 > type->max)
		if (memblock_double_array(type, base, size) < 0)
			return -ENOMEM;

	for_each_memblock_type(idx, type, rgn) {
		phys_addr_t rbase = rgn->base;
		phys_addr_t rend = rbase + rgn->size;

		if (rbase >= end)
			break;
		if (rend <= base)
			continue;

		if (rbase < base) {
			rgn->base = base;
			rgn->size -= base - rbase;
			type->total_size -= base - rbase;
			memblock_insert_region(type, idx, rbase, base - rbase,
					       memblock_get_region_node(rgn),
					       rgn->flags);
		} else if (rend > end) {
			rgn->base = end;
			rgn->size -= end - rbase;
			type->total_size -= end - rbase;
			memblock_insert_region(type, idx--, rbase, end - rbase,
					       memblock_get_region_node(rgn),
					       rgn->flags);
		} else {
			if (!*end_rgn)
				*start_rgn = idx;
			*end_rgn = idx + 1;
		}
	}

	return 0;
}

static int __init_memblock memblock_remove_range(struct memblock_type *type,
					  phys_addr_t base, phys_addr_t size)
{
	int start_rgn, end_rgn;
	int i, ret;

	ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
	if (ret)
		return ret;

	for (i = end_rgn - 1; i >= start_rgn; i--)
		memblock_remove_region(type, i);
	return 0;
}

int __init_memblock memblock_remove(phys_addr_t base, phys_addr_t size)
{
	phys_addr_t end = base + size - 1;

	memblock_dbg("memblock_remove: [%pa-%pa] %pS\n",
		     &base, &end, (void *)_RET_IP_);

	return memblock_remove_range(&memblock.memory, base, size);
}

int __init_memblock memblock_free(phys_addr_t base, phys_addr_t size)
{
	phys_addr_t end = base + size - 1;

	memblock_dbg(" memblock_free: [%pa-%pa] %pS\n",
		     &base, &end, (void *)_RET_IP_);

	kmemleak_free_part_phys(base, size);
	return memblock_remove_range(&memblock.reserved, base, size);
}

int __init_memblock memblock_reserve(phys_addr_t base, phys_addr_t size)
{
	phys_addr_t end = base + size - 1;

	memblock_dbg("memblock_reserve: [%pa-%pa] %pS\n",
		     &base, &end, (void *)_RET_IP_);

	return memblock_add_range(&memblock.reserved, base, size, MAX_NUMNODES, 0);
}

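/*
 * memblock_setclr_flag - set or clear a flag on a range of memory regions
 *
 * Isolates [@base, @base + @size) in memblock.memory and then sets (@set=1)
 * or clears (@set=0) @flag on every isolated region before re-merging.
 */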
static int __init_memblock memblock_setclr_flag(phys_addr_t base,
				phys_addr_t size, int set, int flag)
{
	struct memblock_type *type = &memblock.memory;
	int i, ret, start_rgn, end_rgn;

	ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
	if (ret)
		return ret;

	for (i = start_rgn; i < end_rgn; i++) {
		struct memblock_region *r = &type->regions[i];

		if (set)
			r->flags |= flag;
		else
			r->flags &= ~flag;
	}

	memblock_merge_regions(type);
	return 0;
}

int __init_memblock memblock_mark_hotplug(phys_addr_t base, phys_addr_t size)
{
	return memblock_setclr_flag(base, size, 1, MEMBLOCK_HOTPLUG);
}

int __init_memblock memblock_clear_hotplug(phys_addr_t base, phys_addr_t size)
{
	return memblock_setclr_flag(base, size, 0, MEMBLOCK_HOTPLUG);
}

int __init_memblock memblock_mark_mirror(phys_addr_t base, phys_addr_t size)
{
	system_has_some_mirror = true;

	return memblock_setclr_flag(base, size, 1, MEMBLOCK_MIRROR);
}

int __init_memblock memblock_mark_nomap(phys_addr_t base, phys_addr_t size)
{
	return memblock_setclr_flag(base, size, 1, MEMBLOCK_NOMAP);
}

int __init_memblock memblock_clear_nomap(phys_addr_t base, phys_addr_t size)
{
	return memblock_setclr_flag(base, size, 0, MEMBLOCK_NOMAP);
}

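/*
 * __next_reserved_mem_region - iterator over memblock.reserved
 *
 * Reports the region at *@idx and advances the index, or sets *@idx to
 * ULLONG_MAX when the list is exhausted.  Backs the
 * for_each_reserved_mem_region() helper.
 */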
void __init_memblock __next_reserved_mem_region(u64 *idx,
					   phys_addr_t *out_start,
					   phys_addr_t *out_end)
{
	struct memblock_type *type = &memblock.reserved;

	if (*idx < type->cnt) {
		struct memblock_region *r = &type->regions[*idx];
		phys_addr_t base = r->base;
		phys_addr_t size = r->size;

		if (out_start)
			*out_start = base;
		if (out_end)
			*out_end = base + size - 1;

		*idx += 1;
		return;
	}

	*idx = ULLONG_MAX;
}

static bool should_skip_region(struct memblock_region *m, int nid, int flags)
{
	int m_nid = memblock_get_region_node(m);

	if (nid != NUMA_NO_NODE && nid != m_nid)
		return true;

	if (movable_node_is_enabled() && memblock_is_hotpluggable(m))
		return true;

	if ((flags & MEMBLOCK_MIRROR) && !memblock_is_mirror(m))
		return true;

	if (!(flags & MEMBLOCK_NOMAP) && memblock_is_nomap(m))
		return true;

	return false;
}

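/*
 * __next_mem_range - next function for for_each_free_mem_range() and friends
 *
 * Walks @type_a and, when @type_b is given, reports only the areas of
 * @type_a that do *not* intersect @type_b (typically memory minus reserved).
 * The 64-bit cookie *@idx packs the two array indices (type_a in the low 32
 * bits, type_b in the high 32 bits); it is set to ULLONG_MAX when the
 * iteration is done.
 */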
void __init_memblock __next_mem_range(u64 *idx, int nid,
				      enum memblock_flags flags,
				      struct memblock_type *type_a,
				      struct memblock_type *type_b,
				      phys_addr_t *out_start,
				      phys_addr_t *out_end, int *out_nid)
{
	int idx_a = *idx & 0xffffffff;
	int idx_b = *idx >> 32;

	if (WARN_ONCE(nid == MAX_NUMNODES,
	    "Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n"))
		nid = NUMA_NO_NODE;

	for (; idx_a < type_a->cnt; idx_a++) {
		struct memblock_region *m = &type_a->regions[idx_a];

		phys_addr_t m_start = m->base;
		phys_addr_t m_end = m->base + m->size;
		int m_nid = memblock_get_region_node(m);

		if (should_skip_region(m, nid, flags))
			continue;

		if (!type_b) {
			if (out_start)
				*out_start = m_start;
			if (out_end)
				*out_end = m_end;
			if (out_nid)
				*out_nid = m_nid;
			idx_a++;
			*idx = (u32)idx_a | (u64)idx_b << 32;
			return;
		}

		for (; idx_b < type_b->cnt + 1; idx_b++) {
			struct memblock_region *r;
			phys_addr_t r_start;
			phys_addr_t r_end;

			r = &type_b->regions[idx_b];
			r_start = idx_b ? r[-1].base + r[-1].size : 0;
			r_end = idx_b < type_b->cnt ?
				r->base : PHYS_ADDR_MAX;

			if (r_start >= m_end)
				break;

			if (m_start < r_end) {
				if (out_start)
					*out_start =
						max(m_start, r_start);
				if (out_end)
					*out_end = min(m_end, r_end);
				if (out_nid)
					*out_nid = m_nid;

				if (m_end <= r_end)
					idx_a++;
				else
					idx_b++;
				*idx = (u32)idx_a | (u64)idx_b << 32;
				return;
			}
		}
	}

	*idx = ULLONG_MAX;
}

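/*
 * __next_mem_range_rev - reverse-order counterpart of __next_mem_range()
 *
 * Same semantics, but walks the regions from the highest address down.
 * Used by for_each_free_mem_range_reverse() and friends.
 */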
void __init_memblock __next_mem_range_rev(u64 *idx, int nid,
					  enum memblock_flags flags,
					  struct memblock_type *type_a,
					  struct memblock_type *type_b,
					  phys_addr_t *out_start,
					  phys_addr_t *out_end, int *out_nid)
{
	int idx_a = *idx & 0xffffffff;
	int idx_b = *idx >> 32;

	if (WARN_ONCE(nid == MAX_NUMNODES, "Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n"))
		nid = NUMA_NO_NODE;

	if (*idx == (u64)ULLONG_MAX) {
		idx_a = type_a->cnt - 1;
		if (type_b != NULL)
			idx_b = type_b->cnt;
		else
			idx_b = 0;
	}

	for (; idx_a >= 0; idx_a--) {
		struct memblock_region *m = &type_a->regions[idx_a];

		phys_addr_t m_start = m->base;
		phys_addr_t m_end = m->base + m->size;
		int m_nid = memblock_get_region_node(m);

		if (should_skip_region(m, nid, flags))
			continue;

		if (!type_b) {
			if (out_start)
				*out_start = m_start;
			if (out_end)
				*out_end = m_end;
			if (out_nid)
				*out_nid = m_nid;
			idx_a--;
			*idx = (u32)idx_a | (u64)idx_b << 32;
			return;
		}

		for (; idx_b >= 0; idx_b--) {
			struct memblock_region *r;
			phys_addr_t r_start;
			phys_addr_t r_end;

			r = &type_b->regions[idx_b];
			r_start = idx_b ? r[-1].base + r[-1].size : 0;
			r_end = idx_b < type_b->cnt ?
				r->base : PHYS_ADDR_MAX;

			if (r_end <= m_start)
				break;

			if (m_end > r_start) {
				if (out_start)
					*out_start = max(m_start, r_start);
				if (out_end)
					*out_end = min(m_end, r_end);
				if (out_nid)
					*out_nid = m_nid;
				if (m_start >= r_start)
					idx_a--;
				else
					idx_b--;
				*idx = (u32)idx_a | (u64)idx_b << 32;
				return;
			}
		}
	}

	*idx = ULLONG_MAX;
}

#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP

void __init_memblock __next_mem_pfn_range(int *idx, int nid,
				unsigned long *out_start_pfn,
				unsigned long *out_end_pfn, int *out_nid)
{
	struct memblock_type *type = &memblock.memory;
	struct memblock_region *r;

	while (++*idx < type->cnt) {
		r = &type->regions[*idx];

		if (PFN_UP(r->base) >= PFN_DOWN(r->base + r->size))
			continue;
		if (nid == MAX_NUMNODES || nid == r->nid)
			break;
	}
	if (*idx >= type->cnt) {
		*idx = -1;
		return;
	}

	if (out_start_pfn)
		*out_start_pfn = PFN_UP(r->base);
	if (out_end_pfn)
		*out_end_pfn = PFN_DOWN(r->base + r->size);
	if (out_nid)
		*out_nid = r->nid;
}

int __init_memblock memblock_set_node(phys_addr_t base, phys_addr_t size,
				      struct memblock_type *type, int nid)
{
	int start_rgn, end_rgn;
	int i, ret;

	ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
	if (ret)
		return ret;

	for (i = start_rgn; i < end_rgn; i++)
		memblock_set_region_node(&type->regions[i], nid);

	memblock_merge_regions(type);
	return 0;
}
#endif
#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT

void __init_memblock
__next_mem_pfn_range_in_zone(u64 *idx, struct zone *zone,
			     unsigned long *out_spfn, unsigned long *out_epfn)
{
	int zone_nid = zone_to_nid(zone);
	phys_addr_t spa, epa;
	int nid;

	__next_mem_range(idx, zone_nid, MEMBLOCK_NONE,
			 &memblock.memory, &memblock.reserved,
			 &spa, &epa, &nid);

	while (*idx != U64_MAX) {
		unsigned long epfn = PFN_DOWN(epa);
		unsigned long spfn = PFN_UP(spa);

		if (zone->zone_start_pfn < epfn && spfn < epfn) {
			if (zone_end_pfn(zone) <= spfn) {
				*idx = U64_MAX;
				break;
			}

			if (out_spfn)
				*out_spfn = max(zone->zone_start_pfn, spfn);
			if (out_epfn)
				*out_epfn = min(zone_end_pfn(zone), epfn);

			return;
		}

		__next_mem_range(idx, zone_nid, MEMBLOCK_NONE,
				 &memblock.memory, &memblock.reserved,
				 &spa, &epa, &nid);
	}

	if (out_spfn)
		*out_spfn = ULONG_MAX;
	if (out_epfn)
		*out_epfn = 0;
}

#endif
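/*
 * memblock_alloc_range_nid - bottom-level allocator for physical memory
 *
 * Finds a free area of @size bytes aligned to @align within [@start, @end)
 * on node @nid and reserves it.  Falls back to any node and then to
 * non-mirrored memory if the stricter requests cannot be satisfied.
 * Returns the physical address on success, 0 on failure.
 */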
static phys_addr_t __init memblock_alloc_range_nid(phys_addr_t size,
					phys_addr_t align, phys_addr_t start,
					phys_addr_t end, int nid)
{
	enum memblock_flags flags = choose_memblock_flags();
	phys_addr_t found;

	if (WARN_ONCE(nid == MAX_NUMNODES, "Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n"))
		nid = NUMA_NO_NODE;

	if (!align) {
		dump_stack();
		align = SMP_CACHE_BYTES;
	}

	if (end > memblock.current_limit)
		end = memblock.current_limit;

again:
	found = memblock_find_in_range_node(size, align, start, end, nid,
					    flags);
	if (found && !memblock_reserve(found, size))
		goto done;

	if (nid != NUMA_NO_NODE) {
		found = memblock_find_in_range_node(size, align, start,
						    end, NUMA_NO_NODE,
						    flags);
		if (found && !memblock_reserve(found, size))
			goto done;
	}

	if (flags & MEMBLOCK_MIRROR) {
		flags &= ~MEMBLOCK_MIRROR;
		pr_warn("Could not allocate %pap bytes of mirrored memory\n",
			&size);
		goto again;
	}

	return 0;

done:
	if (end != MEMBLOCK_ALLOC_KASAN)
		kmemleak_alloc_phys(found, size, 0, 0);

	return found;
}

phys_addr_t __init memblock_phys_alloc_range(phys_addr_t size,
					     phys_addr_t align,
					     phys_addr_t start,
					     phys_addr_t end)
{
	return memblock_alloc_range_nid(size, align, start, end, NUMA_NO_NODE);
}

phys_addr_t __init memblock_phys_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid)
{
	return memblock_alloc_range_nid(size, align, 0,
					MEMBLOCK_ALLOC_ACCESSIBLE, nid);
}

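/*
 * memblock_alloc_internal - allocate boot memory and return a virtual address
 *
 * Common backend for the memblock_alloc_* family.  If the slab is already up
 * (which would indicate a misuse of these APIs) it falls back to
 * kzalloc_node(); otherwise it allocates physical memory, retrying without
 * the lower bound if needed, and converts the result with phys_to_virt().
 * Returns NULL on failure.
 */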
static void * __init memblock_alloc_internal(
				phys_addr_t size, phys_addr_t align,
				phys_addr_t min_addr, phys_addr_t max_addr,
				int nid)
{
	phys_addr_t alloc;

	if (WARN_ON_ONCE(slab_is_available()))
		return kzalloc_node(size, GFP_NOWAIT, nid);

	alloc = memblock_alloc_range_nid(size, align, min_addr, max_addr, nid);

	if (!alloc && min_addr)
		alloc = memblock_alloc_range_nid(size, align, 0, max_addr, nid);

	if (!alloc)
		return NULL;

	return phys_to_virt(alloc);
}

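/*
 * memblock_alloc_try_nid_raw - allocate boot memory without zeroing
 *
 * The returned block is poisoned (when page poisoning is enabled) rather
 * than cleared, so the caller must initialize every byte it uses.
 */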
void * __init memblock_alloc_try_nid_raw(
			phys_addr_t size, phys_addr_t align,
			phys_addr_t min_addr, phys_addr_t max_addr,
			int nid)
{
	void *ptr;

	memblock_dbg("%s: %llu bytes align=0x%llx nid=%d from=%pa max_addr=%pa %pS\n",
		     __func__, (u64)size, (u64)align, nid, &min_addr,
		     &max_addr, (void *)_RET_IP_);

	ptr = memblock_alloc_internal(size, align,
					   min_addr, max_addr, nid);
	if (ptr && size > 0)
		page_init_poison(ptr, size);

	return ptr;
}

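/*
 * memblock_alloc_try_nid - allocate zeroed boot memory
 *
 * Same as memblock_alloc_try_nid_raw() but the returned memory is cleared
 * with memset() before it is handed to the caller.
 */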
void * __init memblock_alloc_try_nid(
			phys_addr_t size, phys_addr_t align,
			phys_addr_t min_addr, phys_addr_t max_addr,
			int nid)
{
	void *ptr;

	memblock_dbg("%s: %llu bytes align=0x%llx nid=%d from=%pa max_addr=%pa %pS\n",
		     __func__, (u64)size, (u64)align, nid, &min_addr,
		     &max_addr, (void *)_RET_IP_);
	ptr = memblock_alloc_internal(size, align,
					   min_addr, max_addr, nid);
	if (ptr)
		memset(ptr, 0, size);

	return ptr;
}

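/*
 * __memblock_free_late - free pages directly to the buddy allocator
 *
 * Intended for memory that becomes free late in boot, typically after
 * memblock_free_all(): the pages are released one by one with
 * memblock_free_pages() and totalram is adjusted accordingly.
 */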
void __init __memblock_free_late(phys_addr_t base, phys_addr_t size)
{
	phys_addr_t cursor, end;

	end = base + size - 1;
	memblock_dbg("%s: [%pa-%pa] %pS\n",
		     __func__, &base, &end, (void *)_RET_IP_);
	kmemleak_free_part_phys(base, size);
	cursor = PFN_UP(base);
	end = PFN_DOWN(base + size);

	for (; cursor < end; cursor++) {
		memblock_free_pages(pfn_to_page(cursor), cursor, 0);
		totalram_pages_inc();
	}
}

phys_addr_t __init_memblock memblock_phys_mem_size(void)
{
	return memblock.memory.total_size;
}

phys_addr_t __init_memblock memblock_reserved_size(void)
{
	return memblock.reserved.total_size;
}

phys_addr_t __init memblock_mem_size(unsigned long limit_pfn)
{
	unsigned long pages = 0;
	struct memblock_region *r;
	unsigned long start_pfn, end_pfn;

	for_each_memblock(memory, r) {
		start_pfn = memblock_region_memory_base_pfn(r);
		end_pfn = memblock_region_memory_end_pfn(r);
		start_pfn = min_t(unsigned long, start_pfn, limit_pfn);
		end_pfn = min_t(unsigned long, end_pfn, limit_pfn);
		pages += end_pfn - start_pfn;
	}

	return PFN_PHYS(pages);
}

phys_addr_t __init_memblock memblock_start_of_DRAM(void)
{
	return memblock.memory.regions[0].base;
}

phys_addr_t __init_memblock memblock_end_of_DRAM(void)
{
	int idx = memblock.memory.cnt - 1;

	return (memblock.memory.regions[idx].base + memblock.memory.regions[idx].size);
}

static phys_addr_t __init_memblock __find_max_addr(phys_addr_t limit)
{
	phys_addr_t max_addr = PHYS_ADDR_MAX;
	struct memblock_region *r;

	for_each_memblock(memory, r) {
		if (limit <= r->size) {
			max_addr = r->base + limit;
			break;
		}
		limit -= r->size;
	}

	return max_addr;
}

void __init memblock_enforce_memory_limit(phys_addr_t limit)
{
	phys_addr_t max_addr = PHYS_ADDR_MAX;

	if (!limit)
		return;

	max_addr = __find_max_addr(limit);

	if (max_addr == PHYS_ADDR_MAX)
		return;

	memblock_remove_range(&memblock.memory, max_addr,
			      PHYS_ADDR_MAX);
	memblock_remove_range(&memblock.reserved, max_addr,
			      PHYS_ADDR_MAX);
}

void __init memblock_cap_memory_range(phys_addr_t base, phys_addr_t size)
{
	int start_rgn, end_rgn;
	int i, ret;

	if (!size)
		return;

	ret = memblock_isolate_range(&memblock.memory, base, size,
				     &start_rgn, &end_rgn);
	if (ret)
		return;

	for (i = memblock.memory.cnt - 1; i >= end_rgn; i--)
		if (!memblock_is_nomap(&memblock.memory.regions[i]))
			memblock_remove_region(&memblock.memory, i);

	for (i = start_rgn - 1; i >= 0; i--)
		if (!memblock_is_nomap(&memblock.memory.regions[i]))
			memblock_remove_region(&memblock.memory, i);

	memblock_remove_range(&memblock.reserved, 0, base);
	memblock_remove_range(&memblock.reserved,
			      base + size, PHYS_ADDR_MAX);
}

void __init memblock_mem_limit_remove_map(phys_addr_t limit)
{
	phys_addr_t max_addr;

	if (!limit)
		return;

	max_addr = __find_max_addr(limit);

	if (max_addr == PHYS_ADDR_MAX)
		return;

	memblock_cap_memory_range(0, max_addr);
}

static int __init_memblock memblock_search(struct memblock_type *type, phys_addr_t addr)
{
	unsigned int left = 0, right = type->cnt;

	do {
		unsigned int mid = (right + left) / 2;

		if (addr < type->regions[mid].base)
			right = mid;
		else if (addr >= (type->regions[mid].base +
				  type->regions[mid].size))
			left = mid + 1;
		else
			return mid;
	} while (left < right);
	return -1;
}

bool __init_memblock memblock_is_reserved(phys_addr_t addr)
{
	return memblock_search(&memblock.reserved, addr) != -1;
}

bool __init_memblock memblock_is_memory(phys_addr_t addr)
{
	return memblock_search(&memblock.memory, addr) != -1;
}

bool __init_memblock memblock_is_map_memory(phys_addr_t addr)
{
	int i = memblock_search(&memblock.memory, addr);

	if (i == -1)
		return false;
	return !memblock_is_nomap(&memblock.memory.regions[i]);
}

#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
int __init_memblock memblock_search_pfn_nid(unsigned long pfn,
			 unsigned long *start_pfn, unsigned long *end_pfn)
{
	struct memblock_type *type = &memblock.memory;
	int mid = memblock_search(type, PFN_PHYS(pfn));

	if (mid == -1)
		return -1;

	*start_pfn = PFN_DOWN(type->regions[mid].base);
	*end_pfn = PFN_DOWN(type->regions[mid].base + type->regions[mid].size);

	return type->regions[mid].nid;
}
#endif

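/*
 * memblock_is_region_memory - check if a region is a subset of memory
 *
 * Returns true only when the whole of [@base, @base + @size) lies inside a
 * single memblock.memory region.
 */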
bool __init_memblock memblock_is_region_memory(phys_addr_t base, phys_addr_t size)
{
	int idx = memblock_search(&memblock.memory, base);
	phys_addr_t end = base + memblock_cap_size(base, &size);

	if (idx == -1)
		return false;
	return (memblock.memory.regions[idx].base +
		memblock.memory.regions[idx].size) >= end;
}

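/*
 * memblock_is_region_reserved - check if a region intersects reserved memory
 *
 * Unlike memblock_is_region_memory(), this returns true on any overlap with
 * memblock.reserved, not only on full containment.
 */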
bool __init_memblock memblock_is_region_reserved(phys_addr_t base, phys_addr_t size)
{
	memblock_cap_size(base, &size);
	return memblock_overlaps_region(&memblock.reserved, base, size);
}

void __init_memblock memblock_trim_memory(phys_addr_t align)
{
	phys_addr_t start, end, orig_start, orig_end;
	struct memblock_region *r;

	for_each_memblock(memory, r) {
		orig_start = r->base;
		orig_end = r->base + r->size;
		start = round_up(orig_start, align);
		end = round_down(orig_end, align);

		if (start == orig_start && end == orig_end)
			continue;

		if (start < end) {
			r->base = start;
			r->size = end - start;
		} else {
			memblock_remove_region(&memblock.memory,
					       r - memblock.memory.regions);
			r--;
		}
	}
}

void __init_memblock memblock_set_current_limit(phys_addr_t limit)
{
	memblock.current_limit = limit;
}

phys_addr_t __init_memblock memblock_get_current_limit(void)
{
	return memblock.current_limit;
}

static void __init_memblock memblock_dump(struct memblock_type *type)
{
	phys_addr_t base, end, size;
	enum memblock_flags flags;
	int idx;
	struct memblock_region *rgn;

	pr_info(" %s.cnt = 0x%lx\n", type->name, type->cnt);

	for_each_memblock_type(idx, type, rgn) {
		char nid_buf[32] = "";

		base = rgn->base;
		size = rgn->size;
		end = base + size - 1;
		flags = rgn->flags;
#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
		if (memblock_get_region_node(rgn) != MAX_NUMNODES)
			snprintf(nid_buf, sizeof(nid_buf), " on node %d",
				 memblock_get_region_node(rgn));
#endif
		pr_info(" %s[%#x]\t[%pa-%pa], %pa bytes%s flags: %#x\n",
			type->name, idx, &base, &end, &size, nid_buf, flags);
	}
}

void __init_memblock __memblock_dump_all(void)
{
	pr_info("MEMBLOCK configuration:\n");
	pr_info(" memory size = %pa reserved size = %pa\n",
		&memblock.memory.total_size,
		&memblock.reserved.total_size);

	memblock_dump(&memblock.memory);
	memblock_dump(&memblock.reserved);
#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
	memblock_dump(&memblock.physmem);
#endif
}

void __init memblock_allow_resize(void)
{
	memblock_can_resize = 1;
}

static int __init early_memblock(char *p)
{
	if (p && strstr(p, "debug"))
		memblock_debug = 1;
	return 0;
}
early_param("memblock", early_memblock);

static void __init __free_pages_memory(unsigned long start, unsigned long end)
{
	int order;

	while (start < end) {
		order = min(MAX_ORDER - 1UL, __ffs(start));

		while (start + (1UL << order) > end)
			order--;

		memblock_free_pages(pfn_to_page(start), start, order);

		start += (1UL << order);
	}
}

static unsigned long __init __free_memory_core(phys_addr_t start,
				 phys_addr_t end)
{
	unsigned long start_pfn = PFN_UP(start);
	unsigned long end_pfn = min_t(unsigned long,
				      PFN_DOWN(end), max_low_pfn);

	if (start_pfn >= end_pfn)
		return 0;

	__free_pages_memory(start_pfn, end_pfn);

	return end_pfn - start_pfn;
}

static unsigned long __init free_low_memory_core_early(void)
{
	unsigned long count = 0;
	phys_addr_t start, end;
	u64 i;

	memblock_clear_hotplug(0, -1);

	for_each_reserved_mem_region(i, &start, &end)
		reserve_bootmem_region(start, end);

	for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE, &start, &end,
				NULL)
		count += __free_memory_core(start, end);

	return count;
}

static int reset_managed_pages_done __initdata;

void reset_node_managed_pages(pg_data_t *pgdat)
{
	struct zone *z;

	for (z = pgdat->node_zones; z < pgdat->node_zones + MAX_NR_ZONES; z++)
		atomic_long_set(&z->managed_pages, 0);
}

void __init reset_all_zones_managed_pages(void)
{
	struct pglist_data *pgdat;

	if (reset_managed_pages_done)
		return;

	for_each_online_pgdat(pgdat)
		reset_node_managed_pages(pgdat);

	reset_managed_pages_done = 1;
}

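/*
 * memblock_free_all - release free pages to the buddy allocator
 *
 * Called once at the end of early boot.  Adds the released pages to
 * totalram and returns their count.
 */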
unsigned long __init memblock_free_all(void)
{
	unsigned long pages;

	reset_all_zones_managed_pages();

	pages = free_low_memory_core_early();
	totalram_pages_add(pages);

	return pages;
}

#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_ARCH_KEEP_MEMBLOCK)

static int memblock_debug_show(struct seq_file *m, void *private)
{
	struct memblock_type *type = m->private;
	struct memblock_region *reg;
	int i;
	phys_addr_t end;

	for (i = 0; i < type->cnt; i++) {
		reg = &type->regions[i];
		end = reg->base + reg->size - 1;

		seq_printf(m, "%4d: ", i);
		seq_printf(m, "%pa..%pa\n", &reg->base, &end);
	}
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(memblock_debug);

static int __init memblock_init_debugfs(void)
{
	struct dentry *root = debugfs_create_dir("memblock", NULL);

	debugfs_create_file("memory", 0444, root,
			    &memblock.memory, &memblock_debug_fops);
	debugfs_create_file("reserved", 0444, root,
			    &memblock.reserved, &memblock_debug_fops);
#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
	debugfs_create_file("physmem", 0444, root,
			    &memblock.physmem, &memblock_debug_fops);
#endif

	return 0;
}
__initcall(memblock_init_debugfs);

#endif