/*
 * Procedures for maintaining information about logical memory blocks
 * (memblock) and the early boot-time allocator built on top of them.
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/poison.h>
#include <linux/pfn.h>
#include <linux/debugfs.h>
#include <linux/kmemleak.h>
#include <linux/seq_file.h>
#include <linux/memblock.h>

#include <asm/sections.h>
#include <linux/io.h>

#include "internal.h"

#define INIT_MEMBLOCK_REGIONS			128
#define INIT_PHYSMEM_REGIONS			4

#ifndef INIT_MEMBLOCK_RESERVED_REGIONS
# define INIT_MEMBLOCK_RESERVED_REGIONS		INIT_MEMBLOCK_REGIONS
#endif

/*
 * DOC: memblock overview
 *
 * Memblock is the boot-time memory manager.  It keeps two main region
 * arrays: "memory", describing the physical memory reported by the
 * firmware, and "reserved", describing ranges that were allocated or must
 * not be touched.  With CONFIG_HAVE_MEMBLOCK_PHYS_MAP an additional
 * "physmem" array keeps the unmodified map of physical memory.  Memblock
 * serves early allocations until memblock_free_all() releases the
 * remaining free memory to the page allocator.
 */

#ifndef CONFIG_NEED_MULTIPLE_NODES
struct pglist_data __refdata contig_page_data;
EXPORT_SYMBOL(contig_page_data);
#endif

unsigned long max_low_pfn;
unsigned long min_low_pfn;
unsigned long max_pfn;
unsigned long long max_possible_pfn;

static struct memblock_region memblock_memory_init_regions[INIT_MEMBLOCK_REGIONS] __initdata_memblock;
static struct memblock_region memblock_reserved_init_regions[INIT_MEMBLOCK_RESERVED_REGIONS] __initdata_memblock;
#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
static struct memblock_region memblock_physmem_init_regions[INIT_PHYSMEM_REGIONS];
#endif

struct memblock memblock __initdata_memblock = {
	.memory.regions		= memblock_memory_init_regions,
	.memory.cnt		= 1,	/* empty dummy entry */
	.memory.max		= INIT_MEMBLOCK_REGIONS,
	.memory.name		= "memory",

	.reserved.regions	= memblock_reserved_init_regions,
	.reserved.cnt		= 1,	/* empty dummy entry */
	.reserved.max		= INIT_MEMBLOCK_RESERVED_REGIONS,
	.reserved.name		= "reserved",

	.bottom_up		= false,
	.current_limit		= MEMBLOCK_ALLOC_ANYWHERE,
};

#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
struct memblock_type physmem = {
	.regions		= memblock_physmem_init_regions,
	.cnt			= 1,	/* empty dummy entry */
	.max			= INIT_PHYSMEM_REGIONS,
	.name			= "physmem",
};
#endif

/*
 * Keep a pointer to &memblock.memory in the text section so the iterators
 * below can use it.  Architectures that do not keep memblock data after
 * init reset this pointer to NULL in memblock_discard().
 */
static __refdata struct memblock_type *memblock_memory = &memblock.memory;

#define for_each_memblock_type(i, memblock_type, rgn)			\
	for (i = 0, rgn = &memblock_type->regions[0];			\
	     i < memblock_type->cnt;					\
	     i++, rgn = &memblock_type->regions[i])

#define memblock_dbg(fmt, ...)						\
	do {								\
		if (memblock_debug)					\
			pr_info(fmt, ##__VA_ARGS__);			\
	} while (0)

static int memblock_debug __initdata_memblock;
static bool system_has_some_mirror __initdata_memblock = false;
static int memblock_can_resize __initdata_memblock;
static int memblock_memory_in_slab __initdata_memblock = 0;
static int memblock_reserved_in_slab __initdata_memblock = 0;

static enum memblock_flags __init_memblock choose_memblock_flags(void)
{
	return system_has_some_mirror ? MEMBLOCK_MIRROR : MEMBLOCK_NONE;
}

/* adjust *@size so that (@base + *@size) doesn't overflow, return new size */
static inline phys_addr_t memblock_cap_size(phys_addr_t base, phys_addr_t *size)
{
	return *size = min(*size, PHYS_ADDR_MAX - base);
}

/*
 * Address comparison utilities
 */
static unsigned long __init_memblock memblock_addrs_overlap(phys_addr_t base1, phys_addr_t size1,
				       phys_addr_t base2, phys_addr_t size2)
{
	return ((base1 < (base2 + size2)) && (base2 < (base1 + size1)));
}

bool __init_memblock memblock_overlaps_region(struct memblock_type *type,
					phys_addr_t base, phys_addr_t size)
{
	unsigned long i;

	for (i = 0; i < type->cnt; i++)
		if (memblock_addrs_overlap(base, size, type->regions[i].base,
					   type->regions[i].size))
			break;
	return i < type->cnt;
}
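
/**
 * __memblock_find_range_bottom_up - find a free area, searching bottom-up
 * @start: start of candidate range
 * @end: end of candidate range
 * @size: size of free area to find
 * @align: alignment of free area to find
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 * @flags: pick from blocks based on memory attributes
 *
 * Utility called from memblock_find_in_range_node().
 *
 * Return:
 * Found address on success, 0 on failure.
 */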
static phys_addr_t __init_memblock
__memblock_find_range_bottom_up(phys_addr_t start, phys_addr_t end,
				phys_addr_t size, phys_addr_t align, int nid,
				enum memblock_flags flags)
{
	phys_addr_t this_start, this_end, cand;
	u64 i;

	for_each_free_mem_range(i, nid, flags, &this_start, &this_end, NULL) {
		this_start = clamp(this_start, start, end);
		this_end = clamp(this_end, start, end);

		cand = round_up(this_start, align);
		if (cand < this_end && this_end - cand >= size)
			return cand;
	}

	return 0;
}
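
/**
 * __memblock_find_range_top_down - find a free area, searching top-down
 * @start: start of candidate range
 * @end: end of candidate range
 * @size: size of free area to find
 * @align: alignment of free area to find
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 * @flags: pick from blocks based on memory attributes
 *
 * Utility called from memblock_find_in_range_node().
 *
 * Return:
 * Found address on success, 0 on failure.
 */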
static phys_addr_t __init_memblock
__memblock_find_range_top_down(phys_addr_t start, phys_addr_t end,
			       phys_addr_t size, phys_addr_t align, int nid,
			       enum memblock_flags flags)
{
	phys_addr_t this_start, this_end, cand;
	u64 i;

	for_each_free_mem_range_reverse(i, nid, flags, &this_start, &this_end,
					NULL) {
		this_start = clamp(this_start, start, end);
		this_end = clamp(this_end, start, end);

		if (this_end < size)
			continue;

		cand = round_down(this_end - size, align);
		if (cand >= this_start)
			return cand;
	}

	return 0;
}
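
/**
 * memblock_find_in_range_node - find free area in given range and node
 * @size: size of free area to find
 * @align: alignment of free area to find
 * @start: start of candidate range
 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_ANYWHERE or
 *       %MEMBLOCK_ALLOC_ACCESSIBLE
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 * @flags: pick from blocks based on memory attributes
 *
 * Find @size free area aligned to @align in the specified range and node.
 * In bottom-up mode the search starts above the kernel image so that the
 * allocation is likely to land on the same node as the kernel.
 *
 * Return:
 * Found address on success, 0 on failure.
 */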
static phys_addr_t __init_memblock memblock_find_in_range_node(phys_addr_t size,
					phys_addr_t align, phys_addr_t start,
					phys_addr_t end, int nid,
					enum memblock_flags flags)
{
	phys_addr_t kernel_end, ret;

	/* pump up @end */
	if (end == MEMBLOCK_ALLOC_ACCESSIBLE ||
	    end == MEMBLOCK_ALLOC_KASAN)
		end = memblock.current_limit;

	/* avoid allocating the first page */
	start = max_t(phys_addr_t, start, PAGE_SIZE);
	end = max(start, end);
	kernel_end = __pa_symbol(_end);

	/*
	 * Try bottom-up allocation only when bottom-up mode is set and
	 * @end is above the kernel image.
	 */
	if (memblock_bottom_up() && end > kernel_end) {
		phys_addr_t bottom_up_start;

		/* make sure we will allocate above the kernel */
		bottom_up_start = max(start, kernel_end);

		/* ok, try bottom-up allocation first */
		ret = __memblock_find_range_bottom_up(bottom_up_start, end,
						      size, align, nid, flags);
		if (ret)
			return ret;

		/*
		 * Bottom-up allocation is limited to the area above the
		 * kernel, while top-down allocation is not, so retrying
		 * top-down may still succeed.  Bottom-up allocation is
		 * expected to fail very rarely, so warn once to get a
		 * stack trace if it happens.
		 */
		WARN_ONCE(IS_ENABLED(CONFIG_MEMORY_HOTREMOVE),
			  "memblock: bottom-up allocation failed, memory hotremove may be affected\n");
	}

	return __memblock_find_range_top_down(start, end, size, align, nid,
					      flags);
}

/**
 * memblock_find_in_range - find free area in given range
 * @start: start of candidate range
 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_ANYWHERE or
 *       %MEMBLOCK_ALLOC_ACCESSIBLE
 * @size: size of free area to find
 * @align: alignment of free area to find
 *
 * Find @size free area aligned to @align in the specified range.
 *
 * Return:
 * Found address on success, 0 on failure.
 */
phys_addr_t __init_memblock memblock_find_in_range(phys_addr_t start,
					phys_addr_t end, phys_addr_t size,
					phys_addr_t align)
{
	phys_addr_t ret;
	enum memblock_flags flags = choose_memblock_flags();

again:
	ret = memblock_find_in_range_node(size, align, start, end,
					  NUMA_NO_NODE, flags);

	if (!ret && (flags & MEMBLOCK_MIRROR)) {
		pr_warn("Could not allocate %pap bytes of mirrored memory\n",
			&size);
		flags &= ~MEMBLOCK_MIRROR;
		goto again;
	}

	return ret;
}

static void __init_memblock memblock_remove_region(struct memblock_type *type, unsigned long r)
{
	type->total_size -= type->regions[r].size;
	memmove(&type->regions[r], &type->regions[r + 1],
		(type->cnt - (r + 1)) * sizeof(type->regions[r]));
	type->cnt--;

	/* Special case for empty arrays */
	if (type->cnt == 0) {
		WARN_ON(type->total_size != 0);
		type->cnt = 1;
		type->regions[0].base = 0;
		type->regions[0].size = 0;
		type->regions[0].flags = 0;
		memblock_set_region_node(&type->regions[0], MAX_NUMNODES);
	}
}

#ifndef CONFIG_ARCH_KEEP_MEMBLOCK
/**
 * memblock_discard - discard memory and reserved arrays if they were allocated
 */
void __init memblock_discard(void)
{
	phys_addr_t addr, size;

	if (memblock.reserved.regions != memblock_reserved_init_regions) {
		addr = __pa(memblock.reserved.regions);
		size = PAGE_ALIGN(sizeof(struct memblock_region) *
				  memblock.reserved.max);
		__memblock_free_late(addr, size);
	}

	if (memblock.memory.regions != memblock_memory_init_regions) {
		addr = __pa(memblock.memory.regions);
		size = PAGE_ALIGN(sizeof(struct memblock_region) *
				  memblock.memory.max);
		__memblock_free_late(addr, size);
	}

	memblock_memory = NULL;
}
#endif
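
/**
 * memblock_double_array - double the size of the memblock regions array
 * @type: memblock type of the regions array being doubled
 * @new_area_start: starting address of memory range to avoid overlap with
 * @new_area_size: size of memory range to avoid overlap with
 *
 * Double the size of the @type regions array.  If memblock is being used to
 * allocate memory for a new reserved regions array and there is a previously
 * allocated memory range [@new_area_start, @new_area_start + @new_area_size]
 * waiting to be reserved, ensure the memory used by the new array does not
 * overlap.
 *
 * Return:
 * 0 on success, -1 on failure.
 */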
static int __init_memblock memblock_double_array(struct memblock_type *type,
						phys_addr_t new_area_start,
						phys_addr_t new_area_size)
{
	struct memblock_region *new_array, *old_array;
	phys_addr_t old_alloc_size, new_alloc_size;
	phys_addr_t old_size, new_size, addr, new_end;
	int use_slab = slab_is_available();
	int *in_slab;

	/*
	 * We don't allow resizing until we know about the reserved regions
	 * of memory that aren't suitable for allocation.
	 */
	if (!memblock_can_resize)
		return -1;

	/* Calculate new doubled size */
	old_size = type->max * sizeof(struct memblock_region);
	new_size = old_size << 1;
	/*
	 * We need to allocate the new array aligned to PAGE_SIZE so we can
	 * free it completely later.
	 */
	old_alloc_size = PAGE_ALIGN(old_size);
	new_alloc_size = PAGE_ALIGN(new_size);

	/* Retrieve the slab flag */
	if (type == &memblock.memory)
		in_slab = &memblock_memory_in_slab;
	else
		in_slab = &memblock_reserved_in_slab;

	/* Try to find some space for it */
	if (use_slab) {
		new_array = kmalloc(new_size, GFP_KERNEL);
		addr = new_array ? __pa(new_array) : 0;
	} else {
		/* only exclude range when trying to double reserved.regions */
		if (type != &memblock.reserved)
			new_area_start = new_area_size = 0;

		addr = memblock_find_in_range(new_area_start + new_area_size,
						memblock.current_limit,
						new_alloc_size, PAGE_SIZE);
		if (!addr && new_area_size)
			addr = memblock_find_in_range(0,
				min(new_area_start, memblock.current_limit),
				new_alloc_size, PAGE_SIZE);

		new_array = addr ? __va(addr) : NULL;
	}
	if (!addr) {
		pr_err("memblock: Failed to double %s array from %ld to %ld entries !\n",
		       type->name, type->max, type->max * 2);
		return -1;
	}

	new_end = addr + new_size - 1;
	memblock_dbg("memblock: %s is doubled to %ld at [%pa-%pa]",
			type->name, type->max * 2, &addr, &new_end);

	/*
	 * Found space, we now need to move the array over before we add the
	 * reserved region since it may be our reserved array itself that is
	 * full.
	 */
	memcpy(new_array, type->regions, old_size);
	memset(new_array + type->max, 0, old_size);
	old_array = type->regions;
	type->regions = new_array;
	type->max <<= 1;

	/* Free old array. We needn't free it if the array is the static one */
	if (*in_slab)
		kfree(old_array);
	else if (old_array != memblock_memory_init_regions &&
		 old_array != memblock_reserved_init_regions)
		memblock_free(__pa(old_array), old_alloc_size);

	/*
	 * Reserve the new array if it came from memblock.  Otherwise, we
	 * needn't do it.
	 */
	if (!use_slab)
		BUG_ON(memblock_reserve(addr, new_alloc_size));

	/* Update slab flag */
	*in_slab = use_slab;

	return 0;
}
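
/**
 * memblock_merge_regions - merge neighboring compatible regions
 * @type: memblock type to scan
 *
 * Scan @type and merge neighboring compatible regions.
 */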
static void __init_memblock memblock_merge_regions(struct memblock_type *type)
{
	int i = 0;

	/* cnt never goes below 1 */
	while (i < type->cnt - 1) {
		struct memblock_region *this = &type->regions[i];
		struct memblock_region *next = &type->regions[i + 1];

		if (this->base + this->size != next->base ||
		    memblock_get_region_node(this) !=
		    memblock_get_region_node(next) ||
		    this->flags != next->flags) {
			BUG_ON(this->base + this->size > next->base);
			i++;
			continue;
		}

		this->size += next->size;
		/* move forward from next + 1, index of which is i + 2 */
		memmove(next, next + 1, (type->cnt - (i + 2)) * sizeof(*next));
		type->cnt--;
	}
}
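
/**
 * memblock_insert_region - insert new memblock region
 * @type:	memblock type to insert into
 * @idx:	index for the insertion point
 * @base:	base address of the new region
 * @size:	size of the new region
 * @nid:	node id of the new region
 * @flags:	flags of the new region
 *
 * Insert new memblock region [@base, @base + @size) into @type at @idx.
 * @type must already have extra room to accommodate the new region.
 */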
static void __init_memblock memblock_insert_region(struct memblock_type *type,
						   int idx, phys_addr_t base,
						   phys_addr_t size,
						   int nid,
						   enum memblock_flags flags)
{
	struct memblock_region *rgn = &type->regions[idx];

	BUG_ON(type->cnt >= type->max);
	memmove(rgn + 1, rgn, (type->cnt - idx) * sizeof(*rgn));
	rgn->base = base;
	rgn->size = size;
	rgn->flags = flags;
	memblock_set_region_node(rgn, nid);
	type->cnt++;
	type->total_size += size;
}
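
/**
 * memblock_add_range - add new memblock region
 * @type: memblock type to add new region into
 * @base: base address of the new region
 * @size: size of the new region
 * @nid: nid of the new region
 * @flags: flags of the new region
 *
 * Add new memblock region [@base, @base + @size) into @type.  The new region
 * is allowed to overlap with existing ones - overlapping portions are not
 * added and the regions are merged afterwards when possible.
 *
 * Return:
 * 0 on success, -errno on failure.
 */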
static int __init_memblock memblock_add_range(struct memblock_type *type,
				phys_addr_t base, phys_addr_t size,
				int nid, enum memblock_flags flags)
{
	bool insert = false;
	phys_addr_t obase = base;
	phys_addr_t end = base + memblock_cap_size(base, &size);
	int idx, nr_new;
	struct memblock_region *rgn;

	if (!size)
		return 0;

	/* special case for empty array */
	if (type->regions[0].size == 0) {
		WARN_ON(type->cnt != 1 || type->total_size);
		type->regions[0].base = base;
		type->regions[0].size = size;
		type->regions[0].flags = flags;
		memblock_set_region_node(&type->regions[0], nid);
		type->total_size = size;
		return 0;
	}
repeat:
	/*
	 * The following is executed twice.  Once with %false @insert and
	 * then with %true.  The first counts the number of regions needed
	 * to accommodate the new area.  The second actually inserts them.
	 */
	base = obase;
	nr_new = 0;

	for_each_memblock_type(idx, type, rgn) {
		phys_addr_t rbase = rgn->base;
		phys_addr_t rend = rbase + rgn->size;

		if (rbase >= end)
			break;
		if (rend <= base)
			continue;
		/*
		 * @rgn overlaps.  If it separates the lower part of new
		 * area, insert that portion.
		 */
		if (rbase > base) {
#ifdef CONFIG_NEED_MULTIPLE_NODES
			WARN_ON(nid != memblock_get_region_node(rgn));
#endif
			WARN_ON(flags != rgn->flags);
			nr_new++;
			if (insert)
				memblock_insert_region(type, idx++, base,
						       rbase - base, nid,
						       flags);
		}
		/* area below @rend is dealt with, forget about it */
		base = min(rend, end);
	}

	/* insert the remaining portion */
	if (base < end) {
		nr_new++;
		if (insert)
			memblock_insert_region(type, idx, base, end - base,
					       nid, flags);
	}

	if (!nr_new)
		return 0;

	/*
	 * If this was the first round, resize array and repeat for actual
	 * insertions; otherwise, merge and return.
	 */
	if (!insert) {
		while (type->cnt + nr_new > type->max)
			if (memblock_double_array(type, obase, size) < 0)
				return -ENOMEM;
		insert = true;
		goto repeat;
	} else {
		memblock_merge_regions(type);
		return 0;
	}
}

/**
 * memblock_add_node - add new memblock region within a NUMA node
 * @base: base address of the new region
 * @size: size of the new region
 * @nid: nid of the new region
 *
 * Add new memblock region [@base, @base + @size) to the "memory"
 * type. See memblock_add_range() description for more details.
 *
 * Return:
 * 0 on success, -errno on failure.
 */
int __init_memblock memblock_add_node(phys_addr_t base, phys_addr_t size,
				      int nid)
{
	return memblock_add_range(&memblock.memory, base, size, nid, 0);
}

/**
 * memblock_add - add new memblock region
 * @base: base address of the new region
 * @size: size of the new region
 *
 * Add new memblock region [@base, @base + @size) to the "memory"
 * type. See memblock_add_range() description for more details.
 *
 * Return:
 * 0 on success, -errno on failure.
 */
int __init_memblock memblock_add(phys_addr_t base, phys_addr_t size)
{
	phys_addr_t end = base + size - 1;

	memblock_dbg("%s: [%pa-%pa] %pS\n", __func__,
		     &base, &end, (void *)_RET_IP_);

	return memblock_add_range(&memblock.memory, base, size, MAX_NUMNODES, 0);
}
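
/**
 * memblock_isolate_range - isolate given range into disjoint memblocks
 * @type: memblock type to isolate range for
 * @base: base of range to isolate
 * @size: size of range to isolate
 * @start_rgn: out parameter for the start of isolated region
 * @end_rgn: out parameter for the end of isolated region
 *
 * Walk @type and ensure that regions don't cross the boundaries defined by
 * [@base, @base + @size).  Crossing regions are split at the boundaries,
 * which may create at most two more regions.  The index of the first
 * region inside the range is returned in *@start_rgn and the end in
 * *@end_rgn.
 *
 * Return:
 * 0 on success, -errno on failure.
 */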
static int __init_memblock memblock_isolate_range(struct memblock_type *type,
					phys_addr_t base, phys_addr_t size,
					int *start_rgn, int *end_rgn)
{
	phys_addr_t end = base + memblock_cap_size(base, &size);
	int idx;
	struct memblock_region *rgn;

	*start_rgn = *end_rgn = 0;

	if (!size)
		return 0;

	/* we'll create at most two more regions */
	while (type->cnt + 2 > type->max)
		if (memblock_double_array(type, base, size) < 0)
			return -ENOMEM;

	for_each_memblock_type(idx, type, rgn) {
		phys_addr_t rbase = rgn->base;
		phys_addr_t rend = rbase + rgn->size;

		if (rbase >= end)
			break;
		if (rend <= base)
			continue;

		if (rbase < base) {
			/*
			 * @rgn intersects from below.  Split and continue
			 * to process the next region - the new top half.
			 */
			rgn->base = base;
			rgn->size -= base - rbase;
			type->total_size -= base - rbase;
			memblock_insert_region(type, idx, rbase, base - rbase,
					       memblock_get_region_node(rgn),
					       rgn->flags);
		} else if (rend > end) {
			/*
			 * @rgn intersects from above.  Split and redo the
			 * current region - the new bottom half.
			 */
			rgn->base = end;
			rgn->size -= end - rbase;
			type->total_size -= end - rbase;
			memblock_insert_region(type, idx--, rbase, end - rbase,
					       memblock_get_region_node(rgn),
					       rgn->flags);
		} else {
			/* @rgn is fully contained, record it */
			if (!*end_rgn)
				*start_rgn = idx;
			*end_rgn = idx + 1;
		}
	}

	return 0;
}

static int __init_memblock memblock_remove_range(struct memblock_type *type,
					  phys_addr_t base, phys_addr_t size)
{
	int start_rgn, end_rgn;
	int i, ret;

	ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
	if (ret)
		return ret;

	for (i = end_rgn - 1; i >= start_rgn; i--)
		memblock_remove_region(type, i);
	return 0;
}

int __init_memblock memblock_remove(phys_addr_t base, phys_addr_t size)
{
	phys_addr_t end = base + size - 1;

	memblock_dbg("%s: [%pa-%pa] %pS\n", __func__,
		     &base, &end, (void *)_RET_IP_);

	return memblock_remove_range(&memblock.memory, base, size);
}

/**
 * memblock_free - free boot memory block
 * @base: phys starting address of the boot memory block
 * @size: size of the boot memory block in bytes
 *
 * Free a boot memory block previously allocated by the memblock_alloc_xx()
 * API.  The freed memory is not released to the buddy allocator.
 */
int __init_memblock memblock_free(phys_addr_t base, phys_addr_t size)
{
	phys_addr_t end = base + size - 1;

	memblock_dbg("%s: [%pa-%pa] %pS\n", __func__,
		     &base, &end, (void *)_RET_IP_);

	kmemleak_free_part_phys(base, size);
	return memblock_remove_range(&memblock.reserved, base, size);
}

int __init_memblock memblock_reserve(phys_addr_t base, phys_addr_t size)
{
	phys_addr_t end = base + size - 1;

	memblock_dbg("%s: [%pa-%pa] %pS\n", __func__,
		     &base, &end, (void *)_RET_IP_);

	return memblock_add_range(&memblock.reserved, base, size, MAX_NUMNODES, 0);
}

#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
int __init_memblock memblock_physmem_add(phys_addr_t base, phys_addr_t size)
{
	phys_addr_t end = base + size - 1;

	memblock_dbg("%s: [%pa-%pa] %pS\n", __func__,
		     &base, &end, (void *)_RET_IP_);

	return memblock_add_range(&physmem, base, size, MAX_NUMNODES, 0);
}
#endif
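
/**
 * memblock_setclr_flag - set or clear flag for a memory region
 * @base: base address of the region
 * @size: size of the region
 * @set: set or clear the flag
 * @flag: the flag to update
 *
 * This function isolates region [@base, @base + @size) and sets/clears flag.
 *
 * Return: 0 on success, -errno on failure.
 */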
static int __init_memblock memblock_setclr_flag(phys_addr_t base,
				phys_addr_t size, int set, int flag)
{
	struct memblock_type *type = &memblock.memory;
	int i, ret, start_rgn, end_rgn;

	ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
	if (ret)
		return ret;

	for (i = start_rgn; i < end_rgn; i++) {
		struct memblock_region *r = &type->regions[i];

		if (set)
			r->flags |= flag;
		else
			r->flags &= ~flag;
	}

	memblock_merge_regions(type);
	return 0;
}

/**
 * memblock_mark_hotplug - Mark hotpluggable memory with flag MEMBLOCK_HOTPLUG.
 * @base: the base phys addr of the memory region
 * @size: the size of the memory region
 *
 * Return: 0 on success, -errno on failure.
 */
int __init_memblock memblock_mark_hotplug(phys_addr_t base, phys_addr_t size)
{
	return memblock_setclr_flag(base, size, 1, MEMBLOCK_HOTPLUG);
}

/**
 * memblock_clear_hotplug - Clear flag MEMBLOCK_HOTPLUG for a specified region.
 * @base: the base phys addr of the memory region
 * @size: the size of the memory region
 *
 * Return: 0 on success, -errno on failure.
 */
int __init_memblock memblock_clear_hotplug(phys_addr_t base, phys_addr_t size)
{
	return memblock_setclr_flag(base, size, 0, MEMBLOCK_HOTPLUG);
}

/**
 * memblock_mark_mirror - Mark mirrored memory with flag MEMBLOCK_MIRROR.
 * @base: the base phys addr of the memory region
 * @size: the size of the memory region
 *
 * Return: 0 on success, -errno on failure.
 */
int __init_memblock memblock_mark_mirror(phys_addr_t base, phys_addr_t size)
{
	system_has_some_mirror = true;

	return memblock_setclr_flag(base, size, 1, MEMBLOCK_MIRROR);
}

/**
 * memblock_mark_nomap - Mark a memory region with flag MEMBLOCK_NOMAP.
 * @base: the base phys addr of the memory region
 * @size: the size of the memory region
 *
 * Return: 0 on success, -errno on failure.
 */
int __init_memblock memblock_mark_nomap(phys_addr_t base, phys_addr_t size)
{
	return memblock_setclr_flag(base, size, 1, MEMBLOCK_NOMAP);
}

/**
 * memblock_clear_nomap - Clear flag MEMBLOCK_NOMAP for a specified region.
 * @base: the base phys addr of the memory region
 * @size: the size of the memory region
 *
 * Return: 0 on success, -errno on failure.
 */
int __init_memblock memblock_clear_nomap(phys_addr_t base, phys_addr_t size)
{
	return memblock_setclr_flag(base, size, 0, MEMBLOCK_NOMAP);
}

static bool should_skip_region(struct memblock_type *type,
			       struct memblock_region *m,
			       int nid, int flags)
{
	int m_nid = memblock_get_region_node(m);

	/* we never skip regions when iterating memblock.reserved or physmem */
	if (type != memblock_memory)
		return false;

	/* only memory regions are associated with nodes, check it */
	if (nid != NUMA_NO_NODE && nid != m_nid)
		return true;

	/* skip hotpluggable memory regions if needed */
	if (movable_node_is_enabled() && memblock_is_hotpluggable(m))
		return true;

	/* if we want mirror memory skip non-mirror memory regions */
	if ((flags & MEMBLOCK_MIRROR) && !memblock_is_mirror(m))
		return true;

	/* skip nomap memory unless we were asked for it explicitly */
	if (!(flags & MEMBLOCK_NOMAP) && memblock_is_nomap(m))
		return true;

	return false;
}
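
/**
 * __next_mem_range - next function for for_each_free_mem_range() etc.
 * @idx: pointer to u64 loop variable
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @type_a: pointer to memblock_type from where the range is taken
 * @type_b: pointer to memblock_type which excludes memory from being taken
 * @out_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @out_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @out_nid: ptr to int for nid of the range, can be %NULL
 *
 * Find the first area from *@idx which matches @nid, fill the out
 * parameters, and update *@idx for the next iteration.  The lower 32 bits
 * of *@idx index into @type_a and the upper 32 bits index the areas
 * before each region in @type_b.  As both region arrays are sorted, the
 * two indices advance in lockstep and each call returns the next
 * intersection of a @type_a region with a gap in @type_b.
 */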
void __next_mem_range(u64 *idx, int nid, enum memblock_flags flags,
		      struct memblock_type *type_a,
		      struct memblock_type *type_b, phys_addr_t *out_start,
		      phys_addr_t *out_end, int *out_nid)
{
	int idx_a = *idx & 0xffffffff;
	int idx_b = *idx >> 32;

	if (WARN_ONCE(nid == MAX_NUMNODES,
		      "Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n"))
		nid = NUMA_NO_NODE;

	for (; idx_a < type_a->cnt; idx_a++) {
		struct memblock_region *m = &type_a->regions[idx_a];

		phys_addr_t m_start = m->base;
		phys_addr_t m_end = m->base + m->size;
		int m_nid = memblock_get_region_node(m);

		if (should_skip_region(type_a, m, nid, flags))
			continue;

		if (!type_b) {
			if (out_start)
				*out_start = m_start;
			if (out_end)
				*out_end = m_end;
			if (out_nid)
				*out_nid = m_nid;
			idx_a++;
			*idx = (u32)idx_a | (u64)idx_b << 32;
			return;
		}

		/* scan areas before each reservation */
		for (; idx_b < type_b->cnt + 1; idx_b++) {
			struct memblock_region *r;
			phys_addr_t r_start;
			phys_addr_t r_end;

			r = &type_b->regions[idx_b];
			r_start = idx_b ? r[-1].base + r[-1].size : 0;
			r_end = idx_b < type_b->cnt ?
				r->base : PHYS_ADDR_MAX;

			/*
			 * if idx_b advanced past idx_a,
			 * break out to advance idx_a
			 */
			if (r_start >= m_end)
				break;
			/* if the two regions intersect, we're done */
			if (m_start < r_end) {
				if (out_start)
					*out_start =
						max(m_start, r_start);
				if (out_end)
					*out_end = min(m_end, r_end);
				if (out_nid)
					*out_nid = m_nid;
				/*
				 * The region which ends first is
				 * advanced for the next iteration.
				 */
				if (m_end <= r_end)
					idx_a++;
				else
					idx_b++;
				*idx = (u32)idx_a | (u64)idx_b << 32;
				return;
			}
		}
	}

	/* signal end of iteration */
	*idx = ULLONG_MAX;
}
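
/**
 * __next_mem_range_rev - generic next function for for_each_*_range_rev()
 * @idx: pointer to u64 loop variable
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @type_a: pointer to memblock_type from where the range is taken
 * @type_b: pointer to memblock_type which excludes memory from being taken
 * @out_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @out_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @out_nid: ptr to int for nid of the range, can be %NULL
 *
 * Find the next range from @type_a which is not covered by @type_b,
 * walking both arrays in reverse.
 *
 * Reverse of __next_mem_range().
 */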
void __init_memblock __next_mem_range_rev(u64 *idx, int nid,
					  enum memblock_flags flags,
					  struct memblock_type *type_a,
					  struct memblock_type *type_b,
					  phys_addr_t *out_start,
					  phys_addr_t *out_end, int *out_nid)
{
	int idx_a = *idx & 0xffffffff;
	int idx_b = *idx >> 32;

	if (WARN_ONCE(nid == MAX_NUMNODES, "Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n"))
		nid = NUMA_NO_NODE;

	if (*idx == (u64)ULLONG_MAX) {
		idx_a = type_a->cnt - 1;
		if (type_b != NULL)
			idx_b = type_b->cnt;
		else
			idx_b = 0;
	}

	for (; idx_a >= 0; idx_a--) {
		struct memblock_region *m = &type_a->regions[idx_a];

		phys_addr_t m_start = m->base;
		phys_addr_t m_end = m->base + m->size;
		int m_nid = memblock_get_region_node(m);

		if (should_skip_region(type_a, m, nid, flags))
			continue;

		if (!type_b) {
			if (out_start)
				*out_start = m_start;
			if (out_end)
				*out_end = m_end;
			if (out_nid)
				*out_nid = m_nid;
			idx_a--;
			*idx = (u32)idx_a | (u64)idx_b << 32;
			return;
		}

		/* scan areas before each reservation */
		for (; idx_b >= 0; idx_b--) {
			struct memblock_region *r;
			phys_addr_t r_start;
			phys_addr_t r_end;

			r = &type_b->regions[idx_b];
			r_start = idx_b ? r[-1].base + r[-1].size : 0;
			r_end = idx_b < type_b->cnt ?
				r->base : PHYS_ADDR_MAX;
			/*
			 * if idx_b advanced past idx_a,
			 * break out to advance idx_a
			 */
			if (r_end <= m_start)
				break;
			/* if the two regions intersect, we're done */
			if (m_end > r_start) {
				if (out_start)
					*out_start = max(m_start, r_start);
				if (out_end)
					*out_end = min(m_end, r_end);
				if (out_nid)
					*out_nid = m_nid;
				if (m_start >= r_start)
					idx_a--;
				else
					idx_b--;
				*idx = (u32)idx_a | (u64)idx_b << 32;
				return;
			}
		}
	}
	/* signal end of iteration */
	*idx = ULLONG_MAX;
}

/*
 * Common iterator interface used to define for_each_mem_pfn_range().
 */
void __init_memblock __next_mem_pfn_range(int *idx, int nid,
				unsigned long *out_start_pfn,
				unsigned long *out_end_pfn, int *out_nid)
{
	struct memblock_type *type = &memblock.memory;
	struct memblock_region *r;
	int r_nid;

	while (++*idx < type->cnt) {
		r = &type->regions[*idx];
		r_nid = memblock_get_region_node(r);

		if (PFN_UP(r->base) >= PFN_DOWN(r->base + r->size))
			continue;
		if (nid == MAX_NUMNODES || nid == r_nid)
			break;
	}
	if (*idx >= type->cnt) {
		*idx = -1;
		return;
	}

	if (out_start_pfn)
		*out_start_pfn = PFN_UP(r->base);
	if (out_end_pfn)
		*out_end_pfn = PFN_DOWN(r->base + r->size);
	if (out_nid)
		*out_nid = r_nid;
}
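
/**
 * memblock_set_node - set node ID on memblock regions
 * @base: base of area to set node ID for
 * @size: size of area to set node ID for
 * @type: memblock type to set node ID for
 * @nid: node ID to set
 *
 * Set the nid of memblock @type regions in [@base, @base + @size) to @nid.
 * Regions which cross the area boundaries are split as necessary.
 *
 * Return:
 * 0 on success, -errno on failure.
 */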
int __init_memblock memblock_set_node(phys_addr_t base, phys_addr_t size,
				      struct memblock_type *type, int nid)
{
#ifdef CONFIG_NEED_MULTIPLE_NODES
	int start_rgn, end_rgn;
	int i, ret;

	ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
	if (ret)
		return ret;

	for (i = start_rgn; i < end_rgn; i++)
		memblock_set_region_node(&type->regions[i], nid);

	memblock_merge_regions(type);
#endif
	return 0;
}

#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
/**
 * __next_mem_pfn_range_in_zone - iterator for for_each_*_range_in_zone()
 *
 * @idx: pointer to u64 loop variable
 * @zone: zone in which all of the memory blocks reside
 * @out_spfn: ptr to ulong for start pfn of the range, can be %NULL
 * @out_epfn: ptr to ulong for end pfn of the range, can be %NULL
 *
 * A zone/pfn specific wrapper for the for_each_mem_range() type iterators,
 * used by the deferred memory init routines to walk the free PFN ranges
 * within a single zone.
 */
void __init_memblock
__next_mem_pfn_range_in_zone(u64 *idx, struct zone *zone,
			     unsigned long *out_spfn, unsigned long *out_epfn)
{
	int zone_nid = zone_to_nid(zone);
	phys_addr_t spa, epa;
	int nid;

	__next_mem_range(idx, zone_nid, MEMBLOCK_NONE,
			 &memblock.memory, &memblock.reserved,
			 &spa, &epa, &nid);

	while (*idx != U64_MAX) {
		unsigned long epfn = PFN_DOWN(epa);
		unsigned long spfn = PFN_UP(spa);

		/*
		 * Verify the end is at least past the start of the zone and
		 * that we have at least one PFN to initialize.
		 */
		if (zone->zone_start_pfn < epfn && spfn < epfn) {
			/* if we went too far just stop searching */
			if (zone_end_pfn(zone) <= spfn) {
				*idx = U64_MAX;
				break;
			}

			if (out_spfn)
				*out_spfn = max(zone->zone_start_pfn, spfn);
			if (out_epfn)
				*out_epfn = min(zone_end_pfn(zone), epfn);

			return;
		}

		__next_mem_range(idx, zone_nid, MEMBLOCK_NONE,
				 &memblock.memory, &memblock.reserved,
				 &spa, &epa, &nid);
	}

	/* signal end of iteration */
	if (out_spfn)
		*out_spfn = ULONG_MAX;
	if (out_epfn)
		*out_epfn = 0;
}

#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */
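
/**
 * memblock_alloc_range_nid - allocate boot memory block
 * @size: size of memory block to be allocated in bytes
 * @align: alignment of the region and block's size
 * @start: the lower bound of the memory region to allocate (phys address)
 * @end: the upper bound of the memory region to allocate (phys address)
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 * @exact_nid: control the allocation fall back to other nodes
 *
 * The allocation is performed from memory region limited by
 * memblock.current_limit if @end == %MEMBLOCK_ALLOC_ACCESSIBLE.
 *
 * If the specified node can not hold the requested memory and @exact_nid
 * is false, the allocation falls back to any node in the system.
 *
 * In addition, the function passes min_count of 0 to kmemleak_alloc_phys()
 * for the allocated block, so that it is never reported as a leak.
 *
 * Return:
 * Physical address of allocated memory block on success, %0 on failure.
 */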
phys_addr_t __init memblock_alloc_range_nid(phys_addr_t size,
					phys_addr_t align, phys_addr_t start,
					phys_addr_t end, int nid,
					bool exact_nid)
{
	enum memblock_flags flags = choose_memblock_flags();
	phys_addr_t found;

	if (WARN_ONCE(nid == MAX_NUMNODES, "Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n"))
		nid = NUMA_NO_NODE;

	if (!align) {
		/* Can't use WARN_ON() this early in boot */
		dump_stack();
		align = SMP_CACHE_BYTES;
	}

again:
	found = memblock_find_in_range_node(size, align, start, end, nid,
					    flags);
	if (found && !memblock_reserve(found, size))
		goto done;

	if (nid != NUMA_NO_NODE && !exact_nid) {
		found = memblock_find_in_range_node(size, align, start,
						    end, NUMA_NO_NODE,
						    flags);
		if (found && !memblock_reserve(found, size))
			goto done;
	}

	if (flags & MEMBLOCK_MIRROR) {
		flags &= ~MEMBLOCK_MIRROR;
		pr_warn("Could not allocate %pap bytes of mirrored memory\n",
			&size);
		goto again;
	}

	return 0;

done:
	/* Skip kmemleak for kasan_init() due to high volume. */
	if (end != MEMBLOCK_ALLOC_KASAN)
		/*
		 * The min_count is set to 0 so that memblock allocated
		 * blocks are never reported as leaks. This is because many
		 * of these blocks are only referred via the physical
		 * address which is not looked up by kmemleak.
		 */
		kmemleak_alloc_phys(found, size, 0, 0);

	return found;
}

/**
 * memblock_phys_alloc_range - allocate a memory block inside specified range
 * @size: size of memory block to be allocated in bytes
 * @align: alignment of the region and block's size
 * @start: the lower bound of the memory region to allocate (physical address)
 * @end: the upper bound of the memory region to allocate (physical address)
 *
 * Allocate @size bytes in the range between @start and @end.
 *
 * Return: physical address of the allocated memory block on success,
 * %0 on failure.
 */
phys_addr_t __init memblock_phys_alloc_range(phys_addr_t size,
					     phys_addr_t align,
					     phys_addr_t start,
					     phys_addr_t end)
{
	return memblock_alloc_range_nid(size, align, start, end, NUMA_NO_NODE,
					false);
}

/**
 * memblock_phys_alloc_try_nid - allocate a memory block from specified NUMA node
 * @size: size of memory block to be allocated in bytes
 * @align: alignment of the region and block's size
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 *
 * Allocates memory block from the specified NUMA node. If the node
 * has no available memory, allocates from any node in the system.
 *
 * Return: physical address of the allocated memory block on success,
 * %0 on failure.
 */
phys_addr_t __init memblock_phys_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid)
{
	return memblock_alloc_range_nid(size, align, 0,
					MEMBLOCK_ALLOC_ACCESSIBLE, nid, false);
}
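
/**
 * memblock_alloc_internal - allocate boot memory block
 * @size: size of memory block to be allocated in bytes
 * @align: alignment of the region and block's size
 * @min_addr: the lower bound of the memory region to allocate (phys address)
 * @max_addr: the upper bound of the memory region to allocate (phys address)
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 * @exact_nid: control the allocation fall back to other nodes
 *
 * Allocates a memory block using memblock_alloc_range_nid() and converts
 * the returned physical address to a virtual one.
 *
 * The @min_addr limit is dropped if it can not be satisfied and the
 * allocation will fall back to memory below @min_addr.  Other constraints,
 * such as node and mirrored memory, are handled again in
 * memblock_alloc_range_nid().
 *
 * Return:
 * Virtual address of allocated memory block on success, NULL on failure.
 */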
static void * __init memblock_alloc_internal(
				phys_addr_t size, phys_addr_t align,
				phys_addr_t min_addr, phys_addr_t max_addr,
				int nid, bool exact_nid)
{
	phys_addr_t alloc;

	/*
	 * Detect any accidental use of these APIs after slab is ready, as at
	 * this moment memblock may be deinitialized already and its
	 * internal data may be destroyed (after execution of memblock_free_all)
	 */
	if (WARN_ON_ONCE(slab_is_available()))
		return kzalloc_node(size, GFP_NOWAIT, nid);

	if (max_addr > memblock.current_limit)
		max_addr = memblock.current_limit;

	alloc = memblock_alloc_range_nid(size, align, min_addr, max_addr, nid,
					 exact_nid);

	/* retry allocation without lower limit */
	if (!alloc && min_addr)
		alloc = memblock_alloc_range_nid(size, align, 0, max_addr, nid,
						 exact_nid);

	if (!alloc)
		return NULL;

	return phys_to_virt(alloc);
}

/**
 * memblock_alloc_exact_nid_raw - allocate boot memory block on the exact node
 * without zeroing memory
 * @size: size of memory block to be allocated in bytes
 * @align: alignment of the region and block's size
 * @min_addr: the lower bound of the memory region from where the allocation
 *	  is preferred (phys address)
 * @max_addr: the upper bound of the memory region from where the allocation
 *	      is preferred (phys address), or %MEMBLOCK_ALLOC_ACCESSIBLE to
 *	      allocate only from memory limited by memblock.current_limit value
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 *
 * Public function, provides additional debug information (including caller
 * info), if enabled. Does not zero allocated memory.
 *
 * Return:
 * Virtual address of allocated memory block on success, NULL on failure.
 */
void * __init memblock_alloc_exact_nid_raw(
			phys_addr_t size, phys_addr_t align,
			phys_addr_t min_addr, phys_addr_t max_addr,
			int nid)
{
	void *ptr;

	memblock_dbg("%s: %llu bytes align=0x%llx nid=%d from=%pa max_addr=%pa %pS\n",
		     __func__, (u64)size, (u64)align, nid, &min_addr,
		     &max_addr, (void *)_RET_IP_);

	ptr = memblock_alloc_internal(size, align,
					   min_addr, max_addr, nid, true);
	if (ptr && size > 0)
		page_init_poison(ptr, size);

	return ptr;
}
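
/**
 * memblock_alloc_try_nid_raw - allocate boot memory block without zeroing
 * memory and without panicking
 * @size: size of memory block to be allocated in bytes
 * @align: alignment of the region and block's size
 * @min_addr: the lower bound of the memory region from where the allocation
 *	  is preferred (phys address)
 * @max_addr: the upper bound of the memory region from where the allocation
 *	      is preferred (phys address), or %MEMBLOCK_ALLOC_ACCESSIBLE to
 *	      allocate only from memory limited by memblock.current_limit value
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 *
 * Public function, provides additional debug information (including caller
 * info), if enabled. Does not zero allocated memory, does not panic if request
 * cannot be satisfied.
 *
 * Return:
 * Virtual address of allocated memory block on success, NULL on failure.
 */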
void * __init memblock_alloc_try_nid_raw(
			phys_addr_t size, phys_addr_t align,
			phys_addr_t min_addr, phys_addr_t max_addr,
			int nid)
{
	void *ptr;

	memblock_dbg("%s: %llu bytes align=0x%llx nid=%d from=%pa max_addr=%pa %pS\n",
		     __func__, (u64)size, (u64)align, nid, &min_addr,
		     &max_addr, (void *)_RET_IP_);

	ptr = memblock_alloc_internal(size, align,
					   min_addr, max_addr, nid, false);
	if (ptr && size > 0)
		page_init_poison(ptr, size);

	return ptr;
}
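
/**
 * memblock_alloc_try_nid - allocate boot memory block
 * @size: size of memory block to be allocated in bytes
 * @align: alignment of the region and block's size
 * @min_addr: the lower bound of the memory region from where the allocation
 *	  is preferred (phys address)
 * @max_addr: the upper bound of the memory region from where the allocation
 *	      is preferred (phys address), or %MEMBLOCK_ALLOC_ACCESSIBLE to
 *	      allocate only from memory limited by memblock.current_limit value
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 *
 * Public function, provides additional debug information (including caller
 * info), if enabled. This function zeroes the allocated memory.
 *
 * Return:
 * Virtual address of allocated memory block on success, NULL on failure.
 */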
void * __init memblock_alloc_try_nid(
			phys_addr_t size, phys_addr_t align,
			phys_addr_t min_addr, phys_addr_t max_addr,
			int nid)
{
	void *ptr;

	memblock_dbg("%s: %llu bytes align=0x%llx nid=%d from=%pa max_addr=%pa %pS\n",
		     __func__, (u64)size, (u64)align, nid, &min_addr,
		     &max_addr, (void *)_RET_IP_);
	ptr = memblock_alloc_internal(size, align,
					   min_addr, max_addr, nid, false);
	if (ptr)
		memset(ptr, 0, size);

	return ptr;
}

/**
 * __memblock_free_late - free pages directly to the buddy allocator
 * @base: phys starting address of the boot memory block
 * @size: size of the boot memory block in bytes
 *
 * Only useful once the memblock allocator has already been torn down but
 * the system is still initializing; the freed pages are handed directly to
 * the buddy allocator.
 */
void __init __memblock_free_late(phys_addr_t base, phys_addr_t size)
{
	phys_addr_t cursor, end;

	end = base + size - 1;
	memblock_dbg("%s: [%pa-%pa] %pS\n",
		     __func__, &base, &end, (void *)_RET_IP_);
	kmemleak_free_part_phys(base, size);
	cursor = PFN_UP(base);
	end = PFN_DOWN(base + size);

	for (; cursor < end; cursor++) {
		memblock_free_pages(pfn_to_page(cursor), cursor, 0);
		totalram_pages_inc();
	}
}

/*
 * Remaining API functions
 */

phys_addr_t __init_memblock memblock_phys_mem_size(void)
{
	return memblock.memory.total_size;
}

phys_addr_t __init_memblock memblock_reserved_size(void)
{
	return memblock.reserved.total_size;
}

/* lowest address */
phys_addr_t __init_memblock memblock_start_of_DRAM(void)
{
	return memblock.memory.regions[0].base;
}

phys_addr_t __init_memblock memblock_end_of_DRAM(void)
{
	int idx = memblock.memory.cnt - 1;

	return (memblock.memory.regions[idx].base + memblock.memory.regions[idx].size);
}

static phys_addr_t __init_memblock __find_max_addr(phys_addr_t limit)
{
	phys_addr_t max_addr = PHYS_ADDR_MAX;
	struct memblock_region *r;

	/*
	 * Translate the memory @limit size into the max address within one
	 * of the memory memblock regions.  If @limit exceeds the total size
	 * of those regions, max_addr keeps its original PHYS_ADDR_MAX value.
	 */
	for_each_mem_region(r) {
		if (limit <= r->size) {
			max_addr = r->base + limit;
			break;
		}
		limit -= r->size;
	}

	return max_addr;
}

void __init memblock_enforce_memory_limit(phys_addr_t limit)
{
	phys_addr_t max_addr;

	if (!limit)
		return;

	max_addr = __find_max_addr(limit);

	/* @limit exceeds the total size of the memory, do nothing */
	if (max_addr == PHYS_ADDR_MAX)
		return;

	/* truncate both memory and reserved regions */
	memblock_remove_range(&memblock.memory, max_addr,
			      PHYS_ADDR_MAX);
	memblock_remove_range(&memblock.reserved, max_addr,
			      PHYS_ADDR_MAX);
}

void __init memblock_cap_memory_range(phys_addr_t base, phys_addr_t size)
{
	int start_rgn, end_rgn;
	int i, ret;

	if (!size)
		return;

	ret = memblock_isolate_range(&memblock.memory, base, size,
				     &start_rgn, &end_rgn);
	if (ret)
		return;

	/* remove all the MAP regions outside [base, base + size) */
	for (i = memblock.memory.cnt - 1; i >= end_rgn; i--)
		if (!memblock_is_nomap(&memblock.memory.regions[i]))
			memblock_remove_region(&memblock.memory, i);

	for (i = start_rgn - 1; i >= 0; i--)
		if (!memblock_is_nomap(&memblock.memory.regions[i]))
			memblock_remove_region(&memblock.memory, i);

	/* truncate the reserved regions */
	memblock_remove_range(&memblock.reserved, 0, base);
	memblock_remove_range(&memblock.reserved,
			base + size, PHYS_ADDR_MAX);
}

void __init memblock_mem_limit_remove_map(phys_addr_t limit)
{
	phys_addr_t max_addr;

	if (!limit)
		return;

	max_addr = __find_max_addr(limit);

	/* @limit exceeds the total size of the memory, do nothing */
	if (max_addr == PHYS_ADDR_MAX)
		return;

	memblock_cap_memory_range(0, max_addr);
}

static int __init_memblock memblock_search(struct memblock_type *type, phys_addr_t addr)
{
	unsigned int left = 0, right = type->cnt;

	do {
		unsigned int mid = (right + left) / 2;

		if (addr < type->regions[mid].base)
			right = mid;
		else if (addr >= (type->regions[mid].base +
				  type->regions[mid].size))
			left = mid + 1;
		else
			return mid;
	} while (left < right);
	return -1;
}

bool __init_memblock memblock_is_reserved(phys_addr_t addr)
{
	return memblock_search(&memblock.reserved, addr) != -1;
}

bool __init_memblock memblock_is_memory(phys_addr_t addr)
{
	return memblock_search(&memblock.memory, addr) != -1;
}

bool __init_memblock memblock_is_map_memory(phys_addr_t addr)
{
	int i = memblock_search(&memblock.memory, addr);

	if (i == -1)
		return false;
	return !memblock_is_nomap(&memblock.memory.regions[i]);
}

int __init_memblock memblock_search_pfn_nid(unsigned long pfn,
			 unsigned long *start_pfn, unsigned long *end_pfn)
{
	struct memblock_type *type = &memblock.memory;
	int mid = memblock_search(type, PFN_PHYS(pfn));

	if (mid == -1)
		return -1;

	*start_pfn = PFN_DOWN(type->regions[mid].base);
	*end_pfn = PFN_DOWN(type->regions[mid].base + type->regions[mid].size);

	return memblock_get_region_node(&type->regions[mid]);
}

/**
 * memblock_is_region_memory - check if a region is a subset of memory
 * @base: base of region to check
 * @size: size of region to check
 *
 * Check if the region [@base, @base + @size) is a subset of a memory block.
 *
 * Return:
 * 0 if false, non-zero if true
 */
bool __init_memblock memblock_is_region_memory(phys_addr_t base, phys_addr_t size)
{
	int idx = memblock_search(&memblock.memory, base);
	phys_addr_t end = base + memblock_cap_size(base, &size);

	if (idx == -1)
		return false;
	return (memblock.memory.regions[idx].base +
		 memblock.memory.regions[idx].size) >= end;
}

/**
 * memblock_is_region_reserved - check if a region intersects reserved memory
 * @base: base of region to check
 * @size: size of region to check
 *
 * Check if the region [@base, @base + @size) intersects a reserved
 * memory block.
 *
 * Return:
 * True if they intersect, false if not.
 */
bool __init_memblock memblock_is_region_reserved(phys_addr_t base, phys_addr_t size)
{
	memblock_cap_size(base, &size);
	return memblock_overlaps_region(&memblock.reserved, base, size);
}

void __init_memblock memblock_trim_memory(phys_addr_t align)
{
	phys_addr_t start, end, orig_start, orig_end;
	struct memblock_region *r;

	for_each_mem_region(r) {
		orig_start = r->base;
		orig_end = r->base + r->size;
		start = round_up(orig_start, align);
		end = round_down(orig_end, align);

		if (start == orig_start && end == orig_end)
			continue;

		if (start < end) {
			r->base = start;
			r->size = end - start;
		} else {
			memblock_remove_region(&memblock.memory,
					       r - memblock.memory.regions);
			r--;
		}
	}
}

void __init_memblock memblock_set_current_limit(phys_addr_t limit)
{
	memblock.current_limit = limit;
}

phys_addr_t __init_memblock memblock_get_current_limit(void)
{
	return memblock.current_limit;
}

static void __init_memblock memblock_dump(struct memblock_type *type)
{
	phys_addr_t base, end, size;
	enum memblock_flags flags;
	int idx;
	struct memblock_region *rgn;

	pr_info(" %s.cnt = 0x%lx\n", type->name, type->cnt);

	for_each_memblock_type(idx, type, rgn) {
		char nid_buf[32] = "";

		base = rgn->base;
		size = rgn->size;
		end = base + size - 1;
		flags = rgn->flags;
#ifdef CONFIG_NEED_MULTIPLE_NODES
		if (memblock_get_region_node(rgn) != MAX_NUMNODES)
			snprintf(nid_buf, sizeof(nid_buf), " on node %d",
				 memblock_get_region_node(rgn));
#endif
		pr_info(" %s[%#x]\t[%pa-%pa], %pa bytes%s flags: %#x\n",
			type->name, idx, &base, &end, &size, nid_buf, flags);
	}
}

static void __init_memblock __memblock_dump_all(void)
{
	pr_info("MEMBLOCK configuration:\n");
	pr_info(" memory size = %pa reserved size = %pa\n",
		&memblock.memory.total_size,
		&memblock.reserved.total_size);

	memblock_dump(&memblock.memory);
	memblock_dump(&memblock.reserved);
#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
	memblock_dump(&physmem);
#endif
}

void __init_memblock memblock_dump_all(void)
{
	if (memblock_debug)
		__memblock_dump_all();
}

void __init memblock_allow_resize(void)
{
	memblock_can_resize = 1;
}

static int __init early_memblock(char *p)
{
	if (p && strstr(p, "debug"))
		memblock_debug = 1;
	return 0;
}
early_param("memblock", early_memblock);

static void __init __free_pages_memory(unsigned long start, unsigned long end)
{
	int order;

	while (start < end) {
		order = min(MAX_ORDER - 1UL, __ffs(start));

		while (start + (1UL << order) > end)
			order--;

		memblock_free_pages(pfn_to_page(start), start, order);

		start += (1UL << order);
	}
}

static unsigned long __init __free_memory_core(phys_addr_t start,
				 phys_addr_t end)
{
	unsigned long start_pfn = PFN_UP(start);
	unsigned long end_pfn = min_t(unsigned long,
				      PFN_DOWN(end), max_low_pfn);

	if (start_pfn >= end_pfn)
		return 0;

	__free_pages_memory(start_pfn, end_pfn);

	return end_pfn - start_pfn;
}

static unsigned long __init free_low_memory_core_early(void)
{
	unsigned long count = 0;
	phys_addr_t start, end;
	u64 i;

	memblock_clear_hotplug(0, -1);

	for_each_reserved_mem_range(i, &start, &end)
		reserve_bootmem_region(start, end);

	/*
	 * We need to use NUMA_NO_NODE instead of NODE_DATA(0)->node_id
	 * because node 0 may have no RAM installed and low memory may
	 * then live on another node.
	 */
	for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE, &start, &end,
				NULL)
		count += __free_memory_core(start, end);

	return count;
}

static int reset_managed_pages_done __initdata;

void reset_node_managed_pages(pg_data_t *pgdat)
{
	struct zone *z;

	for (z = pgdat->node_zones; z < pgdat->node_zones + MAX_NR_ZONES; z++)
		atomic_long_set(&z->managed_pages, 0);
}

void __init reset_all_zones_managed_pages(void)
{
	struct pglist_data *pgdat;

	if (reset_managed_pages_done)
		return;

	for_each_online_pgdat(pgdat)
		reset_node_managed_pages(pgdat);

	reset_managed_pages_done = 1;
}
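
/**
 * memblock_free_all - release free pages to the buddy allocator
 *
 * Return: the number of pages actually released.
 */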
unsigned long __init memblock_free_all(void)
{
	unsigned long pages;

	reset_all_zones_managed_pages();

	pages = free_low_memory_core_early();
	totalram_pages_add(pages);

	return pages;
}

#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_ARCH_KEEP_MEMBLOCK)

static int memblock_debug_show(struct seq_file *m, void *private)
{
	struct memblock_type *type = m->private;
	struct memblock_region *reg;
	int i;
	phys_addr_t end;

	for (i = 0; i < type->cnt; i++) {
		reg = &type->regions[i];
		end = reg->base + reg->size - 1;

		seq_printf(m, "%4d: ", i);
		seq_printf(m, "%pa..%pa\n", &reg->base, &end);
	}
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(memblock_debug);

static int __init memblock_init_debugfs(void)
{
	struct dentry *root = debugfs_create_dir("memblock", NULL);

	debugfs_create_file("memory", 0444, root,
			    &memblock.memory, &memblock_debug_fops);
	debugfs_create_file("reserved", 0444, root,
			    &memblock.reserved, &memblock_debug_fops);
#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
	debugfs_create_file("physmem", 0444, root, &physmem,
			    &memblock_debug_fops);
#endif

	return 0;
}
__initcall(memblock_init_debugfs);

#endif /* CONFIG_DEBUG_FS && CONFIG_ARCH_KEEP_MEMBLOCK */