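// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Procedures for maintaining information about logical memory blocks.
 *
 * Peter Bergner, IBM Corp.	June 2001.
 * Copyright (C) 2001 Peter Bergner.
 */
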
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/poison.h>
#include <linux/pfn.h>
#include <linux/debugfs.h>
#include <linux/kmemleak.h>
#include <linux/seq_file.h>
#include <linux/memblock.h>

#include <asm/sections.h>
#include <linux/io.h>

#include "internal.h"

#define INIT_MEMBLOCK_REGIONS 128
#define INIT_PHYSMEM_REGIONS 4

#ifndef INIT_MEMBLOCK_RESERVED_REGIONS
# define INIT_MEMBLOCK_RESERVED_REGIONS INIT_MEMBLOCK_REGIONS
#endif
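
/**
 * DOC: memblock overview
 *
 * Memblock is a method of managing memory regions during the early
 * boot period when the usual kernel memory allocators are not up and
 * running.
 *
 * Memblock views the system memory as collections of contiguous
 * regions. There are several types of these collections:
 *
 * * ``memory`` - describes the physical memory available to the
 *   kernel; this may differ from the actual physical memory installed
 *   in the system, for instance when the memory is restricted with
 *   ``mem=`` command line parameter
 * * ``reserved`` - describes the regions that were allocated
 * * ``physmem`` - describes the actual physical memory available during
 *   boot regardless of the possible restrictions and memory hot(un)plug;
 *   the ``physmem`` type is only available on some architectures.
 *
 * Each region is represented by struct memblock_region that defines the
 * region extents, its attributes and, on NUMA systems, its node id. Every
 * memory type is described by struct memblock_type which holds an array of
 * memory regions along with the allocator metadata. The "memory" and
 * "reserved" types are wrapped with struct memblock, which is statically
 * initialized at build time.
 */
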
#ifndef CONFIG_NUMA
struct pglist_data __refdata contig_page_data;
EXPORT_SYMBOL(contig_page_data);
#endif

unsigned long max_low_pfn;
unsigned long min_low_pfn;
unsigned long max_pfn;
unsigned long long max_possible_pfn;

static struct memblock_region memblock_memory_init_regions[INIT_MEMBLOCK_REGIONS] __initdata_memblock;
static struct memblock_region memblock_reserved_init_regions[INIT_MEMBLOCK_RESERVED_REGIONS] __initdata_memblock;
#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
static struct memblock_region memblock_physmem_init_regions[INIT_PHYSMEM_REGIONS];
#endif

struct memblock memblock __initdata_memblock = {
	.memory.regions		= memblock_memory_init_regions,
	.memory.cnt		= 1,	/* empty dummy entry */
	.memory.max		= INIT_MEMBLOCK_REGIONS,
	.memory.name		= "memory",

	.reserved.regions	= memblock_reserved_init_regions,
	.reserved.cnt		= 1,	/* empty dummy entry */
	.reserved.max		= INIT_MEMBLOCK_RESERVED_REGIONS,
	.reserved.name		= "reserved",

	.bottom_up		= false,
	.current_limit		= MEMBLOCK_ALLOC_ANYWHERE,
};

#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
struct memblock_type physmem = {
	.regions	= memblock_physmem_init_regions,
	.cnt		= 1,	/* empty dummy entry */
	.max		= INIT_PHYSMEM_REGIONS,
	.name		= "physmem",
};
#endif
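
/*
 * Keep a pointer to &memblock.memory so iterators can detect when the
 * memblock data has been discarded; memblock_discard() resets it to NULL.
 */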
static __refdata struct memblock_type *memblock_memory = &memblock.memory;

#define for_each_memblock_type(i, memblock_type, rgn)			\
	for (i = 0, rgn = &memblock_type->regions[0];			\
	     i < memblock_type->cnt;					\
	     i++, rgn = &memblock_type->regions[i])

#define memblock_dbg(fmt, ...)						\
	do {								\
		if (memblock_debug)					\
			pr_info(fmt, ##__VA_ARGS__);			\
	} while (0)

static int memblock_debug __initdata_memblock;
static bool system_has_some_mirror __initdata_memblock = false;
static int memblock_can_resize __initdata_memblock;
static int memblock_memory_in_slab __initdata_memblock = 0;
static int memblock_reserved_in_slab __initdata_memblock = 0;

static enum memblock_flags __init_memblock choose_memblock_flags(void)
{
	return system_has_some_mirror ? MEMBLOCK_MIRROR : MEMBLOCK_NONE;
}

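/* adjust *@size so that (@base + *@size) doesn't overflow, return new size */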
static inline phys_addr_t memblock_cap_size(phys_addr_t base, phys_addr_t *size)
{
	return *size = min(*size, PHYS_ADDR_MAX - base);
}

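/*
 * Address comparison utilities
 */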
static unsigned long __init_memblock memblock_addrs_overlap(phys_addr_t base1, phys_addr_t size1,
				       phys_addr_t base2, phys_addr_t size2)
{
	return ((base1 < (base2 + size2)) && (base2 < (base1 + size1)));
}

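/**
 * memblock_overlaps_region - check if a region intersects regions of a type
 * @type: memblock type to check
 * @base: base address of the region
 * @size: size of the region
 *
 * Return: true if @base + @size overlaps any region of @type, false otherwise.
 */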
bool __init_memblock memblock_overlaps_region(struct memblock_type *type,
					phys_addr_t base, phys_addr_t size)
{
	unsigned long i;

	memblock_cap_size(base, &size);

	for (i = 0; i < type->cnt; i++)
		if (memblock_addrs_overlap(base, size, type->regions[i].base,
					   type->regions[i].size))
			break;
	return i < type->cnt;
}

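/**
 * __memblock_find_range_bottom_up - find free area utility in bottom-up
 * @start: start of candidate range
 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_ANYWHERE or
 *       %MEMBLOCK_ALLOC_ACCESSIBLE
 * @size: size of free area to find
 * @align: alignment of free area to find
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 * @flags: pick from blocks based on memory attributes
 *
 * Utility called from memblock_find_in_range_node(), find free area bottom-up.
 *
 * Return:
 * Found address on success, 0 on failure.
 */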
static phys_addr_t __init_memblock
__memblock_find_range_bottom_up(phys_addr_t start, phys_addr_t end,
				phys_addr_t size, phys_addr_t align, int nid,
				enum memblock_flags flags)
{
	phys_addr_t this_start, this_end, cand;
	u64 i;

	for_each_free_mem_range(i, nid, flags, &this_start, &this_end, NULL) {
		this_start = clamp(this_start, start, end);
		this_end = clamp(this_end, start, end);

		cand = round_up(this_start, align);
		if (cand < this_end && this_end - cand >= size)
			return cand;
	}

	return 0;
}

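/**
 * __memblock_find_range_top_down - find free area utility, in top-down
 * @start: start of candidate range
 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_ANYWHERE or
 *       %MEMBLOCK_ALLOC_ACCESSIBLE
 * @size: size of free area to find
 * @align: alignment of free area to find
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 * @flags: pick from blocks based on memory attributes
 *
 * Utility called from memblock_find_in_range_node(), find free area top-down.
 *
 * Return:
 * Found address on success, 0 on failure.
 */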
static phys_addr_t __init_memblock
__memblock_find_range_top_down(phys_addr_t start, phys_addr_t end,
			       phys_addr_t size, phys_addr_t align, int nid,
			       enum memblock_flags flags)
{
	phys_addr_t this_start, this_end, cand;
	u64 i;

	for_each_free_mem_range_reverse(i, nid, flags, &this_start, &this_end,
					NULL) {
		this_start = clamp(this_start, start, end);
		this_end = clamp(this_end, start, end);

		if (this_end < size)
			continue;

		cand = round_down(this_end - size, align);
		if (cand >= this_start)
			return cand;
	}

	return 0;
}

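/**
 * memblock_find_in_range_node - find free area in given range and node
 * @size: size of free area to find
 * @align: alignment of free area to find
 * @start: start of candidate range
 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_ANYWHERE or
 *       %MEMBLOCK_ALLOC_ACCESSIBLE
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 * @flags: pick from blocks based on memory attributes
 *
 * Find @size free area aligned to @align in the specified range and node.
 *
 * Return:
 * Found address on success, 0 on failure.
 */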
static phys_addr_t __init_memblock memblock_find_in_range_node(phys_addr_t size,
					phys_addr_t align, phys_addr_t start,
					phys_addr_t end, int nid,
					enum memblock_flags flags)
{
	/* pump up @end */
	if (end == MEMBLOCK_ALLOC_ACCESSIBLE ||
	    end == MEMBLOCK_ALLOC_KASAN)
		end = memblock.current_limit;

	/* avoid allocating the first page */
	start = max_t(phys_addr_t, start, PAGE_SIZE);
	end = max(start, end);

	if (memblock_bottom_up())
		return __memblock_find_range_bottom_up(start, end, size, align,
						       nid, flags);
	else
		return __memblock_find_range_top_down(start, end, size, align,
						      nid, flags);
}

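/**
 * memblock_find_in_range - find free area in given range
 * @start: start of candidate range
 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_ANYWHERE or
 *       %MEMBLOCK_ALLOC_ACCESSIBLE
 * @size: size of free area to find
 * @align: alignment of free area to find
 *
 * Find @size free area aligned to @align in the specified range.
 *
 * Return:
 * Found address on success, 0 on failure.
 */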
phys_addr_t __init_memblock memblock_find_in_range(phys_addr_t start,
					phys_addr_t end, phys_addr_t size,
					phys_addr_t align)
{
	phys_addr_t ret;
	enum memblock_flags flags = choose_memblock_flags();

again:
	ret = memblock_find_in_range_node(size, align, start, end,
					  NUMA_NO_NODE, flags);

	if (!ret && (flags & MEMBLOCK_MIRROR)) {
		pr_warn("Could not allocate %pap bytes of mirrored memory\n",
			&size);
		flags &= ~MEMBLOCK_MIRROR;
		goto again;
	}

	return ret;
}

static void __init_memblock memblock_remove_region(struct memblock_type *type, unsigned long r)
{
	type->total_size -= type->regions[r].size;
	memmove(&type->regions[r], &type->regions[r + 1],
		(type->cnt - (r + 1)) * sizeof(type->regions[r]));
	type->cnt--;

	/* Special case for empty arrays */
	if (type->cnt == 0) {
		WARN_ON(type->total_size != 0);
		type->cnt = 1;
		type->regions[0].base = 0;
		type->regions[0].size = 0;
		type->regions[0].flags = 0;
		memblock_set_region_node(&type->regions[0], MAX_NUMNODES);
	}
}

#ifndef CONFIG_ARCH_KEEP_MEMBLOCK
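/**
 * memblock_discard - discard memory and reserved arrays if they were allocated
 */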
void __init memblock_discard(void)
{
	phys_addr_t addr, size;

	if (memblock.reserved.regions != memblock_reserved_init_regions) {
		addr = __pa(memblock.reserved.regions);
		size = PAGE_ALIGN(sizeof(struct memblock_region) *
				  memblock.reserved.max);
		__memblock_free_late(addr, size);
	}

	if (memblock.memory.regions != memblock_memory_init_regions) {
		addr = __pa(memblock.memory.regions);
		size = PAGE_ALIGN(sizeof(struct memblock_region) *
				  memblock.memory.max);
		__memblock_free_late(addr, size);
	}

	memblock_memory = NULL;
}
#endif
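
/**
 * memblock_double_array - double the size of the memblock regions array
 * @type: memblock type of the regions array being doubled
 * @new_area_start: starting address of memory range to avoid overlap with
 * @new_area_size: size of memory range to avoid overlap with
 *
 * Double the size of the @type regions array. If memblock is being used to
 * allocate memory for a new reserved regions array and there is a previously
 * allocated memory range [@new_area_start, @new_area_start + @new_area_size]
 * waiting to be reserved, ensure the memory used by the new array does
 * not overlap.
 *
 * Return:
 * 0 on success, -1 on failure.
 */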
static int __init_memblock memblock_double_array(struct memblock_type *type,
						phys_addr_t new_area_start,
						phys_addr_t new_area_size)
{
	struct memblock_region *new_array, *old_array;
	phys_addr_t old_alloc_size, new_alloc_size;
	phys_addr_t old_size, new_size, addr, new_end;
	int use_slab = slab_is_available();
	int *in_slab;

	/*
	 * We don't allow resizing until we know about the reserved regions
	 * of memory that aren't suitable for allocation
	 */
	if (!memblock_can_resize)
		return -1;

	/* Calculate new doubled size */
	old_size = type->max * sizeof(struct memblock_region);
	new_size = old_size << 1;
	/*
	 * We need to allocate the new array aligned to PAGE_SIZE,
	 * so we can free it completely later.
	 */
	old_alloc_size = PAGE_ALIGN(old_size);
	new_alloc_size = PAGE_ALIGN(new_size);

	/* Retrieve the slab flag */
	if (type == &memblock.memory)
		in_slab = &memblock_memory_in_slab;
	else
		in_slab = &memblock_reserved_in_slab;

	/* Try to find some space for it */
	if (use_slab) {
		new_array = kmalloc(new_size, GFP_KERNEL);
		addr = new_array ? __pa(new_array) : 0;
	} else {
		/* only exclude range when trying to double reserved.regions */
		if (type != &memblock.reserved)
			new_area_start = new_area_size = 0;

		addr = memblock_find_in_range(new_area_start + new_area_size,
						memblock.current_limit,
						new_alloc_size, PAGE_SIZE);
		if (!addr && new_area_size)
			addr = memblock_find_in_range(0,
				min(new_area_start, memblock.current_limit),
				new_alloc_size, PAGE_SIZE);

		new_array = addr ? __va(addr) : NULL;
	}
	if (!addr) {
		pr_err("memblock: Failed to double %s array from %ld to %ld entries !\n",
		       type->name, type->max, type->max * 2);
		return -1;
	}

	new_end = addr + new_size - 1;
	memblock_dbg("memblock: %s is doubled to %ld at [%pa-%pa]",
			type->name, type->max * 2, &addr, &new_end);

	/*
	 * Found space, we now need to move the array over before we add the
	 * reserved region since it may be our reserved array itself that is
	 * full.
	 */
	memcpy(new_array, type->regions, old_size);
	memset(new_array + type->max, 0, old_size);
	old_array = type->regions;
	type->regions = new_array;
	type->max <<= 1;

	/* Free old array. We needn't free it if the array is the static one */
	if (*in_slab)
		kfree(old_array);
	else if (old_array != memblock_memory_init_regions &&
		 old_array != memblock_reserved_init_regions)
		memblock_free(__pa(old_array), old_alloc_size);

	/*
	 * Reserve the new array if that comes from the memblock.  Otherwise,
	 * we needn't do it
	 */
	if (!use_slab)
		BUG_ON(memblock_reserve(addr, new_alloc_size));

	/* Update slab flag */
	*in_slab = use_slab;

	return 0;
}
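
/**
 * memblock_merge_regions - merge neighboring compatible regions
 * @type: memblock type to scan
 *
 * Scan @type and merge neighboring compatible regions.
 */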
static void __init_memblock memblock_merge_regions(struct memblock_type *type)
{
	int i = 0;

	/* cnt never goes below 1 */
	while (i < type->cnt - 1) {
		struct memblock_region *this = &type->regions[i];
		struct memblock_region *next = &type->regions[i + 1];

		if (this->base + this->size != next->base ||
		    memblock_get_region_node(this) !=
		    memblock_get_region_node(next) ||
		    this->flags != next->flags) {
			BUG_ON(this->base + this->size > next->base);
			i++;
			continue;
		}

		this->size += next->size;
		/* move forward from next + 1 */
		memmove(next, next + 1, (type->cnt - (i + 2)) * sizeof(*next));
		type->cnt--;
	}
}
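
/**
 * memblock_insert_region - insert new memblock region
 * @type:	memblock type to insert into
 * @idx:	index for the insertion point
 * @base:	base address of the new region
 * @size:	size of the new region
 * @nid:	node id of the new region
 * @flags:	flags of the new region
 *
 * Insert new memblock region [@base, @base + @size) into @type at @idx.
 * @type must already have extra room to accommodate the new region.
 */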
static void __init_memblock memblock_insert_region(struct memblock_type *type,
						   int idx, phys_addr_t base,
						   phys_addr_t size,
						   int nid,
						   enum memblock_flags flags)
{
	struct memblock_region *rgn = &type->regions[idx];

	BUG_ON(type->cnt >= type->max);
	memmove(rgn + 1, rgn, (type->cnt - idx) * sizeof(*rgn));
	rgn->base = base;
	rgn->size = size;
	rgn->flags = flags;
	memblock_set_region_node(rgn, nid);
	type->cnt++;
	type->total_size += size;
}
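
/**
 * memblock_add_range - add new memblock region
 * @type: memblock type to add new region into
 * @base: base address of the new region
 * @size: size of the new region
 * @nid: nid of the new region
 * @flags: flags of the new region
 *
 * Add new memblock region [@base, @base + @size) into @type.  The new region
 * is allowed to overlap with existing ones - overlaps don't affect already
 * existing regions.  @type is guaranteed to be minimal (all neighbouring
 * compatible regions are merged) after the addition.
 *
 * Return:
 * 0 on success, -errno on failure.
 */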
static int __init_memblock memblock_add_range(struct memblock_type *type,
				phys_addr_t base, phys_addr_t size,
				int nid, enum memblock_flags flags)
{
	bool insert = false;
	phys_addr_t obase = base;
	phys_addr_t end = base + memblock_cap_size(base, &size);
	int idx, nr_new;
	struct memblock_region *rgn;

	if (!size)
		return 0;

	/* special case for empty array */
	if (type->regions[0].size == 0) {
		WARN_ON(type->cnt != 1 || type->total_size);
		type->regions[0].base = base;
		type->regions[0].size = size;
		type->regions[0].flags = flags;
		memblock_set_region_node(&type->regions[0], nid);
		type->total_size = size;
		return 0;
	}
repeat:
	/*
	 * The following is executed twice.  Once with %false @insert and
	 * then with %true.  The first counts the number of regions needed
	 * to accommodate the new area.  The second actually inserts them.
	 */
	base = obase;
	nr_new = 0;

	for_each_memblock_type(idx, type, rgn) {
		phys_addr_t rbase = rgn->base;
		phys_addr_t rend = rbase + rgn->size;

		if (rbase >= end)
			break;
		if (rend <= base)
			continue;
		/*
		 * @rgn overlaps.  If it separates the lower part of new
		 * area, insert that portion.
		 */
		if (rbase > base) {
#ifdef CONFIG_NUMA
			WARN_ON(nid != memblock_get_region_node(rgn));
#endif
			WARN_ON(flags != rgn->flags);
			nr_new++;
			if (insert)
				memblock_insert_region(type, idx++, base,
						       rbase - base, nid,
						       flags);
		}
		/* area below @rend is dealt with, forget about it */
		base = min(rend, end);
	}

	/* insert the remaining portion */
	if (base < end) {
		nr_new++;
		if (insert)
			memblock_insert_region(type, idx, base, end - base,
					       nid, flags);
	}

	if (!nr_new)
		return 0;

	/*
	 * If this was the first round, resize array and repeat for actual
	 * insertions; otherwise, merge and return.
	 */
	if (!insert) {
		while (type->cnt + nr_new > type->max)
			if (memblock_double_array(type, obase, size) < 0)
				return -ENOMEM;
		insert = true;
		goto repeat;
	} else {
		memblock_merge_regions(type);
		return 0;
	}
}
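
/**
 * memblock_add_node - add new memblock region within a NUMA node
 * @base: base address of the new region
 * @size: size of the new region
 * @nid: nid of the new region
 *
 * Add new memblock region [@base, @base + @size) to the "memory"
 * type. See memblock_add_range() description for mode details
 *
 * Return:
 * 0 on success, -errno on failure.
 */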
int __init_memblock memblock_add_node(phys_addr_t base, phys_addr_t size,
				      int nid)
{
	return memblock_add_range(&memblock.memory, base, size, nid, 0);
}
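
/**
 * memblock_add - add new memblock region
 * @base: base address of the new region
 * @size: size of the new region
 *
 * Add new memblock region [@base, @base + @size) to the "memory"
 * type. See memblock_add_range() description for mode details
 *
 * Return:
 * 0 on success, -errno on failure.
 */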
int __init_memblock memblock_add(phys_addr_t base, phys_addr_t size)
{
	phys_addr_t end = base + size - 1;

	memblock_dbg("%s: [%pa-%pa] %pS\n", __func__,
		     &base, &end, (void *)_RET_IP_);

	return memblock_add_range(&memblock.memory, base, size, MAX_NUMNODES, 0);
}
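
/**
 * memblock_isolate_range - isolate given range into disjoint memblocks
 * @type: memblock type to isolate range for
 * @base: base of range to isolate
 * @size: size of range to isolate
 * @start_rgn: out parameter for the start of isolated region
 * @end_rgn: out parameter for the end of isolated region
 *
 * Walk @type and ensure that regions don't cross the boundaries defined by
 * [@base, @base + @size).  Crossing regions are split at the boundaries,
 * which may create at most two more regions.  The index of the first
 * region inside the range is returned in *@start_rgn and end in *@end_rgn.
 *
 * Return:
 * 0 on success, -errno on failure.
 */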
static int __init_memblock memblock_isolate_range(struct memblock_type *type,
					phys_addr_t base, phys_addr_t size,
					int *start_rgn, int *end_rgn)
{
	phys_addr_t end = base + memblock_cap_size(base, &size);
	int idx;
	struct memblock_region *rgn;

	*start_rgn = *end_rgn = 0;

	if (!size)
		return 0;

	/* we'll create at most two more regions */
	while (type->cnt + 2 > type->max)
		if (memblock_double_array(type, base, size) < 0)
			return -ENOMEM;

	for_each_memblock_type(idx, type, rgn) {
		phys_addr_t rbase = rgn->base;
		phys_addr_t rend = rbase + rgn->size;

		if (rbase >= end)
			break;
		if (rend <= base)
			continue;

		if (rbase < base) {
			/*
			 * @rgn intersects from below.  Split and continue
			 * to process the next region - the new top half.
			 */
			rgn->base = base;
			rgn->size -= base - rbase;
			type->total_size -= base - rbase;
			memblock_insert_region(type, idx, rbase, base - rbase,
					       memblock_get_region_node(rgn),
					       rgn->flags);
		} else if (rend > end) {
			/*
			 * @rgn intersects from above.  Split and redo the
			 * current region - the new bottom half.
			 */
			rgn->base = end;
			rgn->size -= end - rbase;
			type->total_size -= end - rbase;
			memblock_insert_region(type, idx--, rbase, end - rbase,
					       memblock_get_region_node(rgn),
					       rgn->flags);
		} else {
			/* @rgn is fully contained, record it */
			if (!*end_rgn)
				*start_rgn = idx;
			*end_rgn = idx + 1;
		}
	}

	return 0;
}

static int __init_memblock memblock_remove_range(struct memblock_type *type,
					  phys_addr_t base, phys_addr_t size)
{
	int start_rgn, end_rgn;
	int i, ret;

	ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
	if (ret)
		return ret;

	for (i = end_rgn - 1; i >= start_rgn; i--)
		memblock_remove_region(type, i);
	return 0;
}

int __init_memblock memblock_remove(phys_addr_t base, phys_addr_t size)
{
	phys_addr_t end = base + size - 1;

	memblock_dbg("%s: [%pa-%pa] %pS\n", __func__,
		     &base, &end, (void *)_RET_IP_);

	return memblock_remove_range(&memblock.memory, base, size);
}
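
/**
 * memblock_free - free boot memory block
 * @base: phys starting address of the boot memory block
 * @size: size of the boot memory block in bytes
 *
 * Free boot memory block previously allocated by memblock_alloc_xx() API.
 * The freeing memory will not be released to the buddy allocator.
 */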
int __init_memblock memblock_free(phys_addr_t base, phys_addr_t size)
{
	phys_addr_t end = base + size - 1;

	memblock_dbg("%s: [%pa-%pa] %pS\n", __func__,
		     &base, &end, (void *)_RET_IP_);

	kmemleak_free_part_phys(base, size);
	return memblock_remove_range(&memblock.reserved, base, size);
}

int __init_memblock memblock_reserve(phys_addr_t base, phys_addr_t size)
{
	phys_addr_t end = base + size - 1;

	memblock_dbg("%s: [%pa-%pa] %pS\n", __func__,
		     &base, &end, (void *)_RET_IP_);

	return memblock_add_range(&memblock.reserved, base, size, MAX_NUMNODES, 0);
}

#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
int __init_memblock memblock_physmem_add(phys_addr_t base, phys_addr_t size)
{
	phys_addr_t end = base + size - 1;

	memblock_dbg("%s: [%pa-%pa] %pS\n", __func__,
		     &base, &end, (void *)_RET_IP_);

	return memblock_add_range(&physmem, base, size, MAX_NUMNODES, 0);
}
#endif
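
/**
 * memblock_setclr_flag - set or clear flag for a memory region
 * @base: base address of the region
 * @size: size of the region
 * @set: set or clear the flag
 * @flag: the flag to update
 *
 * This function isolates region [@base, @base + @size), and sets/clears flag
 *
 * Return: 0 on success, -errno on failure.
 */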
static int __init_memblock memblock_setclr_flag(phys_addr_t base,
				phys_addr_t size, int set, int flag)
{
	struct memblock_type *type = &memblock.memory;
	int i, ret, start_rgn, end_rgn;

	ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
	if (ret)
		return ret;

	for (i = start_rgn; i < end_rgn; i++) {
		struct memblock_region *r = &type->regions[i];

		if (set)
			r->flags |= flag;
		else
			r->flags &= ~flag;
	}

	memblock_merge_regions(type);
	return 0;
}
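
/**
 * memblock_mark_hotplug - Mark hotpluggable memory with flag MEMBLOCK_HOTPLUG.
 * @base: the base phys addr of the region
 * @size: the size of the region
 *
 * Return: 0 on success, -errno on failure.
 */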
int __init_memblock memblock_mark_hotplug(phys_addr_t base, phys_addr_t size)
{
	return memblock_setclr_flag(base, size, 1, MEMBLOCK_HOTPLUG);
}
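
/**
 * memblock_clear_hotplug - Clear flag MEMBLOCK_HOTPLUG for a specified region.
 * @base: the base phys addr of the region
 * @size: the size of the region
 *
 * Return: 0 on success, -errno on failure.
 */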
int __init_memblock memblock_clear_hotplug(phys_addr_t base, phys_addr_t size)
{
	return memblock_setclr_flag(base, size, 0, MEMBLOCK_HOTPLUG);
}
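
/**
 * memblock_mark_mirror - Mark mirrored memory with flag MEMBLOCK_MIRROR.
 * @base: the base phys addr of the region
 * @size: the size of the region
 *
 * Return: 0 on success, -errno on failure.
 */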
int __init_memblock memblock_mark_mirror(phys_addr_t base, phys_addr_t size)
{
	system_has_some_mirror = true;

	return memblock_setclr_flag(base, size, 1, MEMBLOCK_MIRROR);
}
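
/**
 * memblock_mark_nomap - Mark a memory region with flag MEMBLOCK_NOMAP.
 * @base: the base phys addr of the region
 * @size: the size of the region
 *
 * The memory regions marked with %MEMBLOCK_NOMAP will not be added to the
 * direct mapping of the physical memory. These regions will still be
 * covered by the memory map. The struct page for NOMAP memory frames in the
 * memory map will be PageReserved()
 *
 * Return: 0 on success, -errno on failure.
 */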
int __init_memblock memblock_mark_nomap(phys_addr_t base, phys_addr_t size)
{
	return memblock_setclr_flag(base, size, 1, MEMBLOCK_NOMAP);
}
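
/**
 * memblock_clear_nomap - Clear flag MEMBLOCK_NOMAP for a specified region.
 * @base: the base phys addr of the region
 * @size: the size of the region
 *
 * Return: 0 on success, -errno on failure.
 */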
int __init_memblock memblock_clear_nomap(phys_addr_t base, phys_addr_t size)
{
	return memblock_setclr_flag(base, size, 0, MEMBLOCK_NOMAP);
}

static bool should_skip_region(struct memblock_type *type,
			       struct memblock_region *m,
			       int nid, int flags)
{
	int m_nid = memblock_get_region_node(m);

	/* we never skip regions when iterating memblock.reserved or physmem */
	if (type != memblock_memory)
		return false;

	/* only memory regions are associated with nodes, check it */
	if (nid != NUMA_NO_NODE && nid != m_nid)
		return true;

	/* skip hotpluggable memory regions if needed */
	if (movable_node_is_enabled() && memblock_is_hotpluggable(m) &&
	    !(flags & MEMBLOCK_HOTPLUG))
		return true;

	/* if we want mirror memory skip non-mirror memory regions */
	if ((flags & MEMBLOCK_MIRROR) && !memblock_is_mirror(m))
		return true;

	/* skip nomap memory unless we were asked for it explicitly */
	if (!(flags & MEMBLOCK_NOMAP) && memblock_is_nomap(m))
		return true;

	return false;
}
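
/**
 * __next_mem_range - next function for for_each_free_mem_range() etc.
 * @idx: pointer to u64 loop variable
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @type_a: pointer to memblock_type from where the range is taken
 * @type_b: pointer to memblock_type which excludes memory from being taken
 * @out_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @out_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @out_nid: ptr to int for nid of the range, can be %NULL
 *
 * Find the first area from *@idx which matches @nid, fill the out
 * parameters, and update *@idx for the next iteration.  The lower 32bit of
 * *@idx contains index into @type_a and the upper 32bit indexes the
 * areas before each region in @type_b.  As both region arrays are sorted,
 * the function advances the two indices in lockstep and returns each
 * intersection of a @type_a region with a gap between @type_b regions.
 */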
void __next_mem_range(u64 *idx, int nid, enum memblock_flags flags,
		      struct memblock_type *type_a,
		      struct memblock_type *type_b, phys_addr_t *out_start,
		      phys_addr_t *out_end, int *out_nid)
{
	int idx_a = *idx & 0xffffffff;
	int idx_b = *idx >> 32;

	if (WARN_ONCE(nid == MAX_NUMNODES,
	    "Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n"))
		nid = NUMA_NO_NODE;

	for (; idx_a < type_a->cnt; idx_a++) {
		struct memblock_region *m = &type_a->regions[idx_a];

		phys_addr_t m_start = m->base;
		phys_addr_t m_end = m->base + m->size;
		int m_nid = memblock_get_region_node(m);

		if (should_skip_region(type_a, m, nid, flags))
			continue;

		if (!type_b) {
			if (out_start)
				*out_start = m_start;
			if (out_end)
				*out_end = m_end;
			if (out_nid)
				*out_nid = m_nid;
			idx_a++;
			*idx = (u32)idx_a | (u64)idx_b << 32;
			return;
		}

		/* scan areas before each reservation */
		for (; idx_b < type_b->cnt + 1; idx_b++) {
			struct memblock_region *r;
			phys_addr_t r_start;
			phys_addr_t r_end;

			r = &type_b->regions[idx_b];
			r_start = idx_b ? r[-1].base + r[-1].size : 0;
			r_end = idx_b < type_b->cnt ?
				r->base : PHYS_ADDR_MAX;

			/*
			 * if idx_b advanced past idx_a,
			 * break out to advance idx_a
			 */
			if (r_start >= m_end)
				break;
			/* if the two regions intersect, we're done */
			if (m_start < r_end) {
				if (out_start)
					*out_start =
						max(m_start, r_start);
				if (out_end)
					*out_end = min(m_end, r_end);
				if (out_nid)
					*out_nid = m_nid;
				/*
				 * The region which ends first is
				 * advanced for the next iteration.
				 */
				if (m_end <= r_end)
					idx_a++;
				else
					idx_b++;
				*idx = (u32)idx_a | (u64)idx_b << 32;
				return;
			}
		}
	}

	/* signal end of iteration */
	*idx = ULLONG_MAX;
}
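
/**
 * __next_mem_range_rev - generic next function for for_each_*_range_rev()
 * @idx: pointer to u64 loop variable
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @type_a: pointer to memblock_type from where the range is taken
 * @type_b: pointer to memblock_type which excludes memory from being taken
 * @out_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @out_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @out_nid: ptr to int for nid of the range, can be %NULL
 *
 * Finds the next range from @type_a which is not marked as unsuitable
 * in @type_b.
 *
 * Reverse of __next_mem_range().
 */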
void __init_memblock __next_mem_range_rev(u64 *idx, int nid,
					  enum memblock_flags flags,
					  struct memblock_type *type_a,
					  struct memblock_type *type_b,
					  phys_addr_t *out_start,
					  phys_addr_t *out_end, int *out_nid)
{
	int idx_a = *idx & 0xffffffff;
	int idx_b = *idx >> 32;

	if (WARN_ONCE(nid == MAX_NUMNODES, "Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n"))
		nid = NUMA_NO_NODE;

	if (*idx == (u64)ULLONG_MAX) {
		idx_a = type_a->cnt - 1;
		if (type_b != NULL)
			idx_b = type_b->cnt;
		else
			idx_b = 0;
	}

	for (; idx_a >= 0; idx_a--) {
		struct memblock_region *m = &type_a->regions[idx_a];

		phys_addr_t m_start = m->base;
		phys_addr_t m_end = m->base + m->size;
		int m_nid = memblock_get_region_node(m);

		if (should_skip_region(type_a, m, nid, flags))
			continue;

		if (!type_b) {
			if (out_start)
				*out_start = m_start;
			if (out_end)
				*out_end = m_end;
			if (out_nid)
				*out_nid = m_nid;
			idx_a--;
			*idx = (u32)idx_a | (u64)idx_b << 32;
			return;
		}

		/* scan areas before each reservation */
		for (; idx_b >= 0; idx_b--) {
			struct memblock_region *r;
			phys_addr_t r_start;
			phys_addr_t r_end;

			r = &type_b->regions[idx_b];
			r_start = idx_b ? r[-1].base + r[-1].size : 0;
			r_end = idx_b < type_b->cnt ?
				r->base : PHYS_ADDR_MAX;
			/*
			 * if idx_b advanced past idx_a,
			 * break out to advance idx_a
			 */
			if (r_end <= m_start)
				break;

			/* if the two regions intersect, we're done */
			if (m_end > r_start) {
				if (out_start)
					*out_start = max(m_start, r_start);
				if (out_end)
					*out_end = min(m_end, r_end);
				if (out_nid)
					*out_nid = m_nid;
				if (m_start >= r_start)
					idx_a--;
				else
					idx_b--;
				*idx = (u32)idx_a | (u64)idx_b << 32;
				return;
			}
		}
	}

	/* signal end of iteration */
	*idx = ULLONG_MAX;
}
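
/*
 * Common iterator interface used to define for_each_mem_pfn_range().
 */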
void __init_memblock __next_mem_pfn_range(int *idx, int nid,
				unsigned long *out_start_pfn,
				unsigned long *out_end_pfn, int *out_nid)
{
	struct memblock_type *type = &memblock.memory;
	struct memblock_region *r;
	int r_nid;

	while (++*idx < type->cnt) {
		r = &type->regions[*idx];
		r_nid = memblock_get_region_node(r);

		if (PFN_UP(r->base) >= PFN_DOWN(r->base + r->size))
			continue;
		if (nid == MAX_NUMNODES || nid == r_nid)
			break;
	}
	if (*idx >= type->cnt) {
		*idx = -1;
		return;
	}

	if (out_start_pfn)
		*out_start_pfn = PFN_UP(r->base);
	if (out_end_pfn)
		*out_end_pfn = PFN_DOWN(r->base + r->size);
	if (out_nid)
		*out_nid = r_nid;
}
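
/**
 * memblock_set_node - set node ID on memblock regions
 * @base: base of area to set node ID for
 * @size: size of area to set node ID for
 * @type: memblock type to set node ID for
 * @nid: node ID to set
 *
 * Set the nid of memblock @type regions in [@base, @base + @size) to @nid.
 * Regions which cross the area boundaries are split as necessary.
 *
 * Return:
 * 0 on success, -errno on failure.
 */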
int __init_memblock memblock_set_node(phys_addr_t base, phys_addr_t size,
				      struct memblock_type *type, int nid)
{
#ifdef CONFIG_NUMA
	int start_rgn, end_rgn;
	int i, ret;

	ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
	if (ret)
		return ret;

	for (i = start_rgn; i < end_rgn; i++)
		memblock_set_region_node(&type->regions[i], nid);

	memblock_merge_regions(type);
#endif
	return 0;
}

#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
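/**
 * __next_mem_pfn_range_in_zone - iterator for for_each_*_range_in_zone()
 * @idx: pointer to u64 loop variable
 * @zone: zone in which all of the memory blocks reside
 * @out_spfn: ptr to ulong for start pfn of the range, can be %NULL
 * @out_epfn: ptr to ulong for end pfn of the range, can be %NULL
 *
 * This function is meant to be a zone/pfn specific wrapper for the
 * for_each_mem_range type iterators. It walks the free memblock ranges
 * and clips each one to the boundaries of @zone, which is what the
 * deferred memory init routines need.
 */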
void __init_memblock
__next_mem_pfn_range_in_zone(u64 *idx, struct zone *zone,
			     unsigned long *out_spfn, unsigned long *out_epfn)
{
	int zone_nid = zone_to_nid(zone);
	phys_addr_t spa, epa;
	int nid;

	__next_mem_range(idx, zone_nid, MEMBLOCK_NONE,
			 &memblock.memory, &memblock.reserved,
			 &spa, &epa, &nid);

	while (*idx != U64_MAX) {
		unsigned long epfn = PFN_DOWN(epa);
		unsigned long spfn = PFN_UP(spa);

		/*
		 * Verify the end is at least past the start of the zone and
		 * that we have at least one PFN to initialize.
		 */
		if (zone->zone_start_pfn < epfn && spfn < epfn) {
			/* if we went too far just stop searching */
			if (zone_end_pfn(zone) <= spfn) {
				*idx = U64_MAX;
				break;
			}

			if (out_spfn)
				*out_spfn = max(zone->zone_start_pfn, spfn);
			if (out_epfn)
				*out_epfn = min(zone_end_pfn(zone), epfn);

			return;
		}

		__next_mem_range(idx, zone_nid, MEMBLOCK_NONE,
				 &memblock.memory, &memblock.reserved,
				 &spa, &epa, &nid);
	}

	/* signal end of iteration */
	if (out_spfn)
		*out_spfn = ULONG_MAX;
	if (out_epfn)
		*out_epfn = 0;
}

#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */
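
/**
 * memblock_alloc_range_nid - allocate boot memory block
 * @size: size of memory block to be allocated in bytes
 * @align: alignment of the region and block's size
 * @start: the lower bound of the memory region to allocate (phys address)
 * @end: the upper bound of the memory region to allocate (phys address)
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 * @exact_nid: control the allocation fall back to other nodes
 *
 * The allocation is performed from memory region limited by
 * memblock.current_limit if @end == %MEMBLOCK_ALLOC_ACCESSIBLE.
 *
 * If the specified node can not hold the requested memory and @exact_nid
 * is false, the allocation falls back to any node in the system.
 *
 * For systems with memory mirroring, the allocation is attempted first
 * from the regions with mirroring enabled and then retried from any
 * memory region.
 *
 * In addition, function sets the min_count to 0 using kmemleak_alloc_phys for
 * allocated boot memory block, so that it is never reported as leaks.
 *
 * Return:
 * Physical address of allocated memory block on success, %0 on failure.
 */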
phys_addr_t __init memblock_alloc_range_nid(phys_addr_t size,
					phys_addr_t align, phys_addr_t start,
					phys_addr_t end, int nid,
					bool exact_nid)
{
	enum memblock_flags flags = choose_memblock_flags();
	phys_addr_t found;

	if (WARN_ONCE(nid == MAX_NUMNODES, "Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n"))
		nid = NUMA_NO_NODE;

	if (!align) {
		/* Can't use WARNs this early in boot on powerpc */
		dump_stack();
		align = SMP_CACHE_BYTES;
	}

again:
	found = memblock_find_in_range_node(size, align, start, end, nid,
					    flags);
	if (found && !memblock_reserve(found, size))
		goto done;

	if (nid != NUMA_NO_NODE && !exact_nid) {
		found = memblock_find_in_range_node(size, align, start,
						    end, NUMA_NO_NODE,
						    flags);
		if (found && !memblock_reserve(found, size))
			goto done;
	}

	if (flags & MEMBLOCK_MIRROR) {
		flags &= ~MEMBLOCK_MIRROR;
		pr_warn("Could not allocate %pap bytes of mirrored memory\n",
			&size);
		goto again;
	}

	return 0;

done:
	/* Skip kmemleak for kasan_init() due to high volume. */
	if (end != MEMBLOCK_ALLOC_KASAN)
		/*
		 * The min_count is set to 0 so that memblock allocated
		 * blocks are never reported as leaks. This is because many
		 * of these blocks are only referred via the physical
		 * address which is not looked up by kmemleak.
		 */
		kmemleak_alloc_phys(found, size, 0, 0);

	return found;
}

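/**
 * memblock_phys_alloc_range - allocate a memory block inside specified range
 * @size: size of memory block to be allocated in bytes
 * @align: alignment of the region and block's size
 * @start: the lower bound of the memory region to allocate (physical address)
 * @end: the upper bound of the memory region to allocate (physical address)
 *
 * Allocate @size bytes in the between @start and @end.
 *
 * Return: physical address of the allocated memory block on success,
 * %0 on failure.
 */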
phys_addr_t __init memblock_phys_alloc_range(phys_addr_t size,
					     phys_addr_t align,
					     phys_addr_t start,
					     phys_addr_t end)
{
	memblock_dbg("%s: %llu bytes align=0x%llx from=%pa max_addr=%pa %pS\n",
		     __func__, (u64)size, (u64)align, &start, &end,
		     (void *)_RET_IP_);
	return memblock_alloc_range_nid(size, align, start, end, NUMA_NO_NODE,
					false);
}
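
/**
 * memblock_phys_alloc_try_nid - allocate a memory block from specified NUMA node
 * @size: size of memory block to be allocated in bytes
 * @align: alignment of the region and block's size
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 *
 * Allocates memory block from the specified NUMA node. If the node
 * has no available memory, allocates from any node in the system.
 *
 * Return: physical address of the allocated memory block on success,
 * %0 on failure.
 */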
phys_addr_t __init memblock_phys_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid)
{
	return memblock_alloc_range_nid(size, align, 0,
					MEMBLOCK_ALLOC_ACCESSIBLE, nid, false);
}
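
/**
 * memblock_alloc_internal - allocate boot memory block
 * @size: size of memory block to be allocated in bytes
 * @align: alignment of the region and block's size
 * @min_addr: the lower bound of the memory region to allocate (phys address)
 * @max_addr: the upper bound of the memory region to allocate (phys address)
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 * @exact_nid: control the allocation fall back to other nodes
 *
 * Allocates memory block using memblock_alloc_range_nid() and
 * converts the returned physical address to virtual.
 *
 * The @min_addr limit is dropped if it can not be satisfied and the allocation
 * will fall back to memory below @min_addr. Other constraints, such
 * as node and mirrored memory will be handled again in
 * memblock_alloc_range_nid().
 *
 * Return:
 * Virtual address of allocated memory block on success, NULL on failure.
 */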
static void * __init memblock_alloc_internal(
				phys_addr_t size, phys_addr_t align,
				phys_addr_t min_addr, phys_addr_t max_addr,
				int nid, bool exact_nid)
{
	phys_addr_t alloc;

	/*
	 * Detect any accidental use of these APIs after slab is ready, as at
	 * this moment memblock may be deinitialized already and its
	 * internal data may be destroyed (after execution of memblock_free_all)
	 */
	if (WARN_ON_ONCE(slab_is_available()))
		return kzalloc_node(size, GFP_NOWAIT, nid);

	if (max_addr > memblock.current_limit)
		max_addr = memblock.current_limit;

	alloc = memblock_alloc_range_nid(size, align, min_addr, max_addr, nid,
					 exact_nid);

	/* retry allocation without lower limit */
	if (!alloc && min_addr)
		alloc = memblock_alloc_range_nid(size, align, 0, max_addr, nid,
						 exact_nid);

	if (!alloc)
		return NULL;

	return phys_to_virt(alloc);
}
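
/**
 * memblock_alloc_exact_nid_raw - allocate boot memory block on the exact node
 * without zeroing memory
 * @size: size of memory block to be allocated in bytes
 * @align: alignment of the region and block's size
 * @min_addr: the lower bound of the memory region from where the allocation
 *	  is preferred (phys address)
 * @max_addr: the upper bound of the memory region from where the allocation
 *	      is preferred (phys address), or %MEMBLOCK_ALLOC_ACCESSIBLE to
 *	      allocate only from memory limited by memblock.current_limit value
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 *
 * Public function, provides additional debug information (including caller
 * info), if enabled. Does not zero allocated memory.
 *
 * Return:
 * Virtual address of allocated memory block on success, NULL on failure.
 */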
void * __init memblock_alloc_exact_nid_raw(
			phys_addr_t size, phys_addr_t align,
			phys_addr_t min_addr, phys_addr_t max_addr,
			int nid)
{
	void *ptr;

	memblock_dbg("%s: %llu bytes align=0x%llx nid=%d from=%pa max_addr=%pa %pS\n",
		     __func__, (u64)size, (u64)align, nid, &min_addr,
		     &max_addr, (void *)_RET_IP_);

	ptr = memblock_alloc_internal(size, align,
					   min_addr, max_addr, nid, true);
	if (ptr && size > 0)
		page_init_poison(ptr, size);

	return ptr;
}
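
/**
 * memblock_alloc_try_nid_raw - allocate boot memory block without zeroing
 * memory and without panicking
 * @size: size of memory block to be allocated in bytes
 * @align: alignment of the region and block's size
 * @min_addr: the lower bound of the memory region from where the allocation
 *	  is preferred (phys address)
 * @max_addr: the upper bound of the memory region from where the allocation
 *	      is preferred (phys address), or %MEMBLOCK_ALLOC_ACCESSIBLE to
 *	      allocate only from memory limited by memblock.current_limit value
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 *
 * Public function, provides additional debug information (including caller
 * info), if enabled. Does not zero allocated memory, does not panic if request
 * cannot be satisfied.
 *
 * Return:
 * Virtual address of allocated memory block on success, NULL on failure.
 */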
void * __init memblock_alloc_try_nid_raw(
			phys_addr_t size, phys_addr_t align,
			phys_addr_t min_addr, phys_addr_t max_addr,
			int nid)
{
	void *ptr;

	memblock_dbg("%s: %llu bytes align=0x%llx nid=%d from=%pa max_addr=%pa %pS\n",
		     __func__, (u64)size, (u64)align, nid, &min_addr,
		     &max_addr, (void *)_RET_IP_);

	ptr = memblock_alloc_internal(size, align,
					   min_addr, max_addr, nid, false);
	if (ptr && size > 0)
		page_init_poison(ptr, size);

	return ptr;
}
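
/**
 * memblock_alloc_try_nid - allocate boot memory block
 * @size: size of memory block to be allocated in bytes
 * @align: alignment of the region and block's size
 * @min_addr: the lower bound of the memory region from where the allocation
 *	  is preferred (phys address)
 * @max_addr: the upper bound of the memory region from where the allocation
 *	      is preferred (phys address), or %MEMBLOCK_ALLOC_ACCESSIBLE to
 *	      allocate only from memory limited by memblock.current_limit value
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 *
 * Public function, provides additional debug information (including caller
 * info), if enabled. This function zeroes the allocated memory.
 *
 * Return:
 * Virtual address of allocated memory block on success, NULL on failure.
 */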
void * __init memblock_alloc_try_nid(
			phys_addr_t size, phys_addr_t align,
			phys_addr_t min_addr, phys_addr_t max_addr,
			int nid)
{
	void *ptr;

	memblock_dbg("%s: %llu bytes align=0x%llx nid=%d from=%pa max_addr=%pa %pS\n",
		     __func__, (u64)size, (u64)align, nid, &min_addr,
		     &max_addr, (void *)_RET_IP_);
	ptr = memblock_alloc_internal(size, align,
					   min_addr, max_addr, nid, false);
	if (ptr)
		memset(ptr, 0, size);

	return ptr;
}
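
/**
 * __memblock_free_late - free pages directly to the buddy allocator
 * @base: phys starting address of the boot memory block
 * @size: size of the boot memory block in bytes
 *
 * This is only useful when the memblock allocator has already been torn
 * down, but we are still initializing the system.  Pages released through
 * this function are handed straight to the buddy allocator.
 */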
void __init __memblock_free_late(phys_addr_t base, phys_addr_t size)
{
	phys_addr_t cursor, end;

	end = base + size - 1;
	memblock_dbg("%s: [%pa-%pa] %pS\n",
		     __func__, &base, &end, (void *)_RET_IP_);
	kmemleak_free_part_phys(base, size);
	cursor = PFN_UP(base);
	end = PFN_DOWN(base + size);

	for (; cursor < end; cursor++) {
		memblock_free_pages(pfn_to_page(cursor), cursor, 0);
		totalram_pages_inc();
	}
}
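
/*
 * Remaining API functions
 */
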
phys_addr_t __init_memblock memblock_phys_mem_size(void)
{
	return memblock.memory.total_size;
}

phys_addr_t __init_memblock memblock_reserved_size(void)
{
	return memblock.reserved.total_size;
}

/* lowest address */
phys_addr_t __init_memblock memblock_start_of_DRAM(void)
{
	return memblock.memory.regions[0].base;
}

phys_addr_t __init_memblock memblock_end_of_DRAM(void)
{
	int idx = memblock.memory.cnt - 1;

	return (memblock.memory.regions[idx].base + memblock.memory.regions[idx].size);
}

static phys_addr_t __init_memblock __find_max_addr(phys_addr_t limit)
{
	phys_addr_t max_addr = PHYS_ADDR_MAX;
	struct memblock_region *r;

	/*
	 * translate the memory @limit size into the max address within one of
	 * the memory memblock regions, if the @limit exceeds the total size
	 * of those regions, max_addr will keep original value PHYS_ADDR_MAX
	 */
	for_each_mem_region(r) {
		if (limit <= r->size) {
			max_addr = r->base + limit;
			break;
		}
		limit -= r->size;
	}

	return max_addr;
}

void __init memblock_enforce_memory_limit(phys_addr_t limit)
{
	phys_addr_t max_addr;

	if (!limit)
		return;

	max_addr = __find_max_addr(limit);

	/* @limit exceeds the total size of the memory, do nothing */
	if (max_addr == PHYS_ADDR_MAX)
		return;

	/* truncate both memory and reserved regions */
	memblock_remove_range(&memblock.memory, max_addr,
			      PHYS_ADDR_MAX);
	memblock_remove_range(&memblock.reserved, max_addr,
			      PHYS_ADDR_MAX);
}

void __init memblock_cap_memory_range(phys_addr_t base, phys_addr_t size)
{
	int start_rgn, end_rgn;
	int i, ret;

	if (!size)
		return;

	ret = memblock_isolate_range(&memblock.memory, base, size,
				     &start_rgn, &end_rgn);
	if (ret)
		return;

	/* remove all the MAP regions outside the isolated range */
	for (i = memblock.memory.cnt - 1; i >= end_rgn; i--)
		if (!memblock_is_nomap(&memblock.memory.regions[i]))
			memblock_remove_region(&memblock.memory, i);

	for (i = start_rgn - 1; i >= 0; i--)
		if (!memblock_is_nomap(&memblock.memory.regions[i]))
			memblock_remove_region(&memblock.memory, i);

	/* truncate the reserved regions */
	memblock_remove_range(&memblock.reserved, 0, base);
	memblock_remove_range(&memblock.reserved,
			      base + size, PHYS_ADDR_MAX);
}

void __init memblock_mem_limit_remove_map(phys_addr_t limit)
{
	phys_addr_t max_addr;

	if (!limit)
		return;

	max_addr = __find_max_addr(limit);

	/* @limit exceeds the total size of the memory, do nothing */
	if (max_addr == PHYS_ADDR_MAX)
		return;

	memblock_cap_memory_range(0, max_addr);
}

static int __init_memblock memblock_search(struct memblock_type *type, phys_addr_t addr)
{
	unsigned int left = 0, right = type->cnt;

	do {
		unsigned int mid = (right + left) / 2;

		if (addr < type->regions[mid].base)
			right = mid;
		else if (addr >= (type->regions[mid].base +
				  type->regions[mid].size))
			left = mid + 1;
		else
			return mid;
	} while (left < right);
	return -1;
}

bool __init_memblock memblock_is_reserved(phys_addr_t addr)
{
	return memblock_search(&memblock.reserved, addr) != -1;
}

bool __init_memblock memblock_is_memory(phys_addr_t addr)
{
	return memblock_search(&memblock.memory, addr) != -1;
}

bool __init_memblock memblock_is_map_memory(phys_addr_t addr)
{
	int i = memblock_search(&memblock.memory, addr);

	if (i == -1)
		return false;
	return !memblock_is_nomap(&memblock.memory.regions[i]);
}

int __init_memblock memblock_search_pfn_nid(unsigned long pfn,
			 unsigned long *start_pfn, unsigned long *end_pfn)
{
	struct memblock_type *type = &memblock.memory;
	int mid = memblock_search(type, PFN_PHYS(pfn));

	if (mid == -1)
		return -1;

	*start_pfn = PFN_DOWN(type->regions[mid].base);
	*end_pfn = PFN_DOWN(type->regions[mid].base + type->regions[mid].size);

	return memblock_get_region_node(&type->regions[mid]);
}
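
/**
 * memblock_is_region_memory - check if a region is a subset of memory
 * @base: base of region to check
 * @size: size of region to check
 *
 * Check if the region [@base, @base + @size) is a subset of a memory block.
 *
 * Return:
 * 0 if false, non-zero if true
 */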
bool __init_memblock memblock_is_region_memory(phys_addr_t base, phys_addr_t size)
{
	int idx = memblock_search(&memblock.memory, base);
	phys_addr_t end = base + memblock_cap_size(base, &size);

	if (idx == -1)
		return false;
	return (memblock.memory.regions[idx].base +
		 memblock.memory.regions[idx].size) >= end;
}
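
/**
 * memblock_is_region_reserved - check if a region intersects reserved memory
 * @base: base of region to check
 * @size: size of region to check
 *
 * Check if the region [@base, @base + @size) intersects a reserved
 * memory block.
 *
 * Return:
 * True if they intersect, false if not.
 */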
bool __init_memblock memblock_is_region_reserved(phys_addr_t base, phys_addr_t size)
{
	return memblock_overlaps_region(&memblock.reserved, base, size);
}

void __init_memblock memblock_trim_memory(phys_addr_t align)
{
	phys_addr_t start, end, orig_start, orig_end;
	struct memblock_region *r;

	for_each_mem_region(r) {
		orig_start = r->base;
		orig_end = r->base + r->size;
		start = round_up(orig_start, align);
		end = round_down(orig_end, align);

		if (start == orig_start && end == orig_end)
			continue;

		if (start < end) {
			r->base = start;
			r->size = end - start;
		} else {
			memblock_remove_region(&memblock.memory,
					       r - memblock.memory.regions);
			r--;
		}
	}
}

void __init_memblock memblock_set_current_limit(phys_addr_t limit)
{
	memblock.current_limit = limit;
}

phys_addr_t __init_memblock memblock_get_current_limit(void)
{
	return memblock.current_limit;
}

static void __init_memblock memblock_dump(struct memblock_type *type)
{
	phys_addr_t base, end, size;
	enum memblock_flags flags;
	int idx;
	struct memblock_region *rgn;

	pr_info(" %s.cnt = 0x%lx\n", type->name, type->cnt);

	for_each_memblock_type(idx, type, rgn) {
		char nid_buf[32] = "";

		base = rgn->base;
		size = rgn->size;
		end = base + size - 1;
		flags = rgn->flags;
#ifdef CONFIG_NUMA
		if (memblock_get_region_node(rgn) != MAX_NUMNODES)
			snprintf(nid_buf, sizeof(nid_buf), " on node %d",
				 memblock_get_region_node(rgn));
#endif
		pr_info(" %s[%#x]\t[%pa-%pa], %pa bytes%s flags: %#x\n",
			type->name, idx, &base, &end, &size, nid_buf, flags);
	}
}

static void __init_memblock __memblock_dump_all(void)
{
	pr_info("MEMBLOCK configuration:\n");
	pr_info(" memory size = %pa reserved size = %pa\n",
		&memblock.memory.total_size,
		&memblock.reserved.total_size);

	memblock_dump(&memblock.memory);
	memblock_dump(&memblock.reserved);
#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
	memblock_dump(&physmem);
#endif
}

void __init_memblock memblock_dump_all(void)
{
	if (memblock_debug)
		__memblock_dump_all();
}

void __init memblock_allow_resize(void)
{
	memblock_can_resize = 1;
}

static int __init early_memblock(char *p)
{
	if (p && strstr(p, "debug"))
		memblock_debug = 1;
	return 0;
}
early_param("memblock", early_memblock);

static void __init free_memmap(unsigned long start_pfn, unsigned long end_pfn)
{
	struct page *start_pg, *end_pg;
	phys_addr_t pg, pgend;

	/*
	 * Convert start_pfn/end_pfn to a struct page pointer.
	 */
	start_pg = pfn_to_page(start_pfn - 1) + 1;
	end_pg = pfn_to_page(end_pfn - 1) + 1;

	/*
	 * Convert to physical addresses, and round start upwards and end
	 * downwards.
	 */
	pg = PAGE_ALIGN(__pa(start_pg));
	pgend = __pa(end_pg) & PAGE_MASK;

	/*
	 * If there are free pages between these, free the section of the
	 * memmap array.
	 */
	if (pg < pgend)
		memblock_free(pg, pgend - pg);
}

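/*
 * The mem_map array can get very big.  Free the unused area of the memory map.
 */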
static void __init free_unused_memmap(void)
{
	unsigned long start, end, prev_end = 0;
	int i;

	if (!IS_ENABLED(CONFIG_HAVE_ARCH_PFN_VALID) ||
	    IS_ENABLED(CONFIG_SPARSEMEM_VMEMMAP))
		return;

	/*
	 * This relies on each bank being in address order.
	 * The banks are sorted previously in bootmem_init().
	 */
	for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, NULL) {
#ifdef CONFIG_SPARSEMEM
		/*
		 * Take care not to free memmap entries that don't exist
		 * due to SPARSEMEM sections which aren't present.
		 */
		start = min(start, ALIGN(prev_end, PAGES_PER_SECTION));
#endif
		/*
		 * Align down here since many operations in VM subsystem
		 * presume that there are no holes in the memory map inside
		 * a pageblock
		 */
		start = round_down(start, pageblock_nr_pages);

		/*
		 * If we had a previous bank, and there is a space
		 * between the current bank and the previous, free it.
		 */
		if (prev_end && prev_end < start)
			free_memmap(prev_end, start);

		/*
		 * Align up here since many operations in VM subsystem
		 * presume that there are no holes in the memory map inside
		 * a pageblock
		 */
		prev_end = ALIGN(end, pageblock_nr_pages);
	}

#ifdef CONFIG_SPARSEMEM
	if (!IS_ALIGNED(prev_end, PAGES_PER_SECTION)) {
		prev_end = ALIGN(end, pageblock_nr_pages);
		free_memmap(prev_end, ALIGN(prev_end, PAGES_PER_SECTION));
	}
#endif
}

static void __init __free_pages_memory(unsigned long start, unsigned long end)
{
	int order;

	while (start < end) {
		order = min(MAX_ORDER - 1UL, __ffs(start));

		while (start + (1UL << order) > end)
			order--;

		memblock_free_pages(pfn_to_page(start), start, order);

		start += (1UL << order);
	}
}

static unsigned long __init __free_memory_core(phys_addr_t start,
				 phys_addr_t end)
{
	unsigned long start_pfn = PFN_UP(start);
	unsigned long end_pfn = min_t(unsigned long,
				      PFN_DOWN(end), max_low_pfn);

	if (start_pfn >= end_pfn)
		return 0;

	__free_pages_memory(start_pfn, end_pfn);

	return end_pfn - start_pfn;
}

static void __init memmap_init_reserved_pages(void)
{
	struct memblock_region *region;
	phys_addr_t start, end;
	u64 i;

	/* initialize struct pages for the reserved regions */
	for_each_reserved_mem_range(i, &start, &end)
		reserve_bootmem_region(start, end);

	/* and also treat struct pages for the NOMAP regions as PageReserved */
	for_each_mem_region(region) {
		if (memblock_is_nomap(region)) {
			start = region->base;
			end = start + region->size;
			reserve_bootmem_region(start, end);
		}
	}
}

static unsigned long __init free_low_memory_core_early(void)
{
	unsigned long count = 0;
	phys_addr_t start, end;
	u64 i;

	memblock_clear_hotplug(0, -1);

	memmap_init_reserved_pages();

	/*
	 * We need to use NUMA_NO_NODE instead of NODE_DATA(0)->node_id
	 * because in some case like Node0 doesn't have RAM installed
	 * low ram will be on Node1
	 */
	for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE, &start, &end,
				NULL)
		count += __free_memory_core(start, end);

	return count;
}

static int reset_managed_pages_done __initdata;

void reset_node_managed_pages(pg_data_t *pgdat)
{
	struct zone *z;

	for (z = pgdat->node_zones; z < pgdat->node_zones + MAX_NR_ZONES; z++)
		atomic_long_set(&z->managed_pages, 0);
}

void __init reset_all_zones_managed_pages(void)
{
	struct pglist_data *pgdat;

	if (reset_managed_pages_done)
		return;

	for_each_online_pgdat(pgdat)
		reset_node_managed_pages(pgdat);

	reset_managed_pages_done = 1;
}

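/**
 * memblock_free_all - release free pages to the buddy allocator
 */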
void __init memblock_free_all(void)
{
	unsigned long pages;

	free_unused_memmap();
	reset_all_zones_managed_pages();

	pages = free_low_memory_core_early();
	totalram_pages_add(pages);
}

#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_ARCH_KEEP_MEMBLOCK)

static int memblock_debug_show(struct seq_file *m, void *private)
{
	struct memblock_type *type = m->private;
	struct memblock_region *reg;
	int i;
	phys_addr_t end;

	for (i = 0; i < type->cnt; i++) {
		reg = &type->regions[i];
		end = reg->base + reg->size - 1;

		seq_printf(m, "%4d: ", i);
		seq_printf(m, "%pa..%pa\n", &reg->base, &end);
	}
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(memblock_debug);

static int __init memblock_init_debugfs(void)
{
	struct dentry *root = debugfs_create_dir("memblock", NULL);

	debugfs_create_file("memory", 0444, root,
			    &memblock.memory, &memblock_debug_fops);
	debugfs_create_file("reserved", 0444, root,
			    &memblock.reserved, &memblock_debug_fops);
#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
	debugfs_create_file("physmem", 0444, root, &physmem,
			    &memblock_debug_fops);
#endif

	return 0;
}
__initcall(memblock_init_debugfs);

#endif /* CONFIG_DEBUG_FS */