/*
 * Procedures for maintaining information about logical memory blocks.
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/poison.h>
#include <linux/pfn.h>
#include <linux/debugfs.h>
#include <linux/kmemleak.h>
#include <linux/seq_file.h>
#include <linux/memblock.h>

#include <asm/sections.h>
#include <linux/io.h>

#include "internal.h"

#define INIT_MEMBLOCK_REGIONS		128
#define INIT_PHYSMEM_REGIONS		4

#ifndef INIT_MEMBLOCK_RESERVED_REGIONS
# define INIT_MEMBLOCK_RESERVED_REGIONS	INIT_MEMBLOCK_REGIONS
#endif

/*
 * DOC: memblock overview
 *
 * Memblock is the early boot memory allocator.  It keeps per-type arrays
 * of physical memory regions, each described by a struct memblock_type:
 *
 *   * "memory"   - physical memory available to the kernel;
 *   * "reserved" - ranges that were allocated or must not be touched;
 *   * "physmem"  - all physical memory regardless of restrictions
 *                  (only with CONFIG_HAVE_MEMBLOCK_PHYS_MAP).
 *
 * Each region is a struct memblock_region with a base address, size,
 * flags and, on NUMA systems, a node id.  Every type starts out with a
 * small statically sized bootstrap array that is doubled on demand once
 * memblock_allow_resize() has been called.
 *
 * Allocations are served top-down by default; bottom_up flips the
 * direction, and current_limit caps the highest address handed out for
 * MEMBLOCK_ALLOC_ACCESSIBLE requests.  Unless CONFIG_ARCH_KEEP_MEMBLOCK
 * is selected, the metadata is discarded once the page allocator takes
 * over (see memblock_discard() and memblock_free_all()).
 */

#ifndef CONFIG_NEED_MULTIPLE_NODES
struct pglist_data __refdata contig_page_data;
EXPORT_SYMBOL(contig_page_data);
#endif

unsigned long max_low_pfn;
unsigned long min_low_pfn;
unsigned long max_pfn;
unsigned long long max_possible_pfn;

static struct memblock_region memblock_memory_init_regions[INIT_MEMBLOCK_REGIONS] __initdata_memblock;
static struct memblock_region memblock_reserved_init_regions[INIT_MEMBLOCK_RESERVED_REGIONS] __initdata_memblock;
#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
static struct memblock_region memblock_physmem_init_regions[INIT_PHYSMEM_REGIONS] __initdata_memblock;
#endif

struct memblock memblock __initdata_memblock = {
	.memory.regions		= memblock_memory_init_regions,
	.memory.cnt		= 1,	/* empty dummy entry */
	.memory.max		= INIT_MEMBLOCK_REGIONS,
	.memory.name		= "memory",

	.reserved.regions	= memblock_reserved_init_regions,
	.reserved.cnt		= 1,	/* empty dummy entry */
	.reserved.max		= INIT_MEMBLOCK_RESERVED_REGIONS,
	.reserved.name		= "reserved",

#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
	.physmem.regions	= memblock_physmem_init_regions,
	.physmem.cnt		= 1,	/* empty dummy entry */
	.physmem.max		= INIT_PHYSMEM_REGIONS,
	.physmem.name		= "physmem",
#endif

	.bottom_up		= false,
	.current_limit		= MEMBLOCK_ALLOC_ANYWHERE,
};

int memblock_debug __initdata_memblock;
static bool system_has_some_mirror __initdata_memblock = false;
static int memblock_can_resize __initdata_memblock;
static int memblock_memory_in_slab __initdata_memblock = 0;
static int memblock_reserved_in_slab __initdata_memblock = 0;

static enum memblock_flags __init_memblock choose_memblock_flags(void)
{
	return system_has_some_mirror ? MEMBLOCK_MIRROR : MEMBLOCK_NONE;
}

/* adjust *@size so that (@base + *@size) doesn't overflow, return new size */
static inline phys_addr_t memblock_cap_size(phys_addr_t base, phys_addr_t *size)
{
	return *size = min(*size, PHYS_ADDR_MAX - base);
}

/*
 * Address comparison utilities
 */
static unsigned long __init_memblock memblock_addrs_overlap(phys_addr_t base1, phys_addr_t size1,
				       phys_addr_t base2, phys_addr_t size2)
{
	return ((base1 < (base2 + size2)) && (base2 < (base1 + size1)));
}

bool __init_memblock memblock_overlaps_region(struct memblock_type *type,
					phys_addr_t base, phys_addr_t size)
{
	unsigned long i;

	for (i = 0; i < type->cnt; i++)
		if (memblock_addrs_overlap(base, size, type->regions[i].base,
					   type->regions[i].size))
			break;
	return i < type->cnt;
}

/*
 * __memblock_find_range_bottom_up - find a free area of @size aligned to
 * @align in [@start, @end), scanning upward.  Helper for
 * memblock_find_in_range_node().  Returns the found address or 0.
 */
static phys_addr_t __init_memblock
__memblock_find_range_bottom_up(phys_addr_t start, phys_addr_t end,
				phys_addr_t size, phys_addr_t align, int nid,
				enum memblock_flags flags)
{
	phys_addr_t this_start, this_end, cand;
	u64 i;

	for_each_free_mem_range(i, nid, flags, &this_start, &this_end, NULL) {
		this_start = clamp(this_start, start, end);
		this_end = clamp(this_end, start, end);

		cand = round_up(this_start, align);
		if (cand < this_end && this_end - cand >= size)
			return cand;
	}

	return 0;
}

/*
 * __memblock_find_range_top_down - find a free area of @size aligned to
 * @align in [@start, @end), scanning downward.  Helper for
 * memblock_find_in_range_node().  Returns the found address or 0.
 */
static phys_addr_t __init_memblock
__memblock_find_range_top_down(phys_addr_t start, phys_addr_t end,
			       phys_addr_t size, phys_addr_t align, int nid,
			       enum memblock_flags flags)
{
	phys_addr_t this_start, this_end, cand;
	u64 i;

	for_each_free_mem_range_reverse(i, nid, flags, &this_start, &this_end,
					NULL) {
		this_start = clamp(this_start, start, end);
		this_end = clamp(this_end, start, end);

		if (this_end < size)
			continue;

		cand = round_down(this_end - size, align);
		if (cand >= this_start)
			return cand;
	}

	return 0;
}

/*
 * memblock_find_in_range_node - find a free area in the given range and node.
 *
 * When the allocation direction is bottom-up, the search starts above the
 * end of the kernel image so that the allocated memory is likely to stay
 * on the same node as the kernel.  If the bottom-up attempt fails, the
 * search falls back to top-down over the full range.
 *
 * Returns the found address on success, 0 on failure.
 */
static phys_addr_t __init_memblock memblock_find_in_range_node(phys_addr_t size,
					phys_addr_t align, phys_addr_t start,
					phys_addr_t end, int nid,
					enum memblock_flags flags)
{
	phys_addr_t kernel_end, ret;

	/* pump up @end */
	if (end == MEMBLOCK_ALLOC_ACCESSIBLE ||
	    end == MEMBLOCK_ALLOC_KASAN)
		end = memblock.current_limit;

	/* avoid allocating the first page */
	start = max_t(phys_addr_t, start, PAGE_SIZE);
	end = max(start, end);
	kernel_end = __pa_symbol(_end);

	if (memblock_bottom_up() && end > kernel_end) {
		phys_addr_t bottom_up_start;

		/* make sure we will allocate above the kernel */
		bottom_up_start = max(start, kernel_end);

		/* ok, try bottom-up allocation first */
		ret = __memblock_find_range_bottom_up(bottom_up_start, end,
						      size, align, nid, flags);
		if (ret)
			return ret;

		/*
		 * Bottom-up allocation failed; fall back to top-down over
		 * the whole range.  With memory hotremove this may hand out
		 * memory that was meant to stay removable, so warn about it.
		 */
		WARN_ONCE(IS_ENABLED(CONFIG_MEMORY_HOTREMOVE),
			  "memblock: bottom-up allocation failed, memory hotremove may be affected\n");
	}

	return __memblock_find_range_top_down(start, end, size, align, nid,
					      flags);
}

/*
 * memblock_find_in_range - find a free area in the given range, retrying
 * with non-mirrored memory if a mirrored request cannot be satisfied.
 * Returns the found address on success, 0 on failure.
 */
phys_addr_t __init_memblock memblock_find_in_range(phys_addr_t start,
					phys_addr_t end, phys_addr_t size,
					phys_addr_t align)
{
	phys_addr_t ret;
	enum memblock_flags flags = choose_memblock_flags();

again:
	ret = memblock_find_in_range_node(size, align, start, end,
					  NUMA_NO_NODE, flags);

	if (!ret && (flags & MEMBLOCK_MIRROR)) {
		pr_warn("Could not allocate %pap bytes of mirrored memory\n",
			&size);
		flags &= ~MEMBLOCK_MIRROR;
		goto again;
	}

	return ret;
}

static void __init_memblock memblock_remove_region(struct memblock_type *type, unsigned long r)
{
	type->total_size -= type->regions[r].size;
	memmove(&type->regions[r], &type->regions[r + 1],
		(type->cnt - (r + 1)) * sizeof(type->regions[r]));
	type->cnt--;

	/* Special case for empty arrays */
	if (type->cnt == 0) {
		WARN_ON(type->total_size != 0);
		type->cnt = 1;
		type->regions[0].base = 0;
		type->regions[0].size = 0;
		type->regions[0].flags = 0;
		memblock_set_region_node(&type->regions[0], MAX_NUMNODES);
	}
}

#ifndef CONFIG_ARCH_KEEP_MEMBLOCK
/*
 * memblock_discard - discard the memory and reserved arrays if they were
 * allocated at runtime; the static bootstrap arrays are kept.
 */
void __init memblock_discard(void)
{
	phys_addr_t addr, size;

	if (memblock.reserved.regions != memblock_reserved_init_regions) {
		addr = __pa(memblock.reserved.regions);
		size = PAGE_ALIGN(sizeof(struct memblock_region) *
				  memblock.reserved.max);
		__memblock_free_late(addr, size);
	}

	if (memblock.memory.regions != memblock_memory_init_regions) {
		addr = __pa(memblock.memory.regions);
		size = PAGE_ALIGN(sizeof(struct memblock_region) *
				  memblock.memory.max);
		__memblock_free_late(addr, size);
	}
}
#endif

/*
 * memblock_double_array - double the size of the region array of @type.
 * @new_area_start/@new_area_size describe a range that is about to be
 * reserved and must be avoided when the new array is placed with the
 * early allocator.  Returns 0 on success, -1 on failure.
 */
static int __init_memblock memblock_double_array(struct memblock_type *type,
						phys_addr_t new_area_start,
						phys_addr_t new_area_size)
{
	struct memblock_region *new_array, *old_array;
	phys_addr_t old_alloc_size, new_alloc_size;
	phys_addr_t old_size, new_size, addr, new_end;
	int use_slab = slab_is_available();
	int *in_slab;

	/*
	 * We don't allow resizing until we know about the reserved regions
	 * of memory that aren't suitable for allocation.
	 */
	if (!memblock_can_resize)
		return -1;

	/* Calculate new doubled size */
	old_size = type->max * sizeof(struct memblock_region);
	new_size = old_size << 1;
	/*
	 * Allocate the new array page-aligned so that it can be freed
	 * completely later.
	 */
	old_alloc_size = PAGE_ALIGN(old_size);
	new_alloc_size = PAGE_ALIGN(new_size);

	/* Retrieve the slab flag */
	if (type == &memblock.memory)
		in_slab = &memblock_memory_in_slab;
	else
		in_slab = &memblock_reserved_in_slab;

	/* Try to find some space for it */
	if (use_slab) {
		new_array = kmalloc(new_size, GFP_KERNEL);
		addr = new_array ? __pa(new_array) : 0;
	} else {
		/* only exclude range when trying to double reserved.regions */
		if (type != &memblock.reserved)
			new_area_start = new_area_size = 0;

		addr = memblock_find_in_range(new_area_start + new_area_size,
						memblock.current_limit,
						new_alloc_size, PAGE_SIZE);
		if (!addr && new_area_size)
			addr = memblock_find_in_range(0,
				min(new_area_start, memblock.current_limit),
				new_alloc_size, PAGE_SIZE);

		new_array = addr ? __va(addr) : NULL;
	}
	if (!addr) {
		pr_err("memblock: Failed to double %s array from %ld to %ld entries !\n",
		       type->name, type->max, type->max * 2);
		return -1;
	}

	new_end = addr + new_size - 1;
	memblock_dbg("memblock: %s is doubled to %ld at [%pa-%pa]",
			type->name, type->max * 2, &addr, &new_end);

	/*
	 * Found space, copy the old array over, zero the unused tail and
	 * switch the type to the new array.
	 */
	memcpy(new_array, type->regions, old_size);
	memset(new_array + type->max, 0, old_size);
	old_array = type->regions;
	type->regions = new_array;
	type->max <<= 1;

	/* Free old array. We needn't free it if the array is the static one */
	if (*in_slab)
		kfree(old_array);
	else if (old_array != memblock_memory_init_regions &&
		 old_array != memblock_reserved_init_regions)
		memblock_free(__pa(old_array), old_alloc_size);

	/* Reserve the new array only if it came from memblock itself */
	if (!use_slab)
		BUG_ON(memblock_reserve(addr, new_alloc_size));

	/* Update slab flag */
	*in_slab = use_slab;

	return 0;
}

/*
 * memblock_merge_regions - merge neighboring regions of @type that are
 * physically adjacent and have the same node id and flags.
 */
static void __init_memblock memblock_merge_regions(struct memblock_type *type)
{
	int i = 0;

	/* cnt never goes below 1 */
	while (i < type->cnt - 1) {
		struct memblock_region *this = &type->regions[i];
		struct memblock_region *next = &type->regions[i + 1];

		if (this->base + this->size != next->base ||
		    memblock_get_region_node(this) !=
		    memblock_get_region_node(next) ||
		    this->flags != next->flags) {
			BUG_ON(this->base + this->size > next->base);
			i++;
			continue;
		}

		this->size += next->size;
		/* move forward from next + 1, index of which is i + 2 */
		memmove(next, next + 1, (type->cnt - (i + 2)) * sizeof(*next));
		type->cnt--;
	}
}

/*
 * memblock_insert_region - insert a new region [@base, @base + @size) with
 * @nid and @flags into @type at index @idx, shifting later regions up.
 * The caller must ensure the array has room for one more entry.
 */
static void __init_memblock memblock_insert_region(struct memblock_type *type,
						   int idx, phys_addr_t base,
						   phys_addr_t size,
						   int nid,
						   enum memblock_flags flags)
{
	struct memblock_region *rgn = &type->regions[idx];

	BUG_ON(type->cnt >= type->max);
	memmove(rgn + 1, rgn, (type->cnt - idx) * sizeof(*rgn));
	rgn->base = base;
	rgn->size = size;
	rgn->flags = flags;
	memblock_set_region_node(rgn, nid);
	type->cnt++;
	type->total_size += size;
}

/*
 * memblock_add_range - add the range [@base, @base + @size) to @type.
 * The new range may overlap existing regions; only the non-overlapping
 * parts are inserted, and compatible neighbours are merged afterwards.
 * Returns 0 on success, -errno on failure.
 */
static int __init_memblock memblock_add_range(struct memblock_type *type,
				phys_addr_t base, phys_addr_t size,
				int nid, enum memblock_flags flags)
{
	bool insert = false;
	phys_addr_t obase = base;
	phys_addr_t end = base + memblock_cap_size(base, &size);
	int idx, nr_new;
	struct memblock_region *rgn;

	if (!size)
		return 0;

	/* special case for empty array */
	if (type->regions[0].size == 0) {
		WARN_ON(type->cnt != 1 || type->total_size);
		type->regions[0].base = base;
		type->regions[0].size = size;
		type->regions[0].flags = flags;
		memblock_set_region_node(&type->regions[0], nid);
		type->total_size = size;
		return 0;
	}
repeat:
	/*
	 * The following is executed twice.  The first pass only counts the
	 * number of regions needed to accommodate the new area; the second
	 * pass, with @insert true, actually inserts them.
	 */
	base = obase;
	nr_new = 0;

	for_each_memblock_type(idx, type, rgn) {
		phys_addr_t rbase = rgn->base;
		phys_addr_t rend = rbase + rgn->size;

		if (rbase >= end)
			break;
		if (rend <= base)
			continue;
		/*
		 * @rgn overlaps.  If it separates the lower part of the new
		 * area, insert that portion.
		 */
		if (rbase > base) {
#ifdef CONFIG_NEED_MULTIPLE_NODES
			WARN_ON(nid != memblock_get_region_node(rgn));
#endif
			WARN_ON(flags != rgn->flags);
			nr_new++;
			if (insert)
				memblock_insert_region(type, idx++, base,
						       rbase - base, nid,
						       flags);
		}
		/* area below @rend is dealt with, forget about it */
		base = min(rend, end);
	}

	/* insert the remaining portion */
	if (base < end) {
		nr_new++;
		if (insert)
			memblock_insert_region(type, idx, base, end - base,
					       nid, flags);
	}

	if (!nr_new)
		return 0;

	/*
	 * If this was the first round, resize the array if needed and
	 * repeat for the actual insertions; otherwise, merge and return.
	 */
	if (!insert) {
		while (type->cnt + nr_new > type->max)
			if (memblock_double_array(type, obase, size) < 0)
				return -ENOMEM;
		insert = true;
		goto repeat;
	} else {
		memblock_merge_regions(type);
		return 0;
	}
}

/*
 * memblock_add_node - add the range [@base, @base + @size) to the "memory"
 * type with node id @nid.  Returns 0 on success, -errno on failure.
 */
int __init_memblock memblock_add_node(phys_addr_t base, phys_addr_t size,
				       int nid)
{
	return memblock_add_range(&memblock.memory, base, size, nid, 0);
}

/*
 * memblock_add - add the range [@base, @base + @size) to the "memory" type.
 * Returns 0 on success, -errno on failure.
 */
int __init_memblock memblock_add(phys_addr_t base, phys_addr_t size)
{
	phys_addr_t end = base + size - 1;

	memblock_dbg("%s: [%pa-%pa] %pS\n", __func__,
		     &base, &end, (void *)_RET_IP_);

	return memblock_add_range(&memblock.memory, base, size, MAX_NUMNODES, 0);
}
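
/*
 * Illustrative usage (not part of this file): early architecture setup
 * code typically reports RAM and carves out in-use ranges roughly like
 * this.  The addresses, sizes and the initrd variables below are
 * placeholders, not values taken from any real platform:
 *
 *	memblock_add(0x80000000, SZ_1G);			// report RAM
 *	memblock_reserve(__pa_symbol(_stext),
 *			 __pa_symbol(_end) - __pa_symbol(_stext)); // kernel image
 *	memblock_reserve(initrd_start_phys, initrd_size);	// boot payloads
 */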

/*
 * memblock_isolate_range - isolate the given range into disjoint memblocks.
 * Walks @type and splits regions so that [@base, @base + @size) is covered
 * by whole regions only; *@start_rgn and *@end_rgn are set to the index
 * range [*@start_rgn, *@end_rgn) of the isolated regions.
 * Returns 0 on success, -errno on failure.
 */
static int __init_memblock memblock_isolate_range(struct memblock_type *type,
					phys_addr_t base, phys_addr_t size,
					int *start_rgn, int *end_rgn)
{
	phys_addr_t end = base + memblock_cap_size(base, &size);
	int idx;
	struct memblock_region *rgn;

	*start_rgn = *end_rgn = 0;

	if (!size)
		return 0;

	/* we'll create at most two more regions */
	while (type->cnt + 2 > type->max)
		if (memblock_double_array(type, base, size) < 0)
			return -ENOMEM;

	for_each_memblock_type(idx, type, rgn) {
		phys_addr_t rbase = rgn->base;
		phys_addr_t rend = rbase + rgn->size;

		if (rbase >= end)
			break;
		if (rend <= base)
			continue;

		if (rbase < base) {
			/*
			 * @rgn intersects from below.  Split and continue
			 * to process the next region - the new top half.
			 */
			rgn->base = base;
			rgn->size -= base - rbase;
			type->total_size -= base - rbase;
			memblock_insert_region(type, idx, rbase, base - rbase,
					       memblock_get_region_node(rgn),
					       rgn->flags);
		} else if (rend > end) {
			/*
			 * @rgn intersects from above.  Split and redo the
			 * current region - the new bottom half.
			 */
			rgn->base = end;
			rgn->size -= end - rbase;
			type->total_size -= end - rbase;
			memblock_insert_region(type, idx--, rbase, end - rbase,
					       memblock_get_region_node(rgn),
					       rgn->flags);
		} else {
			/* @rgn is fully contained, record it */
			if (!*end_rgn)
				*start_rgn = idx;
			*end_rgn = idx + 1;
		}
	}

	return 0;
}

static int __init_memblock memblock_remove_range(struct memblock_type *type,
					  phys_addr_t base, phys_addr_t size)
{
	int start_rgn, end_rgn;
	int i, ret;

	ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
	if (ret)
		return ret;

	for (i = end_rgn - 1; i >= start_rgn; i--)
		memblock_remove_region(type, i);
	return 0;
}

int __init_memblock memblock_remove(phys_addr_t base, phys_addr_t size)
{
	phys_addr_t end = base + size - 1;

	memblock_dbg("%s: [%pa-%pa] %pS\n", __func__,
		     &base, &end, (void *)_RET_IP_);

	return memblock_remove_range(&memblock.memory, base, size);
}

/*
 * memblock_free - free boot memory block previously reserved with
 * memblock_reserve(); the freed range becomes available for allocation
 * again.  Returns 0 on success, -errno on failure.
 */
int __init_memblock memblock_free(phys_addr_t base, phys_addr_t size)
{
	phys_addr_t end = base + size - 1;

	memblock_dbg("%s: [%pa-%pa] %pS\n", __func__,
		     &base, &end, (void *)_RET_IP_);

	kmemleak_free_part_phys(base, size);
	return memblock_remove_range(&memblock.reserved, base, size);
}

int __init_memblock memblock_reserve(phys_addr_t base, phys_addr_t size)
{
	phys_addr_t end = base + size - 1;

	memblock_dbg("%s: [%pa-%pa] %pS\n", __func__,
		     &base, &end, (void *)_RET_IP_);

	return memblock_add_range(&memblock.reserved, base, size, MAX_NUMNODES, 0);
}

#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
int __init_memblock memblock_physmem_add(phys_addr_t base, phys_addr_t size)
{
	phys_addr_t end = base + size - 1;

	memblock_dbg("%s: [%pa-%pa] %pS\n", __func__,
		     &base, &end, (void *)_RET_IP_);

	return memblock_add_range(&memblock.physmem, base, size, MAX_NUMNODES, 0);
}
#endif

/*
 * memblock_setclr_flag - set or clear @flag on memory regions within
 * [@base, @base + @size).  The range is first isolated so that whole
 * regions are affected, then compatible neighbours are re-merged.
 * Returns 0 on success, -errno on failure.
 */
static int __init_memblock memblock_setclr_flag(phys_addr_t base,
				phys_addr_t size, int set, int flag)
{
	struct memblock_type *type = &memblock.memory;
	int i, ret, start_rgn, end_rgn;

	ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
	if (ret)
		return ret;

	for (i = start_rgn; i < end_rgn; i++) {
		struct memblock_region *r = &type->regions[i];

		if (set)
			r->flags |= flag;
		else
			r->flags &= ~flag;
	}

	memblock_merge_regions(type);
	return 0;
}
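
/*
 * Illustrative usage (not part of this file): early firmware parsing code
 * may tag ranges it learns about, e.g. reliable ("mirrored") memory or
 * firmware-owned ranges that must never be mapped by the kernel.  The
 * addresses and the fw_table_* variables below are made-up placeholders:
 *
 *	memblock_mark_mirror(0x100000000ULL, SZ_2G);
 *	memblock_mark_nomap(fw_table_base, fw_table_size);
 */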

/* Mark memory region [@base, @base + @size) as hotpluggable. */
int __init_memblock memblock_mark_hotplug(phys_addr_t base, phys_addr_t size)
{
	return memblock_setclr_flag(base, size, 1, MEMBLOCK_HOTPLUG);
}

/* Clear the hotpluggable flag on memory region [@base, @base + @size). */
int __init_memblock memblock_clear_hotplug(phys_addr_t base, phys_addr_t size)
{
	return memblock_setclr_flag(base, size, 0, MEMBLOCK_HOTPLUG);
}

/* Mark memory region [@base, @base + @size) as mirrored. */
int __init_memblock memblock_mark_mirror(phys_addr_t base, phys_addr_t size)
{
	system_has_some_mirror = true;

	return memblock_setclr_flag(base, size, 1, MEMBLOCK_MIRROR);
}

/* Mark memory region [@base, @base + @size) with the MEMBLOCK_NOMAP flag. */
int __init_memblock memblock_mark_nomap(phys_addr_t base, phys_addr_t size)
{
	return memblock_setclr_flag(base, size, 1, MEMBLOCK_NOMAP);
}

/* Clear the MEMBLOCK_NOMAP flag on memory region [@base, @base + @size). */
int __init_memblock memblock_clear_nomap(phys_addr_t base, phys_addr_t size)
{
	return memblock_setclr_flag(base, size, 0, MEMBLOCK_NOMAP);
}

/*
 * __next_reserved_mem_region - iterator helper for
 * for_each_reserved_mem_region(); advances *@idx over memblock.reserved
 * and reports the current region's first and last byte addresses.
 */
void __init_memblock __next_reserved_mem_region(u64 *idx,
					   phys_addr_t *out_start,
					   phys_addr_t *out_end)
{
	struct memblock_type *type = &memblock.reserved;

	if (*idx < type->cnt) {
		struct memblock_region *r = &type->regions[*idx];
		phys_addr_t base = r->base;
		phys_addr_t size = r->size;

		if (out_start)
			*out_start = base;
		if (out_end)
			*out_end = base + size - 1;

		*idx += 1;
		return;
	}

	/* signal end of iteration */
	*idx = ULLONG_MAX;
}

static bool should_skip_region(struct memblock_region *m, int nid, int flags)
{
	int m_nid = memblock_get_region_node(m);

	/* only memory regions are associated with nodes, check it */
	if (nid != NUMA_NO_NODE && nid != m_nid)
		return true;

	/* skip hotpluggable memory regions if needed */
	if (movable_node_is_enabled() && memblock_is_hotpluggable(m))
		return true;

	/* if we want mirror memory skip non-mirror memory regions */
	if ((flags & MEMBLOCK_MIRROR) && !memblock_is_mirror(m))
		return true;

	/* skip nomap memory unless we were asked for it explicitly */
	if (!(flags & MEMBLOCK_NOMAP) && memblock_is_nomap(m))
		return true;

	return false;
}

/*
 * __next_mem_range - next function for for_each_free_mem_range() etc.
 *
 * Finds the first area from *@idx which matches @nid, fills the out
 * parameters and updates *@idx for the next call.  If @type_b is NULL,
 * areas of @type_a are returned; otherwise the areas of @type_a that are
 * not covered by @type_b (typically the free areas: memory minus
 * reserved) are returned.  The lower 32 bits of *@idx index @type_a and
 * the upper 32 bits index @type_b.
 */
void __init_memblock __next_mem_range(u64 *idx, int nid,
				      enum memblock_flags flags,
				      struct memblock_type *type_a,
				      struct memblock_type *type_b,
				      phys_addr_t *out_start,
				      phys_addr_t *out_end, int *out_nid)
{
	int idx_a = *idx & 0xffffffff;
	int idx_b = *idx >> 32;

	if (WARN_ONCE(nid == MAX_NUMNODES,
	    "Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n"))
		nid = NUMA_NO_NODE;

	for (; idx_a < type_a->cnt; idx_a++) {
		struct memblock_region *m = &type_a->regions[idx_a];

		phys_addr_t m_start = m->base;
		phys_addr_t m_end = m->base + m->size;
		int m_nid = memblock_get_region_node(m);

		if (should_skip_region(m, nid, flags))
			continue;

		if (!type_b) {
			if (out_start)
				*out_start = m_start;
			if (out_end)
				*out_end = m_end;
			if (out_nid)
				*out_nid = m_nid;
			idx_a++;
			*idx = (u32)idx_a | (u64)idx_b << 32;
			return;
		}

		/* scan areas before each reservation */
		for (; idx_b < type_b->cnt + 1; idx_b++) {
			struct memblock_region *r;
			phys_addr_t r_start;
			phys_addr_t r_end;

			r = &type_b->regions[idx_b];
			r_start = idx_b ? r[-1].base + r[-1].size : 0;
			r_end = idx_b < type_b->cnt ?
				r->base : PHYS_ADDR_MAX;

			/*
			 * if idx_b advanced past idx_a,
			 * break out to advance idx_a
			 */
			if (r_start >= m_end)
				break;
			/* if the two regions intersect, we're done */
			if (m_start < r_end) {
				if (out_start)
					*out_start =
						max(m_start, r_start);
				if (out_end)
					*out_end = min(m_end, r_end);
				if (out_nid)
					*out_nid = m_nid;
				/*
				 * The region which ends first is
				 * advanced for the next iteration.
				 */
				if (m_end <= r_end)
					idx_a++;
				else
					idx_b++;
				*idx = (u32)idx_a | (u64)idx_b << 32;
				return;
			}
		}
	}

	/* signal end of iteration */
	*idx = ULLONG_MAX;
}
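
/*
 * Illustrative usage (not part of this file): callers normally reach this
 * iterator through the for_each_free_mem_range() wrapper, e.g. to walk
 * every free (memory minus reserved) area:
 *
 *	phys_addr_t start, end;
 *	u64 i;
 *
 *	for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE,
 *				&start, &end, NULL)
 *		pr_info("free: [%pa-%pa]\n", &start, &end);
 */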

/*
 * __next_mem_range_rev - generic next function for for_each_*_range_rev().
 * Reverse of __next_mem_range(): walks the same areas from the top down.
 * *@idx must be ULLONG_MAX on the first call.
 */
void __init_memblock __next_mem_range_rev(u64 *idx, int nid,
					  enum memblock_flags flags,
					  struct memblock_type *type_a,
					  struct memblock_type *type_b,
					  phys_addr_t *out_start,
					  phys_addr_t *out_end, int *out_nid)
{
	int idx_a = *idx & 0xffffffff;
	int idx_b = *idx >> 32;

	if (WARN_ONCE(nid == MAX_NUMNODES, "Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n"))
		nid = NUMA_NO_NODE;

	if (*idx == (u64)ULLONG_MAX) {
		idx_a = type_a->cnt - 1;
		if (type_b != NULL)
			idx_b = type_b->cnt;
		else
			idx_b = 0;
	}

	for (; idx_a >= 0; idx_a--) {
		struct memblock_region *m = &type_a->regions[idx_a];

		phys_addr_t m_start = m->base;
		phys_addr_t m_end = m->base + m->size;
		int m_nid = memblock_get_region_node(m);

		if (should_skip_region(m, nid, flags))
			continue;

		if (!type_b) {
			if (out_start)
				*out_start = m_start;
			if (out_end)
				*out_end = m_end;
			if (out_nid)
				*out_nid = m_nid;
			idx_a--;
			*idx = (u32)idx_a | (u64)idx_b << 32;
			return;
		}

		/* scan areas before each reservation */
		for (; idx_b >= 0; idx_b--) {
			struct memblock_region *r;
			phys_addr_t r_start;
			phys_addr_t r_end;

			r = &type_b->regions[idx_b];
			r_start = idx_b ? r[-1].base + r[-1].size : 0;
			r_end = idx_b < type_b->cnt ?
				r->base : PHYS_ADDR_MAX;
			/*
			 * if idx_b advanced past idx_a,
			 * break out to advance idx_a
			 */
			if (r_end <= m_start)
				break;
			/* if the two regions intersect, we're done */
			if (m_end > r_start) {
				if (out_start)
					*out_start = max(m_start, r_start);
				if (out_end)
					*out_end = min(m_end, r_end);
				if (out_nid)
					*out_nid = m_nid;
				if (m_start >= r_start)
					idx_a--;
				else
					idx_b--;
				*idx = (u32)idx_a | (u64)idx_b << 32;
				return;
			}
		}
	}
	/* signal end of iteration */
	*idx = ULLONG_MAX;
}

/*
 * Common iterator interface used to define for_each_mem_pfn_range().
 */
void __init_memblock __next_mem_pfn_range(int *idx, int nid,
				unsigned long *out_start_pfn,
				unsigned long *out_end_pfn, int *out_nid)
{
	struct memblock_type *type = &memblock.memory;
	struct memblock_region *r;
	int r_nid;

	while (++*idx < type->cnt) {
		r = &type->regions[*idx];
		r_nid = memblock_get_region_node(r);

		if (PFN_UP(r->base) >= PFN_DOWN(r->base + r->size))
			continue;
		if (nid == MAX_NUMNODES || nid == r_nid)
			break;
	}
	if (*idx >= type->cnt) {
		*idx = -1;
		return;
	}

	if (out_start_pfn)
		*out_start_pfn = PFN_UP(r->base);
	if (out_end_pfn)
		*out_end_pfn = PFN_DOWN(r->base + r->size);
	if (out_nid)
		*out_nid = r_nid;
}
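
/*
 * Illustrative usage (not part of this file): node and zone setup code
 * walks registered memory one PFN span at a time with the wrapper macro;
 * init_pfn_span() below is a made-up placeholder for whatever the caller
 * does with each span:
 *
 *	unsigned long start_pfn, end_pfn;
 *	int i, nid;
 *
 *	for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid)
 *		init_pfn_span(start_pfn, end_pfn, nid);
 */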

/*
 * memblock_set_node - set the node id for regions of @type covering
 * [@base, @base + @size).  Regions crossing the boundaries are split
 * first.  Returns 0 on success, -errno on failure.
 */
int __init_memblock memblock_set_node(phys_addr_t base, phys_addr_t size,
				      struct memblock_type *type, int nid)
{
#ifdef CONFIG_NEED_MULTIPLE_NODES
	int start_rgn, end_rgn;
	int i, ret;

	ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
	if (ret)
		return ret;

	for (i = start_rgn; i < end_rgn; i++)
		memblock_set_region_node(&type->regions[i], nid);

	memblock_merge_regions(type);
#endif
	return 0;
}

#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
/*
 * __next_mem_pfn_range_in_zone - iterator for for_each_*_range_in_zone().
 * Returns the next free range of PFNs (memory that is not reserved) that
 * intersects @zone, clipped to the zone boundaries.  On the first call
 * *@idx must be U64_MAX; when the iteration is exhausted *@out_spfn is
 * set to ULONG_MAX and *@out_epfn to 0.
 */
void __init_memblock
__next_mem_pfn_range_in_zone(u64 *idx, struct zone *zone,
			     unsigned long *out_spfn, unsigned long *out_epfn)
{
	int zone_nid = zone_to_nid(zone);
	phys_addr_t spa, epa;
	int nid;

	__next_mem_range(idx, zone_nid, MEMBLOCK_NONE,
			 &memblock.memory, &memblock.reserved,
			 &spa, &epa, &nid);

	while (*idx != U64_MAX) {
		unsigned long epfn = PFN_DOWN(epa);
		unsigned long spfn = PFN_UP(spa);

		/*
		 * Verify the end is at least past the start of the zone and
		 * that we have at least one PFN to initialize.
		 */
		if (zone->zone_start_pfn < epfn && spfn < epfn) {
			/* if we went too far just stop searching */
			if (zone_end_pfn(zone) <= spfn) {
				*idx = U64_MAX;
				break;
			}

			if (out_spfn)
				*out_spfn = max(zone->zone_start_pfn, spfn);
			if (out_epfn)
				*out_epfn = min(zone_end_pfn(zone), epfn);

			return;
		}

		__next_mem_range(idx, zone_nid, MEMBLOCK_NONE,
				 &memblock.memory, &memblock.reserved,
				 &spa, &epa, &nid);
	}

	/* signal end of iteration */
	if (out_spfn)
		*out_spfn = ULONG_MAX;
	if (out_epfn)
		*out_epfn = 0;
}

#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */

/*
 * memblock_alloc_range_nid - allocate a boot memory block.
 * Finds @size bytes aligned to @align within [@start, @end) on node @nid
 * and marks them reserved.  If the node cannot be satisfied and
 * @exact_nid is false, any node is tried next; if mirrored memory was
 * requested but is exhausted, the allocation falls back to any memory.
 * Returns the physical address of the block on success, 0 on failure.
 */
phys_addr_t __init memblock_alloc_range_nid(phys_addr_t size,
					phys_addr_t align, phys_addr_t start,
					phys_addr_t end, int nid,
					bool exact_nid)
{
	enum memblock_flags flags = choose_memblock_flags();
	phys_addr_t found;

	if (WARN_ONCE(nid == MAX_NUMNODES, "Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n"))
		nid = NUMA_NO_NODE;

	if (!align) {
		/* Can't use WARNs this early in boot on powerpc */
		dump_stack();
		align = SMP_CACHE_BYTES;
	}

again:
	found = memblock_find_in_range_node(size, align, start, end, nid,
					    flags);
	if (found && !memblock_reserve(found, size))
		goto done;

	if (nid != NUMA_NO_NODE && !exact_nid) {
		found = memblock_find_in_range_node(size, align, start,
						    end, NUMA_NO_NODE,
						    flags);
		if (found && !memblock_reserve(found, size))
			goto done;
	}

	if (flags & MEMBLOCK_MIRROR) {
		flags &= ~MEMBLOCK_MIRROR;
		pr_warn("Could not allocate %pap bytes of mirrored memory\n",
			&size);
		goto again;
	}

	return 0;

done:
	/* Skip kmemleak for the high-volume allocations done by kasan_init() */
	if (end != MEMBLOCK_ALLOC_KASAN)
		/*
		 * The min_count is set to 0 so that memblock allocated
		 * blocks are never reported as leaks. This is because many
		 * of these blocks are only referred via the physical
		 * address which is not looked up by kmemleak.
		 */
		kmemleak_alloc_phys(found, size, 0, 0);

	return found;
}

/*
 * memblock_phys_alloc_range - allocate a memory block inside the range
 * [@start, @end).  Returns the physical address of the allocated block
 * on success, 0 on failure.
 */
phys_addr_t __init memblock_phys_alloc_range(phys_addr_t size,
					     phys_addr_t align,
					     phys_addr_t start,
					     phys_addr_t end)
{
	return memblock_alloc_range_nid(size, align, start, end, NUMA_NO_NODE,
					false);
}

/*
 * memblock_phys_alloc_try_nid - allocate a memory block from the given
 * NUMA node, falling back to any node if that fails.  Returns the
 * physical address of the allocated block on success, 0 on failure.
 */
phys_addr_t __init memblock_phys_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid)
{
	return memblock_alloc_range_nid(size, align, 0,
					MEMBLOCK_ALLOC_ACCESSIBLE, nid, false);
}

/*
 * memblock_alloc_internal - allocate a boot memory block and return its
 * kernel virtual address.  Falls back to kzalloc_node() if the slab
 * allocator is already available, clamps @max_addr to the current limit
 * and retries without @min_addr if the constrained allocation fails.
 * Returns the virtual address of the allocated block on success, NULL on
 * failure.
 */
static void * __init memblock_alloc_internal(
				phys_addr_t size, phys_addr_t align,
				phys_addr_t min_addr, phys_addr_t max_addr,
				int nid, bool exact_nid)
{
	phys_addr_t alloc;

	/*
	 * Detect any accidental use of these APIs after slab is ready, as at
	 * this moment memblock may be deinitialized already and its
	 * internal data may be destroyed (after execution of memblock_free_all)
	 */
	if (WARN_ON_ONCE(slab_is_available()))
		return kzalloc_node(size, GFP_NOWAIT, nid);

	if (max_addr > memblock.current_limit)
		max_addr = memblock.current_limit;

	alloc = memblock_alloc_range_nid(size, align, min_addr, max_addr, nid,
					 exact_nid);

	/* retry allocation without lower limit */
	if (!alloc && min_addr)
		alloc = memblock_alloc_range_nid(size, align, 0, max_addr, nid,
						 exact_nid);

	if (!alloc)
		return NULL;

	return phys_to_virt(alloc);
}

/*
 * memblock_alloc_exact_nid_raw - allocate a boot memory block on the exact
 * node without zeroing the memory; when init poisoning is enabled the
 * block is filled with the poison pattern instead.  Returns the virtual
 * address of the allocated block on success, NULL on failure.
 */
void * __init memblock_alloc_exact_nid_raw(
			phys_addr_t size, phys_addr_t align,
			phys_addr_t min_addr, phys_addr_t max_addr,
			int nid)
{
	void *ptr;

	memblock_dbg("%s: %llu bytes align=0x%llx nid=%d from=%pa max_addr=%pa %pS\n",
		     __func__, (u64)size, (u64)align, nid, &min_addr,
		     &max_addr, (void *)_RET_IP_);

	ptr = memblock_alloc_internal(size, align,
					   min_addr, max_addr, nid, true);
	if (ptr && size > 0)
		page_init_poison(ptr, size);

	return ptr;
}

/*
 * memblock_alloc_try_nid_raw - allocate a boot memory block preferring the
 * given node, without zeroing the memory.  Returns the virtual address of
 * the allocated block on success, NULL on failure.
 */
void * __init memblock_alloc_try_nid_raw(
			phys_addr_t size, phys_addr_t align,
			phys_addr_t min_addr, phys_addr_t max_addr,
			int nid)
{
	void *ptr;

	memblock_dbg("%s: %llu bytes align=0x%llx nid=%d from=%pa max_addr=%pa %pS\n",
		     __func__, (u64)size, (u64)align, nid, &min_addr,
		     &max_addr, (void *)_RET_IP_);

	ptr = memblock_alloc_internal(size, align,
					   min_addr, max_addr, nid, false);
	if (ptr && size > 0)
		page_init_poison(ptr, size);

	return ptr;
}

/*
 * memblock_alloc_try_nid - allocate a boot memory block preferring the
 * given node; the allocated memory is zeroed.  Returns the virtual
 * address of the allocated block on success, NULL on failure.
 */
void * __init memblock_alloc_try_nid(
			phys_addr_t size, phys_addr_t align,
			phys_addr_t min_addr, phys_addr_t max_addr,
			int nid)
{
	void *ptr;

	memblock_dbg("%s: %llu bytes align=0x%llx nid=%d from=%pa max_addr=%pa %pS\n",
		     __func__, (u64)size, (u64)align, nid, &min_addr,
		     &max_addr, (void *)_RET_IP_);
	ptr = memblock_alloc_internal(size, align,
					   min_addr, max_addr, nid, false);
	if (ptr)
		memset(ptr, 0, size);

	return ptr;
}
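
/*
 * Illustrative usage (not part of this file): before the slab allocator is
 * up, early code typically allocates through the memblock_alloc() family
 * of wrappers declared in <linux/memblock.h>, which end up in the helpers
 * above.  The table name and its type are made-up placeholders:
 *
 *	struct foo *table;
 *
 *	table = memblock_alloc(PAGE_SIZE, SMP_CACHE_BYTES);
 *	if (!table)
 *		panic("%s: failed to allocate early table\n", __func__);
 */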

/*
 * __memblock_free_late - free pages directly to the buddy allocator.
 * Only useful while memblock itself is being torn down late in boot: the
 * pages in [@base, @base + @size) are released with memblock_free_pages()
 * and the total RAM counter is updated accordingly.
 */
void __init __memblock_free_late(phys_addr_t base, phys_addr_t size)
{
	phys_addr_t cursor, end;

	end = base + size - 1;
	memblock_dbg("%s: [%pa-%pa] %pS\n",
		     __func__, &base, &end, (void *)_RET_IP_);
	kmemleak_free_part_phys(base, size);
	cursor = PFN_UP(base);
	end = PFN_DOWN(base + size);

	for (; cursor < end; cursor++) {
		memblock_free_pages(pfn_to_page(cursor), cursor, 0);
		totalram_pages_inc();
	}
}

/*
 * Remaining API functions
 */

/* Return the total size of registered physical memory. */
phys_addr_t __init_memblock memblock_phys_mem_size(void)
{
	return memblock.memory.total_size;
}

phys_addr_t __init_memblock memblock_reserved_size(void)
{
	return memblock.reserved.total_size;
}

/* Return the amount of registered memory below @limit_pfn, in bytes. */
phys_addr_t __init memblock_mem_size(unsigned long limit_pfn)
{
	unsigned long pages = 0;
	struct memblock_region *r;
	unsigned long start_pfn, end_pfn;

	for_each_memblock(memory, r) {
		start_pfn = memblock_region_memory_base_pfn(r);
		end_pfn = memblock_region_memory_end_pfn(r);
		start_pfn = min_t(unsigned long, start_pfn, limit_pfn);
		end_pfn = min_t(unsigned long, end_pfn, limit_pfn);
		pages += end_pfn - start_pfn;
	}

	return PFN_PHYS(pages);
}

/* lowest address */
phys_addr_t __init_memblock memblock_start_of_DRAM(void)
{
	return memblock.memory.regions[0].base;
}

phys_addr_t __init_memblock memblock_end_of_DRAM(void)
{
	int idx = memblock.memory.cnt - 1;

	return (memblock.memory.regions[idx].base + memblock.memory.regions[idx].size);
}

static phys_addr_t __init_memblock __find_max_addr(phys_addr_t limit)
{
	phys_addr_t max_addr = PHYS_ADDR_MAX;
	struct memblock_region *r;

	/*
	 * translate the memory @limit size into the max address within one of
	 * the memory memblock regions, if the @limit exceeds the total size
	 * of those regions, max_addr will keep original value PHYS_ADDR_MAX
	 */
	for_each_memblock(memory, r) {
		if (limit <= r->size) {
			max_addr = r->base + limit;
			break;
		}
		limit -= r->size;
	}

	return max_addr;
}

void __init memblock_enforce_memory_limit(phys_addr_t limit)
{
	phys_addr_t max_addr;

	if (!limit)
		return;

	max_addr = __find_max_addr(limit);

	/* @limit exceeds the total size of the memory, do nothing */
	if (max_addr == PHYS_ADDR_MAX)
		return;

	/* truncate both memory and reserved regions */
	memblock_remove_range(&memblock.memory, max_addr,
			      PHYS_ADDR_MAX);
	memblock_remove_range(&memblock.reserved, max_addr,
			      PHYS_ADDR_MAX);
}

void __init memblock_cap_memory_range(phys_addr_t base, phys_addr_t size)
{
	int start_rgn, end_rgn;
	int i, ret;

	if (!size)
		return;

	ret = memblock_isolate_range(&memblock.memory, base, size,
				     &start_rgn, &end_rgn);
	if (ret)
		return;

	/* remove all the MAP regions outside the capped range */
	for (i = memblock.memory.cnt - 1; i >= end_rgn; i--)
		if (!memblock_is_nomap(&memblock.memory.regions[i]))
			memblock_remove_region(&memblock.memory, i);

	for (i = start_rgn - 1; i >= 0; i--)
		if (!memblock_is_nomap(&memblock.memory.regions[i]))
			memblock_remove_region(&memblock.memory, i);

	/* truncate the reserved regions */
	memblock_remove_range(&memblock.reserved, 0, base);
	memblock_remove_range(&memblock.reserved,
			      base + size, PHYS_ADDR_MAX);
}

void __init memblock_mem_limit_remove_map(phys_addr_t limit)
{
	phys_addr_t max_addr;

	if (!limit)
		return;

	max_addr = __find_max_addr(limit);

	/* @limit exceeds the total size of the memory, do nothing */
	if (max_addr == PHYS_ADDR_MAX)
		return;

	memblock_cap_memory_range(0, max_addr);
}

static int __init_memblock memblock_search(struct memblock_type *type, phys_addr_t addr)
{
	unsigned int left = 0, right = type->cnt;

	do {
		unsigned int mid = (right + left) / 2;

		if (addr < type->regions[mid].base)
			right = mid;
		else if (addr >= (type->regions[mid].base +
				  type->regions[mid].size))
			left = mid + 1;
		else
			return mid;
	} while (left < right);
	return -1;
}

bool __init_memblock memblock_is_reserved(phys_addr_t addr)
{
	return memblock_search(&memblock.reserved, addr) != -1;
}

bool __init_memblock memblock_is_memory(phys_addr_t addr)
{
	return memblock_search(&memblock.memory, addr) != -1;
}

bool __init_memblock memblock_is_map_memory(phys_addr_t addr)
{
	int i = memblock_search(&memblock.memory, addr);

	if (i == -1)
		return false;
	return !memblock_is_nomap(&memblock.memory.regions[i]);
}

int __init_memblock memblock_search_pfn_nid(unsigned long pfn,
			 unsigned long *start_pfn, unsigned long *end_pfn)
{
	struct memblock_type *type = &memblock.memory;
	int mid = memblock_search(type, PFN_PHYS(pfn));

	if (mid == -1)
		return -1;

	*start_pfn = PFN_DOWN(type->regions[mid].base);
	*end_pfn = PFN_DOWN(type->regions[mid].base + type->regions[mid].size);

	return memblock_get_region_node(&type->regions[mid]);
}

/*
 * memblock_is_region_memory - check if a region is a subset of memory.
 * Returns true if [@base, @base + @size) is entirely contained in a
 * single registered memory region, false otherwise.
 */
bool __init_memblock memblock_is_region_memory(phys_addr_t base, phys_addr_t size)
{
	int idx = memblock_search(&memblock.memory, base);
	phys_addr_t end = base + memblock_cap_size(base, &size);

	if (idx == -1)
		return false;
	return (memblock.memory.regions[idx].base +
		 memblock.memory.regions[idx].size) >= end;
}

/*
 * memblock_is_region_reserved - check if a region intersects reserved
 * memory.  Returns true if [@base, @base + @size) overlaps any reserved
 * region, false otherwise.
 */
bool __init_memblock memblock_is_region_reserved(phys_addr_t base, phys_addr_t size)
{
	memblock_cap_size(base, &size);
	return memblock_overlaps_region(&memblock.reserved, base, size);
}

void __init_memblock memblock_trim_memory(phys_addr_t align)
{
	phys_addr_t start, end, orig_start, orig_end;
	struct memblock_region *r;

	for_each_memblock(memory, r) {
		orig_start = r->base;
		orig_end = r->base + r->size;
		start = round_up(orig_start, align);
		end = round_down(orig_end, align);

		if (start == orig_start && end == orig_end)
			continue;

		if (start < end) {
			r->base = start;
			r->size = end - start;
		} else {
			memblock_remove_region(&memblock.memory,
					       r - memblock.memory.regions);
			r--;
		}
	}
}

void __init_memblock memblock_set_current_limit(phys_addr_t limit)
{
	memblock.current_limit = limit;
}

phys_addr_t __init_memblock memblock_get_current_limit(void)
{
	return memblock.current_limit;
}

static void __init_memblock memblock_dump(struct memblock_type *type)
{
	phys_addr_t base, end, size;
	enum memblock_flags flags;
	int idx;
	struct memblock_region *rgn;

	pr_info(" %s.cnt = 0x%lx\n", type->name, type->cnt);

	for_each_memblock_type(idx, type, rgn) {
		char nid_buf[32] = "";

		base = rgn->base;
		size = rgn->size;
		end = base + size - 1;
		flags = rgn->flags;
#ifdef CONFIG_NEED_MULTIPLE_NODES
		if (memblock_get_region_node(rgn) != MAX_NUMNODES)
			snprintf(nid_buf, sizeof(nid_buf), " on node %d",
				 memblock_get_region_node(rgn));
#endif
		pr_info(" %s[%#x]\t[%pa-%pa], %pa bytes%s flags: %#x\n",
			type->name, idx, &base, &end, &size, nid_buf, flags);
	}
}

void __init_memblock __memblock_dump_all(void)
{
	pr_info("MEMBLOCK configuration:\n");
	pr_info(" memory size = %pa reserved size = %pa\n",
		&memblock.memory.total_size,
		&memblock.reserved.total_size);

	memblock_dump(&memblock.memory);
	memblock_dump(&memblock.reserved);
#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
	memblock_dump(&memblock.physmem);
#endif
}

void __init memblock_allow_resize(void)
{
	memblock_can_resize = 1;
}

static int __init early_memblock(char *p)
{
	if (p && strstr(p, "debug"))
		memblock_debug = 1;
	return 0;
}
early_param("memblock", early_memblock);

static void __init __free_pages_memory(unsigned long start, unsigned long end)
{
	int order;

	while (start < end) {
		order = min(MAX_ORDER - 1UL, __ffs(start));

		while (start + (1UL << order) > end)
			order--;

		memblock_free_pages(pfn_to_page(start), start, order);

		start += (1UL << order);
	}
}

static unsigned long __init __free_memory_core(phys_addr_t start,
				 phys_addr_t end)
{
	unsigned long start_pfn = PFN_UP(start);
	unsigned long end_pfn = min_t(unsigned long,
				      PFN_DOWN(end), max_low_pfn);

	if (start_pfn >= end_pfn)
		return 0;

	__free_pages_memory(start_pfn, end_pfn);

	return end_pfn - start_pfn;
}

static unsigned long __init free_low_memory_core_early(void)
{
	unsigned long count = 0;
	phys_addr_t start, end;
	u64 i;

	memblock_clear_hotplug(0, -1);

	for_each_reserved_mem_region(i, &start, &end)
		reserve_bootmem_region(start, end);

	/*
	 * We need to use NUMA_NO_NODE instead of NODE_DATA(0)->node_id
	 * because low memory may live on a node other than node 0, e.g.
	 * when node 0 has no RAM installed.
	 */
	for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE, &start, &end,
				NULL)
		count += __free_memory_core(start, end);

	return count;
}

static int reset_managed_pages_done __initdata;

void reset_node_managed_pages(pg_data_t *pgdat)
{
	struct zone *z;

	for (z = pgdat->node_zones; z < pgdat->node_zones + MAX_NR_ZONES; z++)
		atomic_long_set(&z->managed_pages, 0);
}

void __init reset_all_zones_managed_pages(void)
{
	struct pglist_data *pgdat;

	if (reset_managed_pages_done)
		return;

	for_each_online_pgdat(pgdat)
		reset_node_managed_pages(pgdat);

	reset_managed_pages_done = 1;
}

/*
 * memblock_free_all - release free pages to the buddy allocator.
 * Returns the number of pages released.
 */
unsigned long __init memblock_free_all(void)
{
	unsigned long pages;

	reset_all_zones_managed_pages();

	pages = free_low_memory_core_early();
	totalram_pages_add(pages);

	return pages;
}

#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_ARCH_KEEP_MEMBLOCK)

static int memblock_debug_show(struct seq_file *m, void *private)
{
	struct memblock_type *type = m->private;
	struct memblock_region *reg;
	int i;
	phys_addr_t end;

	for (i = 0; i < type->cnt; i++) {
		reg = &type->regions[i];
		end = reg->base + reg->size - 1;

		seq_printf(m, "%4d: ", i);
		seq_printf(m, "%pa..%pa\n", &reg->base, &end);
	}
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(memblock_debug);

static int __init memblock_init_debugfs(void)
{
	struct dentry *root = debugfs_create_dir("memblock", NULL);

	debugfs_create_file("memory", 0444, root,
			    &memblock.memory, &memblock_debug_fops);
	debugfs_create_file("reserved", 0444, root,
			    &memblock.reserved, &memblock_debug_fops);
#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
	debugfs_create_file("physmem", 0444, root,
			    &memblock.physmem, &memblock_debug_fops);
#endif

	return 0;
}
__initcall(memblock_init_debugfs);

#endif