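// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Procedures for maintaining information about logical memory blocks.
 *
 * Peter Bergner, IBM Corp.	June 2001.
 * Copyright (C) 2001 Peter Bergner.
 */
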
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/poison.h>
#include <linux/pfn.h>
#include <linux/debugfs.h>
#include <linux/kmemleak.h>
#include <linux/seq_file.h>
#include <linux/memblock.h>

#include <asm/sections.h>
#include <linux/io.h>

#include "internal.h"

#define INIT_MEMBLOCK_REGIONS 128
#define INIT_PHYSMEM_REGIONS 4

#ifndef INIT_MEMBLOCK_RESERVED_REGIONS
# define INIT_MEMBLOCK_RESERVED_REGIONS INIT_MEMBLOCK_REGIONS
#endif
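
/**
 * DOC: memblock overview
 *
 * Memblock is the boot-time memory allocator. It tracks the physical
 * memory layout in collections of struct memblock_region, each region
 * described by its base address, size, node id and attribute flags:
 *
 * * ``memory``   - physical memory available to the kernel
 * * ``reserved`` - ranges that were allocated by memblock or that must
 *                  not be touched
 * * ``physmem``  - all physical memory detected at boot, regardless of
 *                  restrictions (only with CONFIG_HAVE_MEMBLOCK_PHYS_MAP)
 *
 * The ``memory`` and ``reserved`` collections are wrapped by struct
 * memblock, which also carries the allocation direction (``bottom_up``)
 * and ``current_limit``, the address above which allocations are not
 * attempted. The region arrays start out as small static __initdata
 * arrays; once memblock_allow_resize() has been called they can be grown
 * on demand by memblock_double_array().
 */
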
#ifndef CONFIG_NEED_MULTIPLE_NODES
struct pglist_data __refdata contig_page_data;
EXPORT_SYMBOL(contig_page_data);
#endif

unsigned long max_low_pfn;
unsigned long min_low_pfn;
unsigned long max_pfn;
unsigned long long max_possible_pfn;

static struct memblock_region memblock_memory_init_regions[INIT_MEMBLOCK_REGIONS] __initdata_memblock;
static struct memblock_region memblock_reserved_init_regions[INIT_MEMBLOCK_RESERVED_REGIONS] __initdata_memblock;
#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
static struct memblock_region memblock_physmem_init_regions[INIT_PHYSMEM_REGIONS];
#endif

struct memblock memblock __initdata_memblock = {
	.memory.regions = memblock_memory_init_regions,
	.memory.cnt = 1,	/* empty dummy entry */
	.memory.max = INIT_MEMBLOCK_REGIONS,
	.memory.name = "memory",

	.reserved.regions = memblock_reserved_init_regions,
	.reserved.cnt = 1,	/* empty dummy entry */
	.reserved.max = INIT_MEMBLOCK_RESERVED_REGIONS,
	.reserved.name = "reserved",

	.bottom_up = false,
	.current_limit = MEMBLOCK_ALLOC_ANYWHERE,
};

#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
struct memblock_type physmem = {
	.regions = memblock_physmem_init_regions,
	.cnt = 1,	/* empty dummy entry */
	.max = INIT_PHYSMEM_REGIONS,
	.name = "physmem",
};
#endif
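
/*
 * Pointer to the "memory" type, used by should_skip_region() to decide
 * whether node id and flag filtering applies to the type being iterated.
 * memblock_discard() resets it to NULL once the memblock arrays are
 * released.
 */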
static __refdata struct memblock_type *memblock_memory = &memblock.memory;

#define for_each_memblock_type(i, memblock_type, rgn)		\
	for (i = 0, rgn = &memblock_type->regions[0];		\
	     i < memblock_type->cnt;				\
	     i++, rgn = &memblock_type->regions[i])

#define memblock_dbg(fmt, ...)					\
	do {							\
		if (memblock_debug)				\
			pr_info(fmt, ##__VA_ARGS__);		\
	} while (0)

static int memblock_debug __initdata_memblock;
static bool system_has_some_mirror __initdata_memblock = false;
static int memblock_can_resize __initdata_memblock;
static int memblock_memory_in_slab __initdata_memblock = 0;
static int memblock_reserved_in_slab __initdata_memblock = 0;

static enum memblock_flags __init_memblock choose_memblock_flags(void)
{
	return system_has_some_mirror ? MEMBLOCK_MIRROR : MEMBLOCK_NONE;
}
164
165
166static inline phys_addr_t memblock_cap_size(phys_addr_t base, phys_addr_t *size)
167{
168 return *size = min(*size, PHYS_ADDR_MAX - base);
169}
170
171
172
173
174static unsigned long __init_memblock memblock_addrs_overlap(phys_addr_t base1, phys_addr_t size1,
175 phys_addr_t base2, phys_addr_t size2)
176{
177 return ((base1 < (base2 + size2)) && (base2 < (base1 + size1)));
178}
179
180bool __init_memblock memblock_overlaps_region(struct memblock_type *type,
181 phys_addr_t base, phys_addr_t size)
182{
183 unsigned long i;
184
185 for (i = 0; i < type->cnt; i++)
186 if (memblock_addrs_overlap(base, size, type->regions[i].base,
187 type->regions[i].size))
188 break;
189 return i < type->cnt;
190}
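
/**
 * __memblock_find_range_bottom_up - find free area utility in bottom-up
 * @start: start of candidate range
 * @end: end of candidate range
 * @size: size of free area to find
 * @align: alignment of free area to find
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 * @flags: pick from blocks based on memory attributes
 *
 * Utility called from memblock_find_in_range_node(), find free area
 * bottom-up.
 *
 * Return:
 * Found address on success, 0 on failure.
 */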
207static phys_addr_t __init_memblock
208__memblock_find_range_bottom_up(phys_addr_t start, phys_addr_t end,
209 phys_addr_t size, phys_addr_t align, int nid,
210 enum memblock_flags flags)
211{
212 phys_addr_t this_start, this_end, cand;
213 u64 i;
214
215 for_each_free_mem_range(i, nid, flags, &this_start, &this_end, NULL) {
216 this_start = clamp(this_start, start, end);
217 this_end = clamp(this_end, start, end);
218
219 cand = round_up(this_start, align);
220 if (cand < this_end && this_end - cand >= size)
221 return cand;
222 }
223
224 return 0;
225}
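
/**
 * __memblock_find_range_top_down - find free area utility, in top-down
 * @start: start of candidate range
 * @end: end of candidate range
 * @size: size of free area to find
 * @align: alignment of free area to find
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 * @flags: pick from blocks based on memory attributes
 *
 * Utility called from memblock_find_in_range_node(), find free area
 * top-down.
 *
 * Return:
 * Found address on success, 0 on failure.
 */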
242static phys_addr_t __init_memblock
243__memblock_find_range_top_down(phys_addr_t start, phys_addr_t end,
244 phys_addr_t size, phys_addr_t align, int nid,
245 enum memblock_flags flags)
246{
247 phys_addr_t this_start, this_end, cand;
248 u64 i;
249
250 for_each_free_mem_range_reverse(i, nid, flags, &this_start, &this_end,
251 NULL) {
252 this_start = clamp(this_start, start, end);
253 this_end = clamp(this_end, start, end);
254
255 if (this_end < size)
256 continue;
257
258 cand = round_down(this_end - size, align);
259 if (cand >= this_start)
260 return cand;
261 }
262
263 return 0;
264}
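
/**
 * memblock_find_in_range_node - find free area in given range and node
 * @size: size of free area to find
 * @align: alignment of free area to find
 * @start: start of candidate range
 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_ACCESSIBLE or
 *	 %MEMBLOCK_ALLOC_KASAN
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 * @flags: pick from blocks based on memory attributes
 *
 * Find @size free area aligned to @align in the specified range and node.
 * The search direction follows memblock_set_bottom_up().
 *
 * Return:
 * Found address on success, 0 on failure.
 */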
281static phys_addr_t __init_memblock memblock_find_in_range_node(phys_addr_t size,
282 phys_addr_t align, phys_addr_t start,
283 phys_addr_t end, int nid,
284 enum memblock_flags flags)
285{
286
287 if (end == MEMBLOCK_ALLOC_ACCESSIBLE ||
288 end == MEMBLOCK_ALLOC_KASAN)
289 end = memblock.current_limit;
290
291
292 start = max_t(phys_addr_t, start, PAGE_SIZE);
293 end = max(start, end);
294
295 if (memblock_bottom_up())
296 return __memblock_find_range_bottom_up(start, end, size, align,
297 nid, flags);
298 else
299 return __memblock_find_range_top_down(start, end, size, align,
300 nid, flags);
301}
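
/**
 * memblock_find_in_range - find free area in given range
 * @start: start of candidate range
 * @end: end of candidate range
 * @size: size of free area to find
 * @align: alignment of free area to find
 *
 * Find @size free area aligned to @align in the specified range; if mirrored
 * memory was requested but cannot satisfy the request, retry without the
 * %MEMBLOCK_MIRROR restriction.
 *
 * Return:
 * Found address on success, 0 on failure.
 */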
316phys_addr_t __init_memblock memblock_find_in_range(phys_addr_t start,
317 phys_addr_t end, phys_addr_t size,
318 phys_addr_t align)
319{
320 phys_addr_t ret;
321 enum memblock_flags flags = choose_memblock_flags();
322
323again:
324 ret = memblock_find_in_range_node(size, align, start, end,
325 NUMA_NO_NODE, flags);
326
327 if (!ret && (flags & MEMBLOCK_MIRROR)) {
328 pr_warn("Could not allocate %pap bytes of mirrored memory\n",
329 &size);
330 flags &= ~MEMBLOCK_MIRROR;
331 goto again;
332 }
333
334 return ret;
335}
336
337static void __init_memblock memblock_remove_region(struct memblock_type *type, unsigned long r)
338{
339 type->total_size -= type->regions[r].size;
340 memmove(&type->regions[r], &type->regions[r + 1],
341 (type->cnt - (r + 1)) * sizeof(type->regions[r]));
342 type->cnt--;
343
344
345 if (type->cnt == 0) {
346 WARN_ON(type->total_size != 0);
347 type->cnt = 1;
348 type->regions[0].base = 0;
349 type->regions[0].size = 0;
350 type->regions[0].flags = 0;
351 memblock_set_region_node(&type->regions[0], MAX_NUMNODES);
352 }
353}
354
355#ifndef CONFIG_ARCH_KEEP_MEMBLOCK
356
357
358
359void __init memblock_discard(void)
360{
361 phys_addr_t addr, size;
362
363 if (memblock.reserved.regions != memblock_reserved_init_regions) {
364 addr = __pa(memblock.reserved.regions);
365 size = PAGE_ALIGN(sizeof(struct memblock_region) *
366 memblock.reserved.max);
367 __memblock_free_late(addr, size);
368 }
369
370 if (memblock.memory.regions != memblock_memory_init_regions) {
371 addr = __pa(memblock.memory.regions);
372 size = PAGE_ALIGN(sizeof(struct memblock_region) *
373 memblock.memory.max);
374 __memblock_free_late(addr, size);
375 }
376
377 memblock_memory = NULL;
378}
379#endif
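
/**
 * memblock_double_array - double the size of the memblock regions array
 * @type: memblock type of the regions array being doubled
 * @new_area_start: starting address of memory range to avoid overlap with
 * @new_area_size: size of memory range to avoid overlap with
 *
 * Double the size of the @type regions array. If memblock is being used to
 * allocate memory for a new reserved regions array and there is a previously
 * allocated memory range [@new_area_start, @new_area_start + @new_area_size]
 * waiting to be reserved, ensure the memory used by the new array does not
 * overlap.
 *
 * Return:
 * 0 on success, -1 on failure.
 */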
396static int __init_memblock memblock_double_array(struct memblock_type *type,
397 phys_addr_t new_area_start,
398 phys_addr_t new_area_size)
399{
400 struct memblock_region *new_array, *old_array;
401 phys_addr_t old_alloc_size, new_alloc_size;
402 phys_addr_t old_size, new_size, addr, new_end;
403 int use_slab = slab_is_available();
404 int *in_slab;
405
406
407
408
409 if (!memblock_can_resize)
410 return -1;
411
412
413 old_size = type->max * sizeof(struct memblock_region);
414 new_size = old_size << 1;
415
416
417
418
419 old_alloc_size = PAGE_ALIGN(old_size);
420 new_alloc_size = PAGE_ALIGN(new_size);
421
422
423 if (type == &memblock.memory)
424 in_slab = &memblock_memory_in_slab;
425 else
426 in_slab = &memblock_reserved_in_slab;
427
428
429 if (use_slab) {
430 new_array = kmalloc(new_size, GFP_KERNEL);
431 addr = new_array ? __pa(new_array) : 0;
432 } else {
433
434 if (type != &memblock.reserved)
435 new_area_start = new_area_size = 0;
436
437 addr = memblock_find_in_range(new_area_start + new_area_size,
438 memblock.current_limit,
439 new_alloc_size, PAGE_SIZE);
440 if (!addr && new_area_size)
441 addr = memblock_find_in_range(0,
442 min(new_area_start, memblock.current_limit),
443 new_alloc_size, PAGE_SIZE);
444
445 new_array = addr ? __va(addr) : NULL;
446 }
447 if (!addr) {
448 pr_err("memblock: Failed to double %s array from %ld to %ld entries !\n",
449 type->name, type->max, type->max * 2);
450 return -1;
451 }
452
453 new_end = addr + new_size - 1;
454 memblock_dbg("memblock: %s is doubled to %ld at [%pa-%pa]",
455 type->name, type->max * 2, &addr, &new_end);
456
457
458
459
460
461
462 memcpy(new_array, type->regions, old_size);
463 memset(new_array + type->max, 0, old_size);
464 old_array = type->regions;
465 type->regions = new_array;
466 type->max <<= 1;
467
468
469 if (*in_slab)
470 kfree(old_array);
471 else if (old_array != memblock_memory_init_regions &&
472 old_array != memblock_reserved_init_regions)
473 memblock_free(__pa(old_array), old_alloc_size);
474
475
476
477
478
479 if (!use_slab)
480 BUG_ON(memblock_reserve(addr, new_alloc_size));
481
482
483 *in_slab = use_slab;
484
485 return 0;
486}
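
/**
 * memblock_merge_regions - merge neighboring compatible regions
 * @type: memblock type to scan
 *
 * Scan @type and merge neighboring regions that are contiguous and have the
 * same node id and flags.
 */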
494static void __init_memblock memblock_merge_regions(struct memblock_type *type)
495{
496 int i = 0;
497
498
499 while (i < type->cnt - 1) {
500 struct memblock_region *this = &type->regions[i];
501 struct memblock_region *next = &type->regions[i + 1];
502
503 if (this->base + this->size != next->base ||
504 memblock_get_region_node(this) !=
505 memblock_get_region_node(next) ||
506 this->flags != next->flags) {
507 BUG_ON(this->base + this->size > next->base);
508 i++;
509 continue;
510 }
511
512 this->size += next->size;
513
514 memmove(next, next + 1, (type->cnt - (i + 2)) * sizeof(*next));
515 type->cnt--;
516 }
517}
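
/**
 * memblock_insert_region - insert new memblock region
 * @type: memblock type to insert into
 * @idx: index for the insertion point
 * @base: base address of the new region
 * @size: size of the new region
 * @nid: node id of the new region
 * @flags: flags of the new region
 *
 * Insert new memblock region [@base, @base + @size) into @type at @idx.
 * @type must already have extra room to accommodate the new region.
 */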
531static void __init_memblock memblock_insert_region(struct memblock_type *type,
532 int idx, phys_addr_t base,
533 phys_addr_t size,
534 int nid,
535 enum memblock_flags flags)
536{
537 struct memblock_region *rgn = &type->regions[idx];
538
539 BUG_ON(type->cnt >= type->max);
540 memmove(rgn + 1, rgn, (type->cnt - idx) * sizeof(*rgn));
541 rgn->base = base;
542 rgn->size = size;
543 rgn->flags = flags;
544 memblock_set_region_node(rgn, nid);
545 type->cnt++;
546 type->total_size += size;
547}
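
/**
 * memblock_add_range - add new memblock region
 * @type: memblock type to add new region into
 * @base: base address of the new region
 * @size: size of the new region
 * @nid: nid of the new region
 * @flags: flags of the new region
 *
 * Add new memblock region [@base, @base + @size) into @type.  The new region
 * is allowed to overlap with existing ones - overlaps don't affect already
 * existing regions.  @type is guaranteed to be minimal (all neighbouring
 * compatible regions are merged) after the addition.
 *
 * Return:
 * 0 on success, -errno on failure.
 */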
565static int __init_memblock memblock_add_range(struct memblock_type *type,
566 phys_addr_t base, phys_addr_t size,
567 int nid, enum memblock_flags flags)
568{
569 bool insert = false;
570 phys_addr_t obase = base;
571 phys_addr_t end = base + memblock_cap_size(base, &size);
572 int idx, nr_new;
573 struct memblock_region *rgn;
574
575 if (!size)
576 return 0;
577
578
579 if (type->regions[0].size == 0) {
580 WARN_ON(type->cnt != 1 || type->total_size);
581 type->regions[0].base = base;
582 type->regions[0].size = size;
583 type->regions[0].flags = flags;
584 memblock_set_region_node(&type->regions[0], nid);
585 type->total_size = size;
586 return 0;
587 }
588repeat:
589
590
591
592
593
594 base = obase;
595 nr_new = 0;
596
597 for_each_memblock_type(idx, type, rgn) {
598 phys_addr_t rbase = rgn->base;
599 phys_addr_t rend = rbase + rgn->size;
600
601 if (rbase >= end)
602 break;
603 if (rend <= base)
604 continue;
605
606
607
608
609 if (rbase > base) {
610#ifdef CONFIG_NEED_MULTIPLE_NODES
611 WARN_ON(nid != memblock_get_region_node(rgn));
612#endif
613 WARN_ON(flags != rgn->flags);
614 nr_new++;
615 if (insert)
616 memblock_insert_region(type, idx++, base,
617 rbase - base, nid,
618 flags);
619 }
620
621 base = min(rend, end);
622 }
623
624
625 if (base < end) {
626 nr_new++;
627 if (insert)
628 memblock_insert_region(type, idx, base, end - base,
629 nid, flags);
630 }
631
632 if (!nr_new)
633 return 0;
634
635
636
637
638
639 if (!insert) {
640 while (type->cnt + nr_new > type->max)
641 if (memblock_double_array(type, obase, size) < 0)
642 return -ENOMEM;
643 insert = true;
644 goto repeat;
645 } else {
646 memblock_merge_regions(type);
647 return 0;
648 }
649}
650
651
652
653
654
655
656
657
658
659
660
661
662
663int __init_memblock memblock_add_node(phys_addr_t base, phys_addr_t size,
664 int nid)
665{
666 return memblock_add_range(&memblock.memory, base, size, nid, 0);
667}
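
/**
 * memblock_add - add new memblock region
 * @base: base address of the new region
 * @size: size of the new region
 *
 * Add new memblock region [@base, @base + @size) to the "memory" type.
 * See memblock_add_range() description for more details.
 *
 * Return:
 * 0 on success, -errno on failure.
 */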
680int __init_memblock memblock_add(phys_addr_t base, phys_addr_t size)
681{
682 phys_addr_t end = base + size - 1;
683
684 memblock_dbg("%s: [%pa-%pa] %pS\n", __func__,
685 &base, &end, (void *)_RET_IP_);
686
687 return memblock_add_range(&memblock.memory, base, size, MAX_NUMNODES, 0);
688}
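
/**
 * memblock_isolate_range - isolate given range into disjoint memblocks
 * @type: memblock type to isolate range for
 * @base: base of range to isolate
 * @size: size of range to isolate
 * @start_rgn: out parameter for the start of isolated region
 * @end_rgn: out parameter for the end of isolated region
 *
 * Walk @type and ensure that regions don't cross the boundaries defined by
 * [@base, @base + @size).  Crossing regions are split at the boundaries,
 * which may create at most two more regions.  The index of the first
 * region inside the range is returned in *@start_rgn and the end in
 * *@end_rgn.
 *
 * Return:
 * 0 on success, -errno on failure.
 */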
706static int __init_memblock memblock_isolate_range(struct memblock_type *type,
707 phys_addr_t base, phys_addr_t size,
708 int *start_rgn, int *end_rgn)
709{
710 phys_addr_t end = base + memblock_cap_size(base, &size);
711 int idx;
712 struct memblock_region *rgn;
713
714 *start_rgn = *end_rgn = 0;
715
716 if (!size)
717 return 0;
718
719
720 while (type->cnt + 2 > type->max)
721 if (memblock_double_array(type, base, size) < 0)
722 return -ENOMEM;
723
724 for_each_memblock_type(idx, type, rgn) {
725 phys_addr_t rbase = rgn->base;
726 phys_addr_t rend = rbase + rgn->size;
727
728 if (rbase >= end)
729 break;
730 if (rend <= base)
731 continue;
732
733 if (rbase < base) {
734
735
736
737
738 rgn->base = base;
739 rgn->size -= base - rbase;
740 type->total_size -= base - rbase;
741 memblock_insert_region(type, idx, rbase, base - rbase,
742 memblock_get_region_node(rgn),
743 rgn->flags);
744 } else if (rend > end) {
745
746
747
748
749 rgn->base = end;
750 rgn->size -= end - rbase;
751 type->total_size -= end - rbase;
752 memblock_insert_region(type, idx--, rbase, end - rbase,
753 memblock_get_region_node(rgn),
754 rgn->flags);
755 } else {
756
757 if (!*end_rgn)
758 *start_rgn = idx;
759 *end_rgn = idx + 1;
760 }
761 }
762
763 return 0;
764}
765
766static int __init_memblock memblock_remove_range(struct memblock_type *type,
767 phys_addr_t base, phys_addr_t size)
768{
769 int start_rgn, end_rgn;
770 int i, ret;
771
772 ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
773 if (ret)
774 return ret;
775
776 for (i = end_rgn - 1; i >= start_rgn; i--)
777 memblock_remove_region(type, i);
778 return 0;
779}
780
781int __init_memblock memblock_remove(phys_addr_t base, phys_addr_t size)
782{
783 phys_addr_t end = base + size - 1;
784
785 memblock_dbg("%s: [%pa-%pa] %pS\n", __func__,
786 &base, &end, (void *)_RET_IP_);
787
788 return memblock_remove_range(&memblock.memory, base, size);
789}
790
791
792
793
794
795
796
797
798
799int __init_memblock memblock_free(phys_addr_t base, phys_addr_t size)
800{
801 phys_addr_t end = base + size - 1;
802
803 memblock_dbg("%s: [%pa-%pa] %pS\n", __func__,
804 &base, &end, (void *)_RET_IP_);
805
806 kmemleak_free_part_phys(base, size);
807 return memblock_remove_range(&memblock.reserved, base, size);
808}
809
810int __init_memblock memblock_reserve(phys_addr_t base, phys_addr_t size)
811{
812 phys_addr_t end = base + size - 1;
813
814 memblock_dbg("%s: [%pa-%pa] %pS\n", __func__,
815 &base, &end, (void *)_RET_IP_);
816
817 return memblock_add_range(&memblock.reserved, base, size, MAX_NUMNODES, 0);
818}
819
820#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
821int __init_memblock memblock_physmem_add(phys_addr_t base, phys_addr_t size)
822{
823 phys_addr_t end = base + size - 1;
824
825 memblock_dbg("%s: [%pa-%pa] %pS\n", __func__,
826 &base, &end, (void *)_RET_IP_);
827
828 return memblock_add_range(&physmem, base, size, MAX_NUMNODES, 0);
829}
830#endif
831
832
833
834
835
836
837
838
839
840
841
842
843static int __init_memblock memblock_setclr_flag(phys_addr_t base,
844 phys_addr_t size, int set, int flag)
845{
846 struct memblock_type *type = &memblock.memory;
847 int i, ret, start_rgn, end_rgn;
848
849 ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
850 if (ret)
851 return ret;
852
853 for (i = start_rgn; i < end_rgn; i++) {
854 struct memblock_region *r = &type->regions[i];
855
856 if (set)
857 r->flags |= flag;
858 else
859 r->flags &= ~flag;
860 }
861
862 memblock_merge_regions(type);
863 return 0;
864}
865
866
867
868
869
870
871
872
873int __init_memblock memblock_mark_hotplug(phys_addr_t base, phys_addr_t size)
874{
875 return memblock_setclr_flag(base, size, 1, MEMBLOCK_HOTPLUG);
876}
877
878
879
880
881
882
883
884
885int __init_memblock memblock_clear_hotplug(phys_addr_t base, phys_addr_t size)
886{
887 return memblock_setclr_flag(base, size, 0, MEMBLOCK_HOTPLUG);
888}
889
890
891
892
893
894
895
896
897int __init_memblock memblock_mark_mirror(phys_addr_t base, phys_addr_t size)
898{
899 system_has_some_mirror = true;
900
901 return memblock_setclr_flag(base, size, 1, MEMBLOCK_MIRROR);
902}
903
904
905
906
907
908
909
910
911int __init_memblock memblock_mark_nomap(phys_addr_t base, phys_addr_t size)
912{
913 return memblock_setclr_flag(base, size, 1, MEMBLOCK_NOMAP);
914}
915
916
917
918
919
920
921
922
923int __init_memblock memblock_clear_nomap(phys_addr_t base, phys_addr_t size)
924{
925 return memblock_setclr_flag(base, size, 0, MEMBLOCK_NOMAP);
926}
927
928static bool should_skip_region(struct memblock_type *type,
929 struct memblock_region *m,
930 int nid, int flags)
931{
932 int m_nid = memblock_get_region_node(m);
933
934
935 if (type != memblock_memory)
936 return false;
937
938
939 if (nid != NUMA_NO_NODE && nid != m_nid)
940 return true;
941
942
943 if (movable_node_is_enabled() && memblock_is_hotpluggable(m))
944 return true;
945
946
947 if ((flags & MEMBLOCK_MIRROR) && !memblock_is_mirror(m))
948 return true;
949
950
951 if (!(flags & MEMBLOCK_NOMAP) && memblock_is_nomap(m))
952 return true;
953
954 return false;
955}
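
/**
 * __next_mem_range - next function for for_each_free_mem_range() etc.
 * @idx: pointer to u64 loop variable
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @type_a: pointer to memblock_type from where the range is taken
 * @type_b: pointer to memblock_type which excludes memory from being taken
 * @out_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @out_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @out_nid: ptr to int for nid of the range, can be %NULL
 *
 * Find the first area from *@idx which matches @nid, fill the out
 * parameters, and update *@idx for the next call.  *@idx packs the index
 * into @type_a in its lower 32 bits and the index into @type_b in its
 * upper 32 bits.  If @type_b is %NULL, the areas of @type_a are returned
 * directly; otherwise the areas returned are the parts of @type_a not
 * covered by @type_b, in ascending address order.
 */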
983void __next_mem_range(u64 *idx, int nid, enum memblock_flags flags,
984 struct memblock_type *type_a,
985 struct memblock_type *type_b, phys_addr_t *out_start,
986 phys_addr_t *out_end, int *out_nid)
987{
988 int idx_a = *idx & 0xffffffff;
989 int idx_b = *idx >> 32;
990
991 if (WARN_ONCE(nid == MAX_NUMNODES,
992 "Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n"))
993 nid = NUMA_NO_NODE;
994
995 for (; idx_a < type_a->cnt; idx_a++) {
996 struct memblock_region *m = &type_a->regions[idx_a];
997
998 phys_addr_t m_start = m->base;
999 phys_addr_t m_end = m->base + m->size;
1000 int m_nid = memblock_get_region_node(m);
1001
1002 if (should_skip_region(type_a, m, nid, flags))
1003 continue;
1004
1005 if (!type_b) {
1006 if (out_start)
1007 *out_start = m_start;
1008 if (out_end)
1009 *out_end = m_end;
1010 if (out_nid)
1011 *out_nid = m_nid;
1012 idx_a++;
1013 *idx = (u32)idx_a | (u64)idx_b << 32;
1014 return;
1015 }
1016
1017
1018 for (; idx_b < type_b->cnt + 1; idx_b++) {
1019 struct memblock_region *r;
1020 phys_addr_t r_start;
1021 phys_addr_t r_end;
1022
1023 r = &type_b->regions[idx_b];
1024 r_start = idx_b ? r[-1].base + r[-1].size : 0;
1025 r_end = idx_b < type_b->cnt ?
1026 r->base : PHYS_ADDR_MAX;
1027
1028
1029
1030
1031
1032 if (r_start >= m_end)
1033 break;
1034
1035 if (m_start < r_end) {
1036 if (out_start)
1037 *out_start =
1038 max(m_start, r_start);
1039 if (out_end)
1040 *out_end = min(m_end, r_end);
1041 if (out_nid)
1042 *out_nid = m_nid;
1043
1044
1045
1046
1047 if (m_end <= r_end)
1048 idx_a++;
1049 else
1050 idx_b++;
1051 *idx = (u32)idx_a | (u64)idx_b << 32;
1052 return;
1053 }
1054 }
1055 }
1056
1057
1058 *idx = ULLONG_MAX;
1059}
1060
1061
1062
1063
1064
1065
1066
1067
1068
1069
1070
1071
1072
1073
1074
1075
1076
1077
1078void __init_memblock __next_mem_range_rev(u64 *idx, int nid,
1079 enum memblock_flags flags,
1080 struct memblock_type *type_a,
1081 struct memblock_type *type_b,
1082 phys_addr_t *out_start,
1083 phys_addr_t *out_end, int *out_nid)
1084{
1085 int idx_a = *idx & 0xffffffff;
1086 int idx_b = *idx >> 32;
1087
1088 if (WARN_ONCE(nid == MAX_NUMNODES, "Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n"))
1089 nid = NUMA_NO_NODE;
1090
1091 if (*idx == (u64)ULLONG_MAX) {
1092 idx_a = type_a->cnt - 1;
1093 if (type_b != NULL)
1094 idx_b = type_b->cnt;
1095 else
1096 idx_b = 0;
1097 }
1098
1099 for (; idx_a >= 0; idx_a--) {
1100 struct memblock_region *m = &type_a->regions[idx_a];
1101
1102 phys_addr_t m_start = m->base;
1103 phys_addr_t m_end = m->base + m->size;
1104 int m_nid = memblock_get_region_node(m);
1105
1106 if (should_skip_region(type_a, m, nid, flags))
1107 continue;
1108
1109 if (!type_b) {
1110 if (out_start)
1111 *out_start = m_start;
1112 if (out_end)
1113 *out_end = m_end;
1114 if (out_nid)
1115 *out_nid = m_nid;
1116 idx_a--;
1117 *idx = (u32)idx_a | (u64)idx_b << 32;
1118 return;
1119 }
1120
1121
1122 for (; idx_b >= 0; idx_b--) {
1123 struct memblock_region *r;
1124 phys_addr_t r_start;
1125 phys_addr_t r_end;
1126
1127 r = &type_b->regions[idx_b];
1128 r_start = idx_b ? r[-1].base + r[-1].size : 0;
1129 r_end = idx_b < type_b->cnt ?
1130 r->base : PHYS_ADDR_MAX;
1131
1132
1133
1134
1135
1136 if (r_end <= m_start)
1137 break;
1138
1139 if (m_end > r_start) {
1140 if (out_start)
1141 *out_start = max(m_start, r_start);
1142 if (out_end)
1143 *out_end = min(m_end, r_end);
1144 if (out_nid)
1145 *out_nid = m_nid;
1146 if (m_start >= r_start)
1147 idx_a--;
1148 else
1149 idx_b--;
1150 *idx = (u32)idx_a | (u64)idx_b << 32;
1151 return;
1152 }
1153 }
1154 }
1155
1156 *idx = ULLONG_MAX;
1157}
1158
1159
1160
1161
1162void __init_memblock __next_mem_pfn_range(int *idx, int nid,
1163 unsigned long *out_start_pfn,
1164 unsigned long *out_end_pfn, int *out_nid)
1165{
1166 struct memblock_type *type = &memblock.memory;
1167 struct memblock_region *r;
1168 int r_nid;
1169
1170 while (++*idx < type->cnt) {
1171 r = &type->regions[*idx];
1172 r_nid = memblock_get_region_node(r);
1173
1174 if (PFN_UP(r->base) >= PFN_DOWN(r->base + r->size))
1175 continue;
1176 if (nid == MAX_NUMNODES || nid == r_nid)
1177 break;
1178 }
1179 if (*idx >= type->cnt) {
1180 *idx = -1;
1181 return;
1182 }
1183
1184 if (out_start_pfn)
1185 *out_start_pfn = PFN_UP(r->base);
1186 if (out_end_pfn)
1187 *out_end_pfn = PFN_DOWN(r->base + r->size);
1188 if (out_nid)
1189 *out_nid = r_nid;
1190}
1191
1192
1193
1194
1195
1196
1197
1198
1199
1200
1201
1202
1203
1204
1205int __init_memblock memblock_set_node(phys_addr_t base, phys_addr_t size,
1206 struct memblock_type *type, int nid)
1207{
1208#ifdef CONFIG_NEED_MULTIPLE_NODES
1209 int start_rgn, end_rgn;
1210 int i, ret;
1211
1212 ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
1213 if (ret)
1214 return ret;
1215
1216 for (i = start_rgn; i < end_rgn; i++)
1217 memblock_set_region_node(&type->regions[i], nid);
1218
1219 memblock_merge_regions(type);
1220#endif
1221 return 0;
1222}
1223
1224#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
1225
1226
1227
1228
1229
1230
1231
1232
1233
1234
1235
1236
1237
1238
1239
1240void __init_memblock
1241__next_mem_pfn_range_in_zone(u64 *idx, struct zone *zone,
1242 unsigned long *out_spfn, unsigned long *out_epfn)
1243{
1244 int zone_nid = zone_to_nid(zone);
1245 phys_addr_t spa, epa;
1246 int nid;
1247
1248 __next_mem_range(idx, zone_nid, MEMBLOCK_NONE,
1249 &memblock.memory, &memblock.reserved,
1250 &spa, &epa, &nid);
1251
1252 while (*idx != U64_MAX) {
1253 unsigned long epfn = PFN_DOWN(epa);
1254 unsigned long spfn = PFN_UP(spa);
1255
1256
1257
1258
1259
1260 if (zone->zone_start_pfn < epfn && spfn < epfn) {
1261
1262 if (zone_end_pfn(zone) <= spfn) {
1263 *idx = U64_MAX;
1264 break;
1265 }
1266
1267 if (out_spfn)
1268 *out_spfn = max(zone->zone_start_pfn, spfn);
1269 if (out_epfn)
1270 *out_epfn = min(zone_end_pfn(zone), epfn);
1271
1272 return;
1273 }
1274
1275 __next_mem_range(idx, zone_nid, MEMBLOCK_NONE,
1276 &memblock.memory, &memblock.reserved,
1277 &spa, &epa, &nid);
1278 }
1279
1280
1281 if (out_spfn)
1282 *out_spfn = ULONG_MAX;
1283 if (out_epfn)
1284 *out_epfn = 0;
1285}
1286
1287#endif
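
/**
 * memblock_alloc_range_nid - allocate boot memory block
 * @size: size of memory block to be allocated in bytes
 * @align: alignment of the region and block's size
 * @start: the lower bound of the memory region to allocate (phys address)
 * @end: the upper bound of the memory region to allocate (phys address)
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 * @exact_nid: control the allocation fall back to other nodes
 *
 * The allocation is performed from memory limited by
 * memblock.current_limit if @end == %MEMBLOCK_ALLOC_ACCESSIBLE.
 *
 * If the specified node can not hold the requested memory and @exact_nid
 * is false, the allocation falls back to any node in the system.  If
 * mirrored memory cannot satisfy the request, %MEMBLOCK_MIRROR is dropped
 * and the search is retried.
 *
 * Allocated blocks are registered with kmemleak (unless @end is
 * %MEMBLOCK_ALLOC_KASAN) so they are never reported as leaks.
 *
 * Return:
 * Physical address of allocated memory block on success, %0 on failure.
 */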
1314phys_addr_t __init memblock_alloc_range_nid(phys_addr_t size,
1315 phys_addr_t align, phys_addr_t start,
1316 phys_addr_t end, int nid,
1317 bool exact_nid)
1318{
1319 enum memblock_flags flags = choose_memblock_flags();
1320 phys_addr_t found;
1321
1322 if (WARN_ONCE(nid == MAX_NUMNODES, "Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n"))
1323 nid = NUMA_NO_NODE;
1324
1325 if (!align) {
1326
1327 dump_stack();
1328 align = SMP_CACHE_BYTES;
1329 }
1330
1331again:
1332 found = memblock_find_in_range_node(size, align, start, end, nid,
1333 flags);
1334 if (found && !memblock_reserve(found, size))
1335 goto done;
1336
1337 if (nid != NUMA_NO_NODE && !exact_nid) {
1338 found = memblock_find_in_range_node(size, align, start,
1339 end, NUMA_NO_NODE,
1340 flags);
1341 if (found && !memblock_reserve(found, size))
1342 goto done;
1343 }
1344
1345 if (flags & MEMBLOCK_MIRROR) {
1346 flags &= ~MEMBLOCK_MIRROR;
1347 pr_warn("Could not allocate %pap bytes of mirrored memory\n",
1348 &size);
1349 goto again;
1350 }
1351
1352 return 0;
1353
1354done:
1355
1356 if (end != MEMBLOCK_ALLOC_KASAN)
1357
1358
1359
1360
1361
1362
1363 kmemleak_alloc_phys(found, size, 0, 0);
1364
1365 return found;
1366}
1367
1368
1369
1370
1371
1372
1373
1374
1375
1376
1377
1378
1379
1380phys_addr_t __init memblock_phys_alloc_range(phys_addr_t size,
1381 phys_addr_t align,
1382 phys_addr_t start,
1383 phys_addr_t end)
1384{
1385 memblock_dbg("%s: %llu bytes align=0x%llx from=%pa max_addr=%pa %pS\n",
1386 __func__, (u64)size, (u64)align, &start, &end,
1387 (void *)_RET_IP_);
1388 return memblock_alloc_range_nid(size, align, start, end, NUMA_NO_NODE,
1389 false);
1390}
1391
1392
1393
1394
1395
1396
1397
1398
1399
1400
1401
1402
1403
1404
1405phys_addr_t __init memblock_phys_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid)
1406{
1407 return memblock_alloc_range_nid(size, align, 0,
1408 MEMBLOCK_ALLOC_ACCESSIBLE, nid, false);
1409}
1410
1411
1412
1413
1414
1415
1416
1417
1418
1419
1420
1421
1422
1423
1424
1425
1426
1427
1428
1429
1430
1431static void * __init memblock_alloc_internal(
1432 phys_addr_t size, phys_addr_t align,
1433 phys_addr_t min_addr, phys_addr_t max_addr,
1434 int nid, bool exact_nid)
1435{
1436 phys_addr_t alloc;
1437
1438
1439
1440
1441
1442
1443 if (WARN_ON_ONCE(slab_is_available()))
1444 return kzalloc_node(size, GFP_NOWAIT, nid);
1445
1446 if (max_addr > memblock.current_limit)
1447 max_addr = memblock.current_limit;
1448
1449 alloc = memblock_alloc_range_nid(size, align, min_addr, max_addr, nid,
1450 exact_nid);
1451
1452
1453 if (!alloc && min_addr)
1454 alloc = memblock_alloc_range_nid(size, align, 0, max_addr, nid,
1455 exact_nid);
1456
1457 if (!alloc)
1458 return NULL;
1459
1460 return phys_to_virt(alloc);
1461}
1462
1463
1464
1465
1466
1467
1468
1469
1470
1471
1472
1473
1474
1475
1476
1477
1478
1479
1480
1481void * __init memblock_alloc_exact_nid_raw(
1482 phys_addr_t size, phys_addr_t align,
1483 phys_addr_t min_addr, phys_addr_t max_addr,
1484 int nid)
1485{
1486 void *ptr;
1487
1488 memblock_dbg("%s: %llu bytes align=0x%llx nid=%d from=%pa max_addr=%pa %pS\n",
1489 __func__, (u64)size, (u64)align, nid, &min_addr,
1490 &max_addr, (void *)_RET_IP_);
1491
1492 ptr = memblock_alloc_internal(size, align,
1493 min_addr, max_addr, nid, true);
1494 if (ptr && size > 0)
1495 page_init_poison(ptr, size);
1496
1497 return ptr;
1498}
1499
1500
1501
1502
1503
1504
1505
1506
1507
1508
1509
1510
1511
1512
1513
1514
1515
1516
1517
1518
1519void * __init memblock_alloc_try_nid_raw(
1520 phys_addr_t size, phys_addr_t align,
1521 phys_addr_t min_addr, phys_addr_t max_addr,
1522 int nid)
1523{
1524 void *ptr;
1525
1526 memblock_dbg("%s: %llu bytes align=0x%llx nid=%d from=%pa max_addr=%pa %pS\n",
1527 __func__, (u64)size, (u64)align, nid, &min_addr,
1528 &max_addr, (void *)_RET_IP_);
1529
1530 ptr = memblock_alloc_internal(size, align,
1531 min_addr, max_addr, nid, false);
1532 if (ptr && size > 0)
1533 page_init_poison(ptr, size);
1534
1535 return ptr;
1536}
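
/**
 * memblock_alloc_try_nid - allocate boot memory block
 * @size: size of memory block to be allocated in bytes
 * @align: alignment of the region and block's size
 * @min_addr: the lower bound of the memory region from where the allocation
 *	      is preferred (phys address)
 * @max_addr: the upper bound of the memory region from where the allocation
 *	      is preferred (phys address), or %MEMBLOCK_ALLOC_ACCESSIBLE to
 *	      allocate only from memory limited by memblock.current_limit
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 *
 * Public function, provides additional debug information (including caller
 * info), if enabled.  This function zeroes the allocated memory.
 *
 * Return:
 * Virtual address of allocated memory block on success, NULL on failure.
 */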
1555void * __init memblock_alloc_try_nid(
1556 phys_addr_t size, phys_addr_t align,
1557 phys_addr_t min_addr, phys_addr_t max_addr,
1558 int nid)
1559{
1560 void *ptr;
1561
1562 memblock_dbg("%s: %llu bytes align=0x%llx nid=%d from=%pa max_addr=%pa %pS\n",
1563 __func__, (u64)size, (u64)align, nid, &min_addr,
1564 &max_addr, (void *)_RET_IP_);
1565 ptr = memblock_alloc_internal(size, align,
1566 min_addr, max_addr, nid, false);
1567 if (ptr)
1568 memset(ptr, 0, size);
1569
1570 return ptr;
1571}
1572
1573
1574
1575
1576
1577
1578
1579
1580
1581
1582void __init __memblock_free_late(phys_addr_t base, phys_addr_t size)
1583{
1584 phys_addr_t cursor, end;
1585
1586 end = base + size - 1;
1587 memblock_dbg("%s: [%pa-%pa] %pS\n",
1588 __func__, &base, &end, (void *)_RET_IP_);
1589 kmemleak_free_part_phys(base, size);
1590 cursor = PFN_UP(base);
1591 end = PFN_DOWN(base + size);
1592
1593 for (; cursor < end; cursor++) {
1594 memblock_free_pages(pfn_to_page(cursor), cursor, 0);
1595 totalram_pages_inc();
1596 }
1597}
1598
1599
1600
1601
1602
1603phys_addr_t __init_memblock memblock_phys_mem_size(void)
1604{
1605 return memblock.memory.total_size;
1606}
1607
1608phys_addr_t __init_memblock memblock_reserved_size(void)
1609{
1610 return memblock.reserved.total_size;
1611}
1612
1613
1614phys_addr_t __init_memblock memblock_start_of_DRAM(void)
1615{
1616 return memblock.memory.regions[0].base;
1617}
1618
1619phys_addr_t __init_memblock memblock_end_of_DRAM(void)
1620{
1621 int idx = memblock.memory.cnt - 1;
1622
1623 return (memblock.memory.regions[idx].base + memblock.memory.regions[idx].size);
1624}
1625
1626static phys_addr_t __init_memblock __find_max_addr(phys_addr_t limit)
1627{
1628 phys_addr_t max_addr = PHYS_ADDR_MAX;
1629 struct memblock_region *r;
1630
1631
1632
1633
1634
1635
1636 for_each_mem_region(r) {
1637 if (limit <= r->size) {
1638 max_addr = r->base + limit;
1639 break;
1640 }
1641 limit -= r->size;
1642 }
1643
1644 return max_addr;
1645}
1646
1647void __init memblock_enforce_memory_limit(phys_addr_t limit)
1648{
1649 phys_addr_t max_addr;
1650
1651 if (!limit)
1652 return;
1653
1654 max_addr = __find_max_addr(limit);
1655
1656
1657 if (max_addr == PHYS_ADDR_MAX)
1658 return;
1659
1660
1661 memblock_remove_range(&memblock.memory, max_addr,
1662 PHYS_ADDR_MAX);
1663 memblock_remove_range(&memblock.reserved, max_addr,
1664 PHYS_ADDR_MAX);
1665}
1666
1667void __init memblock_cap_memory_range(phys_addr_t base, phys_addr_t size)
1668{
1669 int start_rgn, end_rgn;
1670 int i, ret;
1671
1672 if (!size)
1673 return;
1674
1675 ret = memblock_isolate_range(&memblock.memory, base, size,
1676 &start_rgn, &end_rgn);
1677 if (ret)
1678 return;
1679
1680
1681 for (i = memblock.memory.cnt - 1; i >= end_rgn; i--)
1682 if (!memblock_is_nomap(&memblock.memory.regions[i]))
1683 memblock_remove_region(&memblock.memory, i);
1684
1685 for (i = start_rgn - 1; i >= 0; i--)
1686 if (!memblock_is_nomap(&memblock.memory.regions[i]))
1687 memblock_remove_region(&memblock.memory, i);
1688
1689
1690 memblock_remove_range(&memblock.reserved, 0, base);
1691 memblock_remove_range(&memblock.reserved,
1692 base + size, PHYS_ADDR_MAX);
1693}
1694
1695void __init memblock_mem_limit_remove_map(phys_addr_t limit)
1696{
1697 phys_addr_t max_addr;
1698
1699 if (!limit)
1700 return;
1701
1702 max_addr = __find_max_addr(limit);
1703
1704
1705 if (max_addr == PHYS_ADDR_MAX)
1706 return;
1707
1708 memblock_cap_memory_range(0, max_addr);
1709}
1710
1711static int __init_memblock memblock_search(struct memblock_type *type, phys_addr_t addr)
1712{
1713 unsigned int left = 0, right = type->cnt;
1714
1715 do {
1716 unsigned int mid = (right + left) / 2;
1717
1718 if (addr < type->regions[mid].base)
1719 right = mid;
1720 else if (addr >= (type->regions[mid].base +
1721 type->regions[mid].size))
1722 left = mid + 1;
1723 else
1724 return mid;
1725 } while (left < right);
1726 return -1;
1727}
1728
1729bool __init_memblock memblock_is_reserved(phys_addr_t addr)
1730{
1731 return memblock_search(&memblock.reserved, addr) != -1;
1732}
1733
1734bool __init_memblock memblock_is_memory(phys_addr_t addr)
1735{
1736 return memblock_search(&memblock.memory, addr) != -1;
1737}
1738
1739bool __init_memblock memblock_is_map_memory(phys_addr_t addr)
1740{
1741 int i = memblock_search(&memblock.memory, addr);
1742
1743 if (i == -1)
1744 return false;
1745 return !memblock_is_nomap(&memblock.memory.regions[i]);
1746}
1747
1748int __init_memblock memblock_search_pfn_nid(unsigned long pfn,
1749 unsigned long *start_pfn, unsigned long *end_pfn)
1750{
1751 struct memblock_type *type = &memblock.memory;
1752 int mid = memblock_search(type, PFN_PHYS(pfn));
1753
1754 if (mid == -1)
1755 return -1;
1756
1757 *start_pfn = PFN_DOWN(type->regions[mid].base);
1758 *end_pfn = PFN_DOWN(type->regions[mid].base + type->regions[mid].size);
1759
1760 return memblock_get_region_node(&type->regions[mid]);
1761}
1762
1763
1764
1765
1766
1767
1768
1769
1770
1771
1772
1773bool __init_memblock memblock_is_region_memory(phys_addr_t base, phys_addr_t size)
1774{
1775 int idx = memblock_search(&memblock.memory, base);
1776 phys_addr_t end = base + memblock_cap_size(base, &size);
1777
1778 if (idx == -1)
1779 return false;
1780 return (memblock.memory.regions[idx].base +
1781 memblock.memory.regions[idx].size) >= end;
1782}
1783
1784
1785
1786
1787
1788
1789
1790
1791
1792
1793
1794
1795bool __init_memblock memblock_is_region_reserved(phys_addr_t base, phys_addr_t size)
1796{
1797 memblock_cap_size(base, &size);
1798 return memblock_overlaps_region(&memblock.reserved, base, size);
1799}
1800
1801void __init_memblock memblock_trim_memory(phys_addr_t align)
1802{
1803 phys_addr_t start, end, orig_start, orig_end;
1804 struct memblock_region *r;
1805
1806 for_each_mem_region(r) {
1807 orig_start = r->base;
1808 orig_end = r->base + r->size;
1809 start = round_up(orig_start, align);
1810 end = round_down(orig_end, align);
1811
1812 if (start == orig_start && end == orig_end)
1813 continue;
1814
1815 if (start < end) {
1816 r->base = start;
1817 r->size = end - start;
1818 } else {
1819 memblock_remove_region(&memblock.memory,
1820 r - memblock.memory.regions);
1821 r--;
1822 }
1823 }
1824}
1825
1826void __init_memblock memblock_set_current_limit(phys_addr_t limit)
1827{
1828 memblock.current_limit = limit;
1829}
1830
1831phys_addr_t __init_memblock memblock_get_current_limit(void)
1832{
1833 return memblock.current_limit;
1834}
1835
1836static void __init_memblock memblock_dump(struct memblock_type *type)
1837{
1838 phys_addr_t base, end, size;
1839 enum memblock_flags flags;
1840 int idx;
1841 struct memblock_region *rgn;
1842
1843 pr_info(" %s.cnt = 0x%lx\n", type->name, type->cnt);
1844
1845 for_each_memblock_type(idx, type, rgn) {
1846 char nid_buf[32] = "";
1847
1848 base = rgn->base;
1849 size = rgn->size;
1850 end = base + size - 1;
1851 flags = rgn->flags;
1852#ifdef CONFIG_NEED_MULTIPLE_NODES
1853 if (memblock_get_region_node(rgn) != MAX_NUMNODES)
1854 snprintf(nid_buf, sizeof(nid_buf), " on node %d",
1855 memblock_get_region_node(rgn));
1856#endif
1857 pr_info(" %s[%#x]\t[%pa-%pa], %pa bytes%s flags: %#x\n",
1858 type->name, idx, &base, &end, &size, nid_buf, flags);
1859 }
1860}
1861
1862static void __init_memblock __memblock_dump_all(void)
1863{
1864 pr_info("MEMBLOCK configuration:\n");
1865 pr_info(" memory size = %pa reserved size = %pa\n",
1866 &memblock.memory.total_size,
1867 &memblock.reserved.total_size);
1868
1869 memblock_dump(&memblock.memory);
1870 memblock_dump(&memblock.reserved);
1871#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
1872 memblock_dump(&physmem);
1873#endif
1874}
1875
1876void __init_memblock memblock_dump_all(void)
1877{
1878 if (memblock_debug)
1879 __memblock_dump_all();
1880}
1881
1882void __init memblock_allow_resize(void)
1883{
1884 memblock_can_resize = 1;
1885}
1886
1887static int __init early_memblock(char *p)
1888{
1889 if (p && strstr(p, "debug"))
1890 memblock_debug = 1;
1891 return 0;
1892}
1893early_param("memblock", early_memblock);
1894
1895static void __init free_memmap(unsigned long start_pfn, unsigned long end_pfn)
1896{
1897 struct page *start_pg, *end_pg;
1898 phys_addr_t pg, pgend;
1899
1900
1901
1902
1903 start_pg = pfn_to_page(start_pfn - 1) + 1;
1904 end_pg = pfn_to_page(end_pfn - 1) + 1;
1905
1906
1907
1908
1909
1910 pg = PAGE_ALIGN(__pa(start_pg));
1911 pgend = __pa(end_pg) & PAGE_MASK;
1912
1913
1914
1915
1916
1917 if (pg < pgend)
1918 memblock_free(pg, pgend - pg);
1919}
1920
1921
1922
1923
1924static void __init free_unused_memmap(void)
1925{
1926 unsigned long start, end, prev_end = 0;
1927 int i;
1928
1929 if (!IS_ENABLED(CONFIG_HAVE_ARCH_PFN_VALID) ||
1930 IS_ENABLED(CONFIG_SPARSEMEM_VMEMMAP))
1931 return;
1932
1933
1934
1935
1936
1937 for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, NULL) {
1938#ifdef CONFIG_SPARSEMEM
1939
1940
1941
1942
1943 start = min(start, ALIGN(prev_end, PAGES_PER_SECTION));
1944#else
1945
1946
1947
1948
1949
1950 start = round_down(start, MAX_ORDER_NR_PAGES);
1951#endif
1952
1953
1954
1955
1956
1957 if (prev_end && prev_end < start)
1958 free_memmap(prev_end, start);
1959
1960
1961
1962
1963
1964
1965 prev_end = ALIGN(end, MAX_ORDER_NR_PAGES);
1966 }
1967
1968#ifdef CONFIG_SPARSEMEM
1969 if (!IS_ALIGNED(prev_end, PAGES_PER_SECTION))
1970 free_memmap(prev_end, ALIGN(prev_end, PAGES_PER_SECTION));
1971#endif
1972}
1973
1974static void __init __free_pages_memory(unsigned long start, unsigned long end)
1975{
1976 int order;
1977
1978 while (start < end) {
1979 order = min(MAX_ORDER - 1UL, __ffs(start));
1980
1981 while (start + (1UL << order) > end)
1982 order--;
1983
1984 memblock_free_pages(pfn_to_page(start), start, order);
1985
1986 start += (1UL << order);
1987 }
1988}
1989
1990static unsigned long __init __free_memory_core(phys_addr_t start,
1991 phys_addr_t end)
1992{
1993 unsigned long start_pfn = PFN_UP(start);
1994 unsigned long end_pfn = min_t(unsigned long,
1995 PFN_DOWN(end), max_low_pfn);
1996
1997 if (start_pfn >= end_pfn)
1998 return 0;
1999
2000 __free_pages_memory(start_pfn, end_pfn);
2001
2002 return end_pfn - start_pfn;
2003}
2004
2005static unsigned long __init free_low_memory_core_early(void)
2006{
2007 unsigned long count = 0;
2008 phys_addr_t start, end;
2009 u64 i;
2010
2011 memblock_clear_hotplug(0, -1);
2012
2013 for_each_reserved_mem_range(i, &start, &end)
2014 reserve_bootmem_region(start, end);
2015
2016
2017
2018
2019
2020
2021 for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE, &start, &end,
2022 NULL)
2023 count += __free_memory_core(start, end);
2024
2025 return count;
2026}
2027
2028static int reset_managed_pages_done __initdata;
2029
2030void reset_node_managed_pages(pg_data_t *pgdat)
2031{
2032 struct zone *z;
2033
2034 for (z = pgdat->node_zones; z < pgdat->node_zones + MAX_NR_ZONES; z++)
2035 atomic_long_set(&z->managed_pages, 0);
2036}
2037
2038void __init reset_all_zones_managed_pages(void)
2039{
2040 struct pglist_data *pgdat;
2041
2042 if (reset_managed_pages_done)
2043 return;
2044
2045 for_each_online_pgdat(pgdat)
2046 reset_node_managed_pages(pgdat);
2047
2048 reset_managed_pages_done = 1;
2049}
2050
2051
2052
2053
2054void __init memblock_free_all(void)
2055{
2056 unsigned long pages;
2057
2058 free_unused_memmap();
2059 reset_all_zones_managed_pages();
2060
2061 pages = free_low_memory_core_early();
2062 totalram_pages_add(pages);
2063}
2064
2065#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_ARCH_KEEP_MEMBLOCK)
2066
2067static int memblock_debug_show(struct seq_file *m, void *private)
2068{
2069 struct memblock_type *type = m->private;
2070 struct memblock_region *reg;
2071 int i;
2072 phys_addr_t end;
2073
2074 for (i = 0; i < type->cnt; i++) {
2075 reg = &type->regions[i];
2076 end = reg->base + reg->size - 1;
2077
2078 seq_printf(m, "%4d: ", i);
		seq_printf(m, "%pa..%pa\n", &reg->base, &end);
2080 }
2081 return 0;
2082}
2083DEFINE_SHOW_ATTRIBUTE(memblock_debug);
2084
2085static int __init memblock_init_debugfs(void)
2086{
2087 struct dentry *root = debugfs_create_dir("memblock", NULL);
2088
2089 debugfs_create_file("memory", 0444, root,
2090 &memblock.memory, &memblock_debug_fops);
2091 debugfs_create_file("reserved", 0444, root,
2092 &memblock.reserved, &memblock_debug_fops);
2093#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
2094 debugfs_create_file("physmem", 0444, root, &physmem,
2095 &memblock_debug_fops);
2096#endif
2097
2098 return 0;
2099}
2100__initcall(memblock_init_debugfs);
2101
2102#endif
2103