// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Procedures for maintaining information about logical memory blocks.
 *
 * Peter Bergner, IBM Corp.	June 2001.
 * Copyright (C) 2001 Peter Bergner.
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/poison.h>
#include <linux/pfn.h>
#include <linux/debugfs.h>
#include <linux/kmemleak.h>
#include <linux/seq_file.h>
#include <linux/memblock.h>

#include <asm/sections.h>
#include <linux/io.h>

#include "internal.h"

#define INIT_MEMBLOCK_REGIONS			128
#define INIT_PHYSMEM_REGIONS			4

#ifndef INIT_MEMBLOCK_RESERVED_REGIONS
# define INIT_MEMBLOCK_RESERVED_REGIONS		INIT_MEMBLOCK_REGIONS
#endif
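
/**
 * DOC: memblock overview
 *
 * Memblock is a method of managing memory regions during the early
 * boot period when the usual kernel memory allocators are not up and
 * running.
 *
 * Memblock views the system memory as collections of contiguous
 * regions:
 *
 * * ``memory`` - describes the physical memory available to the
 *   kernel; this may differ from the actual physical memory installed
 *   in the system, for instance when the memory is restricted with
 *   the ``mem=`` command line parameter
 * * ``reserved`` - describes the regions that were allocated
 * * ``physmem`` - describes the actual physical memory available
 *   during boot regardless of possible restrictions; this type is
 *   only available on some architectures
 *
 * Each collection is a struct memblock_type holding an array of
 * struct memblock_region along with allocator metadata; the "memory"
 * and "reserved" types are wrapped in the statically initialized
 * struct memblock below.
 *
 * Early architecture setup registers the physical memory layout with
 * memblock_add() or memblock_add_node() and then carves out
 * allocations with the memblock_alloc*() (virtual address) or
 * memblock_phys_alloc*() (physical address) families. An illustrative
 * sketch of a typical sequence (the symbols here are placeholders,
 * not taken from this file):
 *
 *	memblock_add(dram_base, dram_size);
 *	memblock_reserve(kernel_base, kernel_size);
 *	ptr = memblock_alloc(table_size, SMP_CACHE_BYTES);
 *
 * Unless CONFIG_ARCH_KEEP_MEMBLOCK is enabled, the region arrays are
 * discarded after the buddy allocator takes over (see
 * memblock_discard() and memblock_free_all() below).
 */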
#ifndef CONFIG_NUMA
struct pglist_data __refdata contig_page_data;
EXPORT_SYMBOL(contig_page_data);
#endif

unsigned long max_low_pfn;
unsigned long min_low_pfn;
unsigned long max_pfn;
unsigned long long max_possible_pfn;

static struct memblock_region memblock_memory_init_regions[INIT_MEMBLOCK_REGIONS] __initdata_memblock;
static struct memblock_region memblock_reserved_init_regions[INIT_MEMBLOCK_RESERVED_REGIONS] __initdata_memblock;
#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
static struct memblock_region memblock_physmem_init_regions[INIT_PHYSMEM_REGIONS];
#endif

struct memblock memblock __initdata_memblock = {
	.memory.regions		= memblock_memory_init_regions,
	.memory.cnt		= 1,	/* empty dummy entry */
	.memory.max		= INIT_MEMBLOCK_REGIONS,
	.memory.name		= "memory",

	.reserved.regions	= memblock_reserved_init_regions,
	.reserved.cnt		= 1,	/* empty dummy entry */
	.reserved.max		= INIT_MEMBLOCK_RESERVED_REGIONS,
	.reserved.name		= "reserved",

	.bottom_up		= false,
	.current_limit		= MEMBLOCK_ALLOC_ANYWHERE,
};

#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
struct memblock_type physmem = {
	.regions		= memblock_physmem_init_regions,
	.cnt			= 1,	/* empty dummy entry */
	.max			= INIT_PHYSMEM_REGIONS,
	.name			= "physmem",
};
#endif
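
/*
 * memblock_memory points at &memblock.memory until memblock_discard()
 * resets it to NULL; should_skip_region() compares against it to tell
 * whether it is walking the "memory" type.
 */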
static __refdata struct memblock_type *memblock_memory = &memblock.memory;

#define for_each_memblock_type(i, memblock_type, rgn)			\
	for (i = 0, rgn = &memblock_type->regions[0];			\
	     i < memblock_type->cnt;					\
	     i++, rgn = &memblock_type->regions[i])

#define memblock_dbg(fmt, ...)						\
	do {								\
		if (memblock_debug)					\
			pr_info(fmt, ##__VA_ARGS__);			\
	} while (0)

static int memblock_debug __initdata_memblock;
static bool system_has_some_mirror __initdata_memblock = false;
static int memblock_can_resize __initdata_memblock;
static int memblock_memory_in_slab __initdata_memblock = 0;
static int memblock_reserved_in_slab __initdata_memblock = 0;

static enum memblock_flags __init_memblock choose_memblock_flags(void)
{
	return system_has_some_mirror ? MEMBLOCK_MIRROR : MEMBLOCK_NONE;
}

/* adjust *@size so that (@base + *@size) doesn't overflow, return new size */
static inline phys_addr_t memblock_cap_size(phys_addr_t base, phys_addr_t *size)
{
	return *size = min(*size, PHYS_ADDR_MAX - base);
}

/*
 * Address comparison utilities
 */
static unsigned long __init_memblock memblock_addrs_overlap(phys_addr_t base1, phys_addr_t size1,
				       phys_addr_t base2, phys_addr_t size2)
{
	return ((base1 < (base2 + size2)) && (base2 < (base1 + size1)));
}
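
/**
 * memblock_overlaps_region - check if two regions intersect
 * @type: memblock type to check
 * @base: base address of region
 * @size: size of region
 *
 * Return: true if the region overlaps with any region of @type,
 * false otherwise.
 */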
bool __init_memblock memblock_overlaps_region(struct memblock_type *type,
					phys_addr_t base, phys_addr_t size)
{
	unsigned long i;

	memblock_cap_size(base, &size);

	for (i = 0; i < type->cnt; i++)
		if (memblock_addrs_overlap(base, size, type->regions[i].base,
					   type->regions[i].size))
			break;
	return i < type->cnt;
}
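
/**
 * __memblock_find_range_bottom_up - find free area utility in bottom-up
 * @start: start of candidate range
 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_ANYWHERE or
 *       %MEMBLOCK_ALLOC_ACCESSIBLE
 * @size: size of free area to find
 * @align: alignment of free area to find
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 * @flags: pick from blocks based on memory attributes
 *
 * Utility called from memblock_find_in_range_node(), find free area bottom-up.
 *
 * Return:
 * Found address on success, 0 on failure.
 */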
static phys_addr_t __init_memblock
__memblock_find_range_bottom_up(phys_addr_t start, phys_addr_t end,
				phys_addr_t size, phys_addr_t align, int nid,
				enum memblock_flags flags)
{
	phys_addr_t this_start, this_end, cand;
	u64 i;

	for_each_free_mem_range(i, nid, flags, &this_start, &this_end, NULL) {
		this_start = clamp(this_start, start, end);
		this_end = clamp(this_end, start, end);

		cand = round_up(this_start, align);
		if (cand < this_end && this_end - cand >= size)
			return cand;
	}

	return 0;
}
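
/**
 * __memblock_find_range_top_down - find free area utility, in top-down
 * @start: start of candidate range
 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_ANYWHERE or
 *       %MEMBLOCK_ALLOC_ACCESSIBLE
 * @size: size of free area to find
 * @align: alignment of free area to find
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 * @flags: pick from blocks based on memory attributes
 *
 * Utility called from memblock_find_in_range_node(), find free area top-down.
 *
 * Return:
 * Found address on success, 0 on failure.
 */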
static phys_addr_t __init_memblock
__memblock_find_range_top_down(phys_addr_t start, phys_addr_t end,
			       phys_addr_t size, phys_addr_t align, int nid,
			       enum memblock_flags flags)
{
	phys_addr_t this_start, this_end, cand;
	u64 i;

	for_each_free_mem_range_reverse(i, nid, flags, &this_start, &this_end,
					NULL) {
		this_start = clamp(this_start, start, end);
		this_end = clamp(this_end, start, end);

		if (this_end < size)
			continue;

		cand = round_down(this_end - size, align);
		if (cand >= this_start)
			return cand;
	}

	return 0;
}
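
/**
 * memblock_find_in_range_node - find free area in given range and node
 * @size: size of free area to find
 * @align: alignment of free area to find
 * @start: start of candidate range
 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_ANYWHERE or
 *       %MEMBLOCK_ALLOC_ACCESSIBLE
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 * @flags: pick from blocks based on memory attributes
 *
 * Find @size free area aligned to @align in the specified range and node.
 *
 * Return:
 * Found address on success, 0 on failure.
 */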
static phys_addr_t __init_memblock memblock_find_in_range_node(phys_addr_t size,
					phys_addr_t align, phys_addr_t start,
					phys_addr_t end, int nid,
					enum memblock_flags flags)
{
	/* pump up @end */
	if (end == MEMBLOCK_ALLOC_ACCESSIBLE ||
	    end == MEMBLOCK_ALLOC_KASAN)
		end = memblock.current_limit;

	/* avoid allocating the first page */
	start = max_t(phys_addr_t, start, PAGE_SIZE);
	end = max(start, end);

	if (memblock_bottom_up())
		return __memblock_find_range_bottom_up(start, end, size, align,
						       nid, flags);
	else
		return __memblock_find_range_top_down(start, end, size, align,
						      nid, flags);
}
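
/**
 * memblock_find_in_range - find free area in given range
 * @start: start of candidate range
 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_ANYWHERE or
 *       %MEMBLOCK_ALLOC_ACCESSIBLE
 * @size: size of free area to find
 * @align: alignment of free area to find
 *
 * Find @size free area aligned to @align in the specified range,
 * falling back from mirrored memory to any memory if necessary.
 *
 * Return:
 * Found address on success, 0 on failure.
 */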
static phys_addr_t __init_memblock memblock_find_in_range(phys_addr_t start,
					phys_addr_t end, phys_addr_t size,
					phys_addr_t align)
{
	phys_addr_t ret;
	enum memblock_flags flags = choose_memblock_flags();

again:
	ret = memblock_find_in_range_node(size, align, start, end,
					  NUMA_NO_NODE, flags);

	if (!ret && (flags & MEMBLOCK_MIRROR)) {
		pr_warn("Could not allocate %pap bytes of mirrored memory\n",
			&size);
		flags &= ~MEMBLOCK_MIRROR;
		goto again;
	}

	return ret;
}

static void __init_memblock memblock_remove_region(struct memblock_type *type, unsigned long r)
{
	type->total_size -= type->regions[r].size;
	memmove(&type->regions[r], &type->regions[r + 1],
		(type->cnt - (r + 1)) * sizeof(type->regions[r]));
	type->cnt--;

	/* Special case for empty arrays */
	if (type->cnt == 0) {
		WARN_ON(type->total_size != 0);
		type->cnt = 1;
		type->regions[0].base = 0;
		type->regions[0].size = 0;
		type->regions[0].flags = 0;
		memblock_set_region_node(&type->regions[0], MAX_NUMNODES);
	}
}

#ifndef CONFIG_ARCH_KEEP_MEMBLOCK
/**
 * memblock_discard - discard memory and reserved arrays if they were allocated
 */
void __init memblock_discard(void)
{
	phys_addr_t addr, size;

	if (memblock.reserved.regions != memblock_reserved_init_regions) {
		addr = __pa(memblock.reserved.regions);
		size = PAGE_ALIGN(sizeof(struct memblock_region) *
				  memblock.reserved.max);
		__memblock_free_late(addr, size);
	}

	if (memblock.memory.regions != memblock_memory_init_regions) {
		addr = __pa(memblock.memory.regions);
		size = PAGE_ALIGN(sizeof(struct memblock_region) *
				  memblock.memory.max);
		__memblock_free_late(addr, size);
	}

	memblock_memory = NULL;
}
#endif
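
/**
 * memblock_double_array - double the size of the memblock regions array
 * @type: memblock type of the regions array being doubled
 * @new_area_start: starting address of memory range to avoid overlap with
 * @new_area_size: size of memory range to avoid overlap with
 *
 * Double the size of the @type regions array. If memblock is being used to
 * allocate memory for a new reserved regions array and there is a previously
 * allocated memory range [@new_area_start, @new_area_start + @new_area_size]
 * waiting to be reserved, ensure the memory used by the new array does
 * not overlap.
 *
 * Return:
 * 0 on success, -1 on failure.
 */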
static int __init_memblock memblock_double_array(struct memblock_type *type,
						phys_addr_t new_area_start,
						phys_addr_t new_area_size)
{
	struct memblock_region *new_array, *old_array;
	phys_addr_t old_alloc_size, new_alloc_size;
	phys_addr_t old_size, new_size, addr, new_end;
	int use_slab = slab_is_available();
	int *in_slab;

	/* We don't allow resizing until we know about the reserved regions
	 * of memory that aren't suitable for allocation
	 */
	if (!memblock_can_resize)
		return -1;

	/* Calculate new doubled size */
	old_size = type->max * sizeof(struct memblock_region);
	new_size = old_size << 1;
	/*
	 * We need to allocate the new one aligned to PAGE_SIZE,
	 * so we can free it completely later.
	 */
	old_alloc_size = PAGE_ALIGN(old_size);
	new_alloc_size = PAGE_ALIGN(new_size);

	/* Retrieve the slab flag */
	if (type == &memblock.memory)
		in_slab = &memblock_memory_in_slab;
	else
		in_slab = &memblock_reserved_in_slab;

	/* Try to find some space for it */
	if (use_slab) {
		new_array = kmalloc(new_size, GFP_KERNEL);
		addr = new_array ? __pa(new_array) : 0;
	} else {
		/* only exclude range when trying to double reserved.regions */
		if (type != &memblock.reserved)
			new_area_start = new_area_size = 0;

		addr = memblock_find_in_range(new_area_start + new_area_size,
						memblock.current_limit,
						new_alloc_size, PAGE_SIZE);
		if (!addr && new_area_size)
			addr = memblock_find_in_range(0,
				min(new_area_start, memblock.current_limit),
				new_alloc_size, PAGE_SIZE);

		new_array = addr ? __va(addr) : NULL;
	}
	if (!addr) {
		pr_err("memblock: Failed to double %s array from %ld to %ld entries !\n",
		       type->name, type->max, type->max * 2);
		return -1;
	}

	new_end = addr + new_size - 1;
	memblock_dbg("memblock: %s is doubled to %ld at [%pa-%pa]",
			type->name, type->max * 2, &addr, &new_end);

	/*
	 * Found space, we now need to move the array over before we add the
	 * reserved region since it may be our reserved array itself that is
	 * full.
	 */
	memcpy(new_array, type->regions, old_size);
	memset(new_array + type->max, 0, old_size);
	old_array = type->regions;
	type->regions = new_array;
	type->max <<= 1;

	/* Free old array. We needn't free it if the array is the static one */
	if (*in_slab)
		kfree(old_array);
	else if (old_array != memblock_memory_init_regions &&
		 old_array != memblock_reserved_init_regions)
		memblock_free_ptr(old_array, old_alloc_size);

	/*
	 * Reserve the new array if that comes from the memblock.  Otherwise,
	 * we needn't do it.
	 */
	if (!use_slab)
		BUG_ON(memblock_reserve(addr, new_alloc_size));

	/* Update slab flag */
	*in_slab = use_slab;

	return 0;
}
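
/**
 * memblock_merge_regions - merge neighboring compatible regions
 * @type: memblock type to scan
 *
 * Scan @type and merge neighboring compatible regions.
 */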
static void __init_memblock memblock_merge_regions(struct memblock_type *type)
{
	int i = 0;

	/* cnt never goes below 1 */
	while (i < type->cnt - 1) {
		struct memblock_region *this = &type->regions[i];
		struct memblock_region *next = &type->regions[i + 1];

		if (this->base + this->size != next->base ||
		    memblock_get_region_node(this) !=
		    memblock_get_region_node(next) ||
		    this->flags != next->flags) {
			BUG_ON(this->base + this->size > next->base);
			i++;
			continue;
		}

		this->size += next->size;
		/* move forward from next + 1, index of which is i + 2 */
		memmove(next, next + 1, (type->cnt - (i + 2)) * sizeof(*next));
		type->cnt--;
	}
}
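
/**
 * memblock_insert_region - insert new memblock region
 * @type:	memblock type to insert into
 * @idx:	index for the insertion point
 * @base:	base address of the new region
 * @size:	size of the new region
 * @nid:	node id of the new region
 * @flags:	flags of the new region
 *
 * Insert new memblock region [@base, @base + @size) into @type at @idx.
 * @type must already have extra room to accommodate the new region.
 */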
static void __init_memblock memblock_insert_region(struct memblock_type *type,
						   int idx, phys_addr_t base,
						   phys_addr_t size,
						   int nid,
						   enum memblock_flags flags)
{
	struct memblock_region *rgn = &type->regions[idx];

	BUG_ON(type->cnt >= type->max);
	memmove(rgn + 1, rgn, (type->cnt - idx) * sizeof(*rgn));
	rgn->base = base;
	rgn->size = size;
	rgn->flags = flags;
	memblock_set_region_node(rgn, nid);
	type->cnt++;
	type->total_size += size;
}
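
/**
 * memblock_add_range - add new memblock region
 * @type: memblock type to add new region into
 * @base: base address of the new region
 * @size: size of the new region
 * @nid: nid of the new region
 * @flags: flags of the new region
 *
 * Add new memblock region [@base, @base + @size) into @type.  The new region
 * is allowed to overlap with existing ones - overlaps don't affect already
 * existing regions.  @type is guaranteed to be minimal (all neighbouring
 * compatible regions are merged) after the addition.
 *
 * Return:
 * 0 on success, -errno on failure.
 */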
static int __init_memblock memblock_add_range(struct memblock_type *type,
				phys_addr_t base, phys_addr_t size,
				int nid, enum memblock_flags flags)
{
	bool insert = false;
	phys_addr_t obase = base;
	phys_addr_t end = base + memblock_cap_size(base, &size);
	int idx, nr_new;
	struct memblock_region *rgn;

	if (!size)
		return 0;

	/* special case for empty array */
	if (type->regions[0].size == 0) {
		WARN_ON(type->cnt != 1 || type->total_size);
		type->regions[0].base = base;
		type->regions[0].size = size;
		type->regions[0].flags = flags;
		memblock_set_region_node(&type->regions[0], nid);
		type->total_size = size;
		return 0;
	}
repeat:
	/*
	 * The following is executed twice.  Once with %false @insert and
	 * then with %true.  The first counts the number of regions needed
	 * to accommodate the new area.  The second actually inserts them.
	 */
	base = obase;
	nr_new = 0;

	for_each_memblock_type(idx, type, rgn) {
		phys_addr_t rbase = rgn->base;
		phys_addr_t rend = rbase + rgn->size;

		if (rbase >= end)
			break;
		if (rend <= base)
			continue;

		/* @rgn overlaps.  If it separates the lower part of new
		 * area, insert that portion.
		 */
		if (rbase > base) {
#ifdef CONFIG_NUMA
			WARN_ON(nid != memblock_get_region_node(rgn));
#endif
			WARN_ON(flags != rgn->flags);
			nr_new++;
			if (insert)
				memblock_insert_region(type, idx++, base,
						       rbase - base, nid,
						       flags);
		}
		/* area below @rend is dealt with, forget about it */
		base = min(rend, end);
	}

	/* insert the remaining portion */
	if (base < end) {
		nr_new++;
		if (insert)
			memblock_insert_region(type, idx, base, end - base,
					       nid, flags);
	}

	if (!nr_new)
		return 0;

	/*
	 * If this was the first round, resize array and repeat for actual
	 * insertions; otherwise, merge and return.
	 */
	if (!insert) {
		while (type->cnt + nr_new > type->max)
			if (memblock_double_array(type, obase, size) < 0)
				return -ENOMEM;
		insert = true;
		goto repeat;
	} else {
		memblock_merge_regions(type);
		return 0;
	}
}
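
/**
 * memblock_add_node - add new memblock region within a NUMA node
 * @base: base address of the new region
 * @size: size of the new region
 * @nid: nid of the new region
 *
 * Add new memblock region [@base, @base + @size) to the "memory"
 * type. See memblock_add_range() description for more details.
 *
 * Return:
 * 0 on success, -errno on failure.
 */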
int __init_memblock memblock_add_node(phys_addr_t base, phys_addr_t size,
				      int nid)
{
	phys_addr_t end = base + size - 1;

	memblock_dbg("%s: [%pa-%pa] nid=%d %pS\n", __func__,
		     &base, &end, nid, (void *)_RET_IP_);

	return memblock_add_range(&memblock.memory, base, size, nid, 0);
}
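
/**
 * memblock_add - add new memblock region
 * @base: base address of the new region
 * @size: size of the new region
 *
 * Add new memblock region [@base, @base + @size) to the "memory"
 * type. See memblock_add_range() description for more details.
 *
 * Return:
 * 0 on success, -errno on failure.
 */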
int __init_memblock memblock_add(phys_addr_t base, phys_addr_t size)
{
	phys_addr_t end = base + size - 1;

	memblock_dbg("%s: [%pa-%pa] %pS\n", __func__,
		     &base, &end, (void *)_RET_IP_);

	return memblock_add_range(&memblock.memory, base, size, MAX_NUMNODES, 0);
}
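
/**
 * memblock_isolate_range - isolate given range into disjoint memblocks
 * @type: memblock type to isolate range for
 * @base: base of range to isolate
 * @size: size of range to isolate
 * @start_rgn: out parameter for the start of isolated region
 * @end_rgn: out parameter for the end of isolated region
 *
 * Walk @type and ensure that regions don't cross the boundaries defined by
 * [@base, @base + @size).  Crossing regions are split at the boundaries,
 * which may create at most two more regions.  The index of the first
 * region inside the range is returned in *@start_rgn and the end in
 * *@end_rgn.
 *
 * Return:
 * 0 on success, -errno on failure.
 */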
static int __init_memblock memblock_isolate_range(struct memblock_type *type,
					phys_addr_t base, phys_addr_t size,
					int *start_rgn, int *end_rgn)
{
	phys_addr_t end = base + memblock_cap_size(base, &size);
	int idx;
	struct memblock_region *rgn;

	*start_rgn = *end_rgn = 0;

	if (!size)
		return 0;

	/* we'll create at most two more regions */
	while (type->cnt + 2 > type->max)
		if (memblock_double_array(type, base, size) < 0)
			return -ENOMEM;

	for_each_memblock_type(idx, type, rgn) {
		phys_addr_t rbase = rgn->base;
		phys_addr_t rend = rbase + rgn->size;

		if (rbase >= end)
			break;
		if (rend <= base)
			continue;

		if (rbase < base) {
			/*
			 * @rgn intersects from below.  Split and continue
			 * to process the next region - the new top half.
			 */
			rgn->base = base;
			rgn->size -= base - rbase;
			type->total_size -= base - rbase;
			memblock_insert_region(type, idx, rbase, base - rbase,
					       memblock_get_region_node(rgn),
					       rgn->flags);
		} else if (rend > end) {
			/*
			 * @rgn intersects from above.  Split and redo the
			 * current region - the new bottom half.
			 */
			rgn->base = end;
			rgn->size -= end - rbase;
			type->total_size -= end - rbase;
			memblock_insert_region(type, idx--, rbase, end - rbase,
					       memblock_get_region_node(rgn),
					       rgn->flags);
		} else {
			/* @rgn is fully contained, record it */
			if (!*end_rgn)
				*start_rgn = idx;
			*end_rgn = idx + 1;
		}
	}

	return 0;
}

static int __init_memblock memblock_remove_range(struct memblock_type *type,
					  phys_addr_t base, phys_addr_t size)
{
	int start_rgn, end_rgn;
	int i, ret;

	ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
	if (ret)
		return ret;

	for (i = end_rgn - 1; i >= start_rgn; i--)
		memblock_remove_region(type, i);
	return 0;
}

int __init_memblock memblock_remove(phys_addr_t base, phys_addr_t size)
{
	phys_addr_t end = base + size - 1;

	memblock_dbg("%s: [%pa-%pa] %pS\n", __func__,
		     &base, &end, (void *)_RET_IP_);

	return memblock_remove_range(&memblock.memory, base, size);
}
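
/**
 * memblock_free_ptr - free boot memory allocation
 * @ptr: starting address of the boot memory allocation
 * @size: size of the boot memory block in bytes
 *
 * Free boot memory block previously allocated by memblock_alloc_xx() API.
 * The freeing memory will not be released to the buddy allocator.
 */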
void __init_memblock memblock_free_ptr(void *ptr, size_t size)
{
	if (ptr)
		memblock_free(__pa(ptr), size);
}
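
/**
 * memblock_free - free boot memory block
 * @base: phys starting address of the boot memory block
 * @size: size of the boot memory block in bytes
 *
 * Free boot memory block previously allocated by memblock_alloc_xx() API.
 * The freeing memory will not be released to the buddy allocator.
 */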
int __init_memblock memblock_free(phys_addr_t base, phys_addr_t size)
{
	phys_addr_t end = base + size - 1;

	memblock_dbg("%s: [%pa-%pa] %pS\n", __func__,
		     &base, &end, (void *)_RET_IP_);

	kmemleak_free_part_phys(base, size);
	return memblock_remove_range(&memblock.reserved, base, size);
}

int __init_memblock memblock_reserve(phys_addr_t base, phys_addr_t size)
{
	phys_addr_t end = base + size - 1;

	memblock_dbg("%s: [%pa-%pa] %pS\n", __func__,
		     &base, &end, (void *)_RET_IP_);

	return memblock_add_range(&memblock.reserved, base, size, MAX_NUMNODES, 0);
}

#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
int __init_memblock memblock_physmem_add(phys_addr_t base, phys_addr_t size)
{
	phys_addr_t end = base + size - 1;

	memblock_dbg("%s: [%pa-%pa] %pS\n", __func__,
		     &base, &end, (void *)_RET_IP_);

	return memblock_add_range(&physmem, base, size, MAX_NUMNODES, 0);
}
#endif
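
/**
 * memblock_setclr_flag - set or clear flag for a memory region
 * @base: base address of the region
 * @size: size of the region
 * @set: set or clear the flag
 * @flag: the flag to update
 *
 * This function isolates region [@base, @base + @size), and sets/clears flag
 *
 * Return: 0 on success, -errno on failure.
 */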
static int __init_memblock memblock_setclr_flag(phys_addr_t base,
				phys_addr_t size, int set, int flag)
{
	struct memblock_type *type = &memblock.memory;
	int i, ret, start_rgn, end_rgn;

	ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
	if (ret)
		return ret;

	for (i = start_rgn; i < end_rgn; i++) {
		struct memblock_region *r = &type->regions[i];

		if (set)
			r->flags |= flag;
		else
			r->flags &= ~flag;
	}

	memblock_merge_regions(type);
	return 0;
}
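
/**
 * memblock_mark_hotplug - Mark hotpluggable memory with flag MEMBLOCK_HOTPLUG.
 * @base: the base phys addr of the region
 * @size: the size of the region
 *
 * Return: 0 on success, -errno on failure.
 */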
int __init_memblock memblock_mark_hotplug(phys_addr_t base, phys_addr_t size)
{
	return memblock_setclr_flag(base, size, 1, MEMBLOCK_HOTPLUG);
}
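
/**
 * memblock_clear_hotplug - Clear flag MEMBLOCK_HOTPLUG for a specified region.
 * @base: the base phys addr of the region
 * @size: the size of the region
 *
 * Return: 0 on success, -errno on failure.
 */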
int __init_memblock memblock_clear_hotplug(phys_addr_t base, phys_addr_t size)
{
	return memblock_setclr_flag(base, size, 0, MEMBLOCK_HOTPLUG);
}
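
/**
 * memblock_mark_mirror - Mark mirrored memory with flag MEMBLOCK_MIRROR.
 * @base: the base phys addr of the region
 * @size: the size of the region
 *
 * Return: 0 on success, -errno on failure.
 */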
int __init_memblock memblock_mark_mirror(phys_addr_t base, phys_addr_t size)
{
	system_has_some_mirror = true;

	return memblock_setclr_flag(base, size, 1, MEMBLOCK_MIRROR);
}
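
/**
 * memblock_mark_nomap - Mark a memory region with flag MEMBLOCK_NOMAP.
 * @base: the base phys addr of the region
 * @size: the size of the region
 *
 * The memory regions marked with %MEMBLOCK_NOMAP will not be added to the
 * direct mapping of the physical memory. These regions will still be
 * covered by the memory map. The struct page representing NOMAP memory
 * frames in the memory map will be PageReserved().
 *
 * Return: 0 on success, -errno on failure.
 */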
int __init_memblock memblock_mark_nomap(phys_addr_t base, phys_addr_t size)
{
	return memblock_setclr_flag(base, size, 1, MEMBLOCK_NOMAP);
}
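
/**
 * memblock_clear_nomap - Clear flag MEMBLOCK_NOMAP for a specified region.
 * @base: the base phys addr of the region
 * @size: the size of the region
 *
 * Return: 0 on success, -errno on failure.
 */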
int __init_memblock memblock_clear_nomap(phys_addr_t base, phys_addr_t size)
{
	return memblock_setclr_flag(base, size, 0, MEMBLOCK_NOMAP);
}

static bool should_skip_region(struct memblock_type *type,
			       struct memblock_region *m,
			       int nid, int flags)
{
	int m_nid = memblock_get_region_node(m);

	/* we never skip regions when iterating memblock.reserved or physmem */
	if (type != memblock_memory)
		return false;

	/* only memory regions are associated with nodes, check it */
	if (nid != NUMA_NO_NODE && nid != m_nid)
		return true;

	/* skip hotpluggable memory regions if needed */
	if (movable_node_is_enabled() && memblock_is_hotpluggable(m) &&
	    !(flags & MEMBLOCK_HOTPLUG))
		return true;

	/* if we want mirror memory skip non-mirror memory regions */
	if ((flags & MEMBLOCK_MIRROR) && !memblock_is_mirror(m))
		return true;

	/* skip nomap memory unless we were asked for it explicitly */
	if (!(flags & MEMBLOCK_NOMAP) && memblock_is_nomap(m))
		return true;

	return false;
}
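
/**
 * __next_mem_range - next function for for_each_free_mem_range() etc.
 * @idx: pointer to u64 loop variable
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @type_a: pointer to memblock_type from where the range is taken
 * @type_b: pointer to memblock_type which excludes memory from being taken
 * @out_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @out_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @out_nid: ptr to int for nid of the range, can be %NULL
 *
 * Find the first area from *@idx which matches @nid, fill the out
 * parameters, and update *@idx for the next iteration.  The lower 32bit of
 * *@idx contains index into type_a and the upper 32bit indexes the
 * areas before each region in type_b.	For example, if type_b regions
 * look like the following,
 *
 *	0:[0-16), 1:[32-48), 2:[128-130)
 *
 * The upper 32bit indexes the following regions.
 *
 *	0:[0-0), 1:[16-32), 2:[48-128), 3:[130-MAX)
 *
 * As both region arrays are sorted, the function advances the two indices
 * in lockstep and returns each intersection of a type_a region with an
 * area between type_b regions.
 */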
void __next_mem_range(u64 *idx, int nid, enum memblock_flags flags,
		      struct memblock_type *type_a,
		      struct memblock_type *type_b, phys_addr_t *out_start,
		      phys_addr_t *out_end, int *out_nid)
{
	int idx_a = *idx & 0xffffffff;
	int idx_b = *idx >> 32;

	if (WARN_ONCE(nid == MAX_NUMNODES,
	"Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n"))
		nid = NUMA_NO_NODE;

	for (; idx_a < type_a->cnt; idx_a++) {
		struct memblock_region *m = &type_a->regions[idx_a];

		phys_addr_t m_start = m->base;
		phys_addr_t m_end = m->base + m->size;
		int	    m_nid = memblock_get_region_node(m);

		if (should_skip_region(type_a, m, nid, flags))
			continue;

		if (!type_b) {
			if (out_start)
				*out_start = m_start;
			if (out_end)
				*out_end = m_end;
			if (out_nid)
				*out_nid = m_nid;
			idx_a++;
			*idx = (u32)idx_a | (u64)idx_b << 32;
			return;
		}

		/* scan areas before each reservation */
		for (; idx_b < type_b->cnt + 1; idx_b++) {
			struct memblock_region *r;
			phys_addr_t r_start;
			phys_addr_t r_end;

			r = &type_b->regions[idx_b];
			r_start = idx_b ? r[-1].base + r[-1].size : 0;
			r_end = idx_b < type_b->cnt ?
				r->base : PHYS_ADDR_MAX;

			/*
			 * if idx_b advanced past idx_a,
			 * break out to advance idx_a
			 */
			if (r_start >= m_end)
				break;
			/* if the two regions intersect, we're done */
			if (m_start < r_end) {
				if (out_start)
					*out_start =
						max(m_start, r_start);
				if (out_end)
					*out_end = min(m_end, r_end);
				if (out_nid)
					*out_nid = m_nid;
				/*
				 * The region which ends first is
				 * advanced for the next iteration.
				 */
				if (m_end <= r_end)
					idx_a++;
				else
					idx_b++;
				*idx = (u32)idx_a | (u64)idx_b << 32;
				return;
			}
		}
	}

	/* signal end of iteration */
	*idx = ULLONG_MAX;
}
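
/**
 * __next_mem_range_rev - generic next function for for_each_*_range_rev()
 * @idx: pointer to u64 loop variable
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @type_a: pointer to memblock_type from where the range is taken
 * @type_b: pointer to memblock_type which excludes memory from being taken
 * @out_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @out_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @out_nid: ptr to int for nid of the range, can be %NULL
 *
 * Finds the next range from type_a which is not marked as unsuitable
 * in type_b, walking both arrays from the top down.
 *
 * Reverse of __next_mem_range().
 */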
void __init_memblock __next_mem_range_rev(u64 *idx, int nid,
					  enum memblock_flags flags,
					  struct memblock_type *type_a,
					  struct memblock_type *type_b,
					  phys_addr_t *out_start,
					  phys_addr_t *out_end, int *out_nid)
{
	int idx_a = *idx & 0xffffffff;
	int idx_b = *idx >> 32;

	if (WARN_ONCE(nid == MAX_NUMNODES, "Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n"))
		nid = NUMA_NO_NODE;

	if (*idx == (u64)ULLONG_MAX) {
		idx_a = type_a->cnt - 1;
		if (type_b != NULL)
			idx_b = type_b->cnt;
		else
			idx_b = 0;
	}

	for (; idx_a >= 0; idx_a--) {
		struct memblock_region *m = &type_a->regions[idx_a];

		phys_addr_t m_start = m->base;
		phys_addr_t m_end = m->base + m->size;
		int m_nid = memblock_get_region_node(m);

		if (should_skip_region(type_a, m, nid, flags))
			continue;

		if (!type_b) {
			if (out_start)
				*out_start = m_start;
			if (out_end)
				*out_end = m_end;
			if (out_nid)
				*out_nid = m_nid;
			idx_a--;
			*idx = (u32)idx_a | (u64)idx_b << 32;
			return;
		}

		/* scan areas before each reservation */
		for (; idx_b >= 0; idx_b--) {
			struct memblock_region *r;
			phys_addr_t r_start;
			phys_addr_t r_end;

			r = &type_b->regions[idx_b];
			r_start = idx_b ? r[-1].base + r[-1].size : 0;
			r_end = idx_b < type_b->cnt ?
				r->base : PHYS_ADDR_MAX;
			/*
			 * if idx_b advanced past idx_a,
			 * break out to advance idx_a
			 */
			if (r_end <= m_start)
				break;
			/* if the two regions intersect, we're done */
			if (m_end > r_start) {
				if (out_start)
					*out_start = max(m_start, r_start);
				if (out_end)
					*out_end = min(m_end, r_end);
				if (out_nid)
					*out_nid = m_nid;
				if (m_start >= r_start)
					idx_a--;
				else
					idx_b--;
				*idx = (u32)idx_a | (u64)idx_b << 32;
				return;
			}
		}
	}
	/* signal end of iteration */
	*idx = ULLONG_MAX;
}
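
/*
 * Common iterator interface used to define for_each_mem_pfn_range().
 */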
void __init_memblock __next_mem_pfn_range(int *idx, int nid,
				unsigned long *out_start_pfn,
				unsigned long *out_end_pfn, int *out_nid)
{
	struct memblock_type *type = &memblock.memory;
	struct memblock_region *r;
	int r_nid;

	while (++*idx < type->cnt) {
		r = &type->regions[*idx];
		r_nid = memblock_get_region_node(r);

		if (PFN_UP(r->base) >= PFN_DOWN(r->base + r->size))
			continue;
		if (nid == MAX_NUMNODES || nid == r_nid)
			break;
	}
	if (*idx >= type->cnt) {
		*idx = -1;
		return;
	}

	if (out_start_pfn)
		*out_start_pfn = PFN_UP(r->base);
	if (out_end_pfn)
		*out_end_pfn = PFN_DOWN(r->base + r->size);
	if (out_nid)
		*out_nid = r_nid;
}
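
/**
 * memblock_set_node - set node ID on memblock regions
 * @base: base of area to set node ID for
 * @size: size of area to set node ID for
 * @type: memblock type to set node ID for
 * @nid: node ID to set
 *
 * Set the nid of memblock @type regions in [@base, @base + @size) to @nid.
 * Regions which cross the area boundaries are split as necessary.
 *
 * Return:
 * 0 on success, -errno on failure.
 */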
int __init_memblock memblock_set_node(phys_addr_t base, phys_addr_t size,
				      struct memblock_type *type, int nid)
{
#ifdef CONFIG_NUMA
	int start_rgn, end_rgn;
	int i, ret;

	ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
	if (ret)
		return ret;

	for (i = start_rgn; i < end_rgn; i++)
		memblock_set_region_node(&type->regions[i], nid);

	memblock_merge_regions(type);
#endif
	return 0;
}

#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
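/**
 * __next_mem_pfn_range_in_zone - iterator for for_each_*_range_in_zone()
 *
 * @idx: pointer to u64 loop variable
 * @zone: zone in which all of the memory blocks reside
 * @out_spfn: ptr to ulong for start pfn of the range, can be %NULL
 * @out_epfn: ptr to ulong for end pfn of the range, can be %NULL
 *
 * This function is meant to be a zone/pfn specific wrapper for the
 * for_each_mem_range type iterators. Specifically they are used in the
 * deferred memory init routines and as such we were duplicating much of
 * this logic throughout the code. So instead of having it in multiple
 * locations it seemed like it would make more sense to centralize this to
 * one new iterator that does everything they need.
 */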
void __init_memblock
__next_mem_pfn_range_in_zone(u64 *idx, struct zone *zone,
			     unsigned long *out_spfn, unsigned long *out_epfn)
{
	int zone_nid = zone_to_nid(zone);
	phys_addr_t spa, epa;
	int nid;

	__next_mem_range(idx, zone_nid, MEMBLOCK_NONE,
			 &memblock.memory, &memblock.reserved,
			 &spa, &epa, &nid);

	while (*idx != U64_MAX) {
		unsigned long epfn = PFN_DOWN(epa);
		unsigned long spfn = PFN_UP(spa);

		/*
		 * Verify the end is at least past the start of the zone and
		 * that we have at least one PFN to initialize.
		 */
		if (zone->zone_start_pfn < epfn && spfn < epfn) {
			/* if we went too far just stop searching */
			if (zone_end_pfn(zone) <= spfn) {
				*idx = U64_MAX;
				break;
			}

			if (out_spfn)
				*out_spfn = max(zone->zone_start_pfn, spfn);
			if (out_epfn)
				*out_epfn = min(zone_end_pfn(zone), epfn);

			return;
		}

		__next_mem_range(idx, zone_nid, MEMBLOCK_NONE,
				 &memblock.memory, &memblock.reserved,
				 &spa, &epa, &nid);
	}

	/* signal end of iteration */
	if (out_spfn)
		*out_spfn = ULONG_MAX;
	if (out_epfn)
		*out_epfn = 0;
}

#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */
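
/**
 * memblock_alloc_range_nid - allocate boot memory block
 * @size: size of memory block to be allocated in bytes
 * @align: alignment of the region and block's size
 * @start: the lower bound of the memory region to allocate (phys address)
 * @end: the upper bound of the memory region to allocate (phys address)
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 * @exact_nid: control the allocation fall back to other nodes
 *
 * The allocation is performed from memory region limited by
 * memblock.current_limit if @end == %MEMBLOCK_ALLOC_ACCESSIBLE.
 *
 * If the specified node can not hold the requested memory and @exact_nid
 * is false, the allocation falls back to any node in the system.
 *
 * For systems with memory mirroring, the allocation is attempted first
 * from the regions with mirroring enabled and then retried from any
 * memory region.
 *
 * In addition, function sets the min_count to 0 using kmemleak_alloc_phys for
 * allocated boot memory block, so that it is never reported as leaks.
 *
 * Return:
 * Physical address of allocated memory block on success, %0 on failure.
 */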
phys_addr_t __init memblock_alloc_range_nid(phys_addr_t size,
					phys_addr_t align, phys_addr_t start,
					phys_addr_t end, int nid,
					bool exact_nid)
{
	enum memblock_flags flags = choose_memblock_flags();
	phys_addr_t found;

	if (WARN_ONCE(nid == MAX_NUMNODES, "Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n"))
		nid = NUMA_NO_NODE;

	if (!align) {
		/* Can't use WARNs this early in boot on powerpc */
		dump_stack();
		align = SMP_CACHE_BYTES;
	}

again:
	found = memblock_find_in_range_node(size, align, start, end, nid,
					    flags);
	if (found && !memblock_reserve(found, size))
		goto done;

	if (nid != NUMA_NO_NODE && !exact_nid) {
		found = memblock_find_in_range_node(size, align, start,
						    end, NUMA_NO_NODE,
						    flags);
		if (found && !memblock_reserve(found, size))
			goto done;
	}

	if (flags & MEMBLOCK_MIRROR) {
		flags &= ~MEMBLOCK_MIRROR;
		pr_warn("Could not allocate %pap bytes of mirrored memory\n",
			&size);
		goto again;
	}

	return 0;

done:
	/* Skip kmemleak for kasan_init() due to high volume. */
	if (end != MEMBLOCK_ALLOC_KASAN)
		/*
		 * The min_count is set to 0 so that memblock allocated
		 * blocks are never reported as leaks. This is because many
		 * of these blocks are only referred via the physical
		 * address which is not looked up by kmemleak.
		 */
		kmemleak_alloc_phys(found, size, 0, 0);

	return found;
}
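
/**
 * memblock_phys_alloc_range - allocate a memory block inside specified range
 * @size: size of memory block to be allocated in bytes
 * @align: alignment of the region and block's size
 * @start: the lower bound of the memory region to allocate (physical address)
 * @end: the upper bound of the memory region to allocate (physical address)
 *
 * Allocate @size bytes in the range between @start and @end.
 *
 * Return: physical address of the allocated memory block on success,
 * %0 on failure.
 */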
phys_addr_t __init memblock_phys_alloc_range(phys_addr_t size,
					     phys_addr_t align,
					     phys_addr_t start,
					     phys_addr_t end)
{
	memblock_dbg("%s: %llu bytes align=0x%llx from=%pa max_addr=%pa %pS\n",
		     __func__, (u64)size, (u64)align, &start, &end,
		     (void *)_RET_IP_);
	return memblock_alloc_range_nid(size, align, start, end, NUMA_NO_NODE,
					false);
}
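
/**
 * memblock_phys_alloc_try_nid - allocate a memory block from specified NUMA node
 * @size: size of memory block to be allocated in bytes
 * @align: alignment of the region and block's size
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 *
 * Allocates memory block from the specified NUMA node. If the node
 * has no available memory, allocates from any node in the system.
 *
 * Return: physical address of the allocated memory block on success,
 * %0 on failure.
 */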
phys_addr_t __init memblock_phys_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid)
{
	return memblock_alloc_range_nid(size, align, 0,
					MEMBLOCK_ALLOC_ACCESSIBLE, nid, false);
}
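
/**
 * memblock_alloc_internal - allocate boot memory block
 * @size: size of memory block to be allocated in bytes
 * @align: alignment of the region and block's size
 * @min_addr: the lower bound of the memory region to allocate (phys address)
 * @max_addr: the upper bound of the memory region to allocate (phys address)
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 * @exact_nid: control the allocation fall back to other nodes
 *
 * Allocates memory block using memblock_alloc_range_nid() and
 * converts the returned physical address to virtual.
 *
 * The @min_addr limit is dropped if it can not be satisfied and the allocation
 * will fall back to memory below @min_addr. Other constraints, such
 * as node and mirrored memory will be handled again in
 * memblock_alloc_range_nid().
 *
 * Return:
 * Virtual address of allocated memory block on success, NULL on failure.
 */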
static void * __init memblock_alloc_internal(
				phys_addr_t size, phys_addr_t align,
				phys_addr_t min_addr, phys_addr_t max_addr,
				int nid, bool exact_nid)
{
	phys_addr_t alloc;

	/*
	 * Detect any accidental use of these APIs after slab is ready, as at
	 * this moment memblock may be deinitialized already and its
	 * internal data may be destroyed (after execution of memblock_free_all)
	 */
	if (WARN_ON_ONCE(slab_is_available()))
		return kzalloc_node(size, GFP_NOWAIT, nid);

	if (max_addr > memblock.current_limit)
		max_addr = memblock.current_limit;

	alloc = memblock_alloc_range_nid(size, align, min_addr, max_addr, nid,
					 exact_nid);

	/* retry allocation without lower limit */
	if (!alloc && min_addr)
		alloc = memblock_alloc_range_nid(size, align, 0, max_addr, nid,
						 exact_nid);

	if (!alloc)
		return NULL;

	return phys_to_virt(alloc);
}
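
/**
 * memblock_alloc_exact_nid_raw - allocate boot memory block on the exact node
 * without zeroing memory
 * @size: size of memory block to be allocated in bytes
 * @align: alignment of the region and block's size
 * @min_addr: the lower bound of the memory region from where the allocation
 *	  is preferred (phys address)
 * @max_addr: the upper bound of the memory region from where the allocation
 *	      is preferred (phys address), or %MEMBLOCK_ALLOC_ACCESSIBLE to
 *	      allocate only from memory limited by memblock.current_limit value
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 *
 * Public function, provides additional debug information (including caller
 * info), if enabled. Does not zero allocated memory, does not panic if request
 * cannot be satisfied.
 *
 * Return:
 * Virtual address of allocated memory block on success, NULL on failure.
 */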
void * __init memblock_alloc_exact_nid_raw(
			phys_addr_t size, phys_addr_t align,
			phys_addr_t min_addr, phys_addr_t max_addr,
			int nid)
{
	memblock_dbg("%s: %llu bytes align=0x%llx nid=%d from=%pa max_addr=%pa %pS\n",
		     __func__, (u64)size, (u64)align, nid, &min_addr,
		     &max_addr, (void *)_RET_IP_);

	return memblock_alloc_internal(size, align, min_addr, max_addr, nid,
				       true);
}
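
/**
 * memblock_alloc_try_nid_raw - allocate boot memory block without zeroing
 * memory and without panicking
 * @size: size of memory block to be allocated in bytes
 * @align: alignment of the region and block's size
 * @min_addr: the lower bound of the memory region from where the allocation
 *	  is preferred (phys address)
 * @max_addr: the upper bound of the memory region from where the allocation
 *	      is preferred (phys address), or %MEMBLOCK_ALLOC_ACCESSIBLE to
 *	      allocate only from memory limited by memblock.current_limit value
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 *
 * Public function, provides additional debug information (including caller
 * info), if enabled. Does not zero allocated memory, does not panic if request
 * cannot be satisfied.
 *
 * Return:
 * Virtual address of allocated memory block on success, NULL on failure.
 */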
void * __init memblock_alloc_try_nid_raw(
			phys_addr_t size, phys_addr_t align,
			phys_addr_t min_addr, phys_addr_t max_addr,
			int nid)
{
	memblock_dbg("%s: %llu bytes align=0x%llx nid=%d from=%pa max_addr=%pa %pS\n",
		     __func__, (u64)size, (u64)align, nid, &min_addr,
		     &max_addr, (void *)_RET_IP_);

	return memblock_alloc_internal(size, align, min_addr, max_addr, nid,
				       false);
}
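
/**
 * memblock_alloc_try_nid - allocate boot memory block
 * @size: size of memory block to be allocated in bytes
 * @align: alignment of the region and block's size
 * @min_addr: the lower bound of the memory region from where the allocation
 *	  is preferred (phys address)
 * @max_addr: the upper bound of the memory region from where the allocation
 *	      is preferred (phys address), or %MEMBLOCK_ALLOC_ACCESSIBLE to
 *	      allocate only from memory limited by memblock.current_limit value
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 *
 * Public function, provides additional debug information (including caller
 * info), if enabled. This function zeroes the allocated memory.
 *
 * Return:
 * Virtual address of allocated memory block on success, NULL on failure.
 */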
void * __init memblock_alloc_try_nid(
			phys_addr_t size, phys_addr_t align,
			phys_addr_t min_addr, phys_addr_t max_addr,
			int nid)
{
	void *ptr;

	memblock_dbg("%s: %llu bytes align=0x%llx nid=%d from=%pa max_addr=%pa %pS\n",
		     __func__, (u64)size, (u64)align, nid, &min_addr,
		     &max_addr, (void *)_RET_IP_);
	ptr = memblock_alloc_internal(size, align,
					   min_addr, max_addr, nid, false);
	if (ptr)
		memset(ptr, 0, size);

	return ptr;
}
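
/**
 * __memblock_free_late - free pages directly to the buddy allocator
 * @base: phys starting address of the boot memory block
 * @size: size of the boot memory block in bytes
 *
 * This is only useful when the memblock allocator has already been torn
 * down, but we are still initializing the system.  Pages released through
 * this API will go directly to the buddy allocator.
 */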
void __init __memblock_free_late(phys_addr_t base, phys_addr_t size)
{
	phys_addr_t cursor, end;

	end = base + size - 1;
	memblock_dbg("%s: [%pa-%pa] %pS\n",
		     __func__, &base, &end, (void *)_RET_IP_);
	kmemleak_free_part_phys(base, size);
	cursor = PFN_UP(base);
	end = PFN_DOWN(base + size);

	for (; cursor < end; cursor++) {
		memblock_free_pages(pfn_to_page(cursor), cursor, 0);
		totalram_pages_inc();
	}
}
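
/*
 * Remaining API functions
 */
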
phys_addr_t __init_memblock memblock_phys_mem_size(void)
{
	return memblock.memory.total_size;
}

phys_addr_t __init_memblock memblock_reserved_size(void)
{
	return memblock.reserved.total_size;
}

/* lowest address */
phys_addr_t __init_memblock memblock_start_of_DRAM(void)
{
	return memblock.memory.regions[0].base;
}

phys_addr_t __init_memblock memblock_end_of_DRAM(void)
{
	int idx = memblock.memory.cnt - 1;

	return (memblock.memory.regions[idx].base + memblock.memory.regions[idx].size);
}

static phys_addr_t __init_memblock __find_max_addr(phys_addr_t limit)
{
	phys_addr_t max_addr = PHYS_ADDR_MAX;
	struct memblock_region *r;

	/*
	 * translate the memory @limit size into the max address within one of
	 * the memory memblock regions, if the @limit exceeds the total size
	 * of those regions, max_addr will keep original value PHYS_ADDR_MAX
	 */
	for_each_mem_region(r) {
		if (limit <= r->size) {
			max_addr = r->base + limit;
			break;
		}
		limit -= r->size;
	}

	return max_addr;
}

void __init memblock_enforce_memory_limit(phys_addr_t limit)
{
	phys_addr_t max_addr;

	if (!limit)
		return;

	max_addr = __find_max_addr(limit);

	/* @limit exceeds the total size of the memory, nothing to do */
	if (max_addr == PHYS_ADDR_MAX)
		return;

	/* truncate both memory and reserved regions */
	memblock_remove_range(&memblock.memory, max_addr,
			      PHYS_ADDR_MAX);
	memblock_remove_range(&memblock.reserved, max_addr,
			      PHYS_ADDR_MAX);
}

void __init memblock_cap_memory_range(phys_addr_t base, phys_addr_t size)
{
	int start_rgn, end_rgn;
	int i, ret;

	if (!size)
		return;

	if (!memblock_memory->total_size) {
		pr_warn("%s: No memory registered yet\n", __func__);
		return;
	}

	ret = memblock_isolate_range(&memblock.memory, base, size,
				     &start_rgn, &end_rgn);
	if (ret)
		return;

	/* remove all the MAP regions */
	for (i = memblock.memory.cnt - 1; i >= end_rgn; i--)
		if (!memblock_is_nomap(&memblock.memory.regions[i]))
			memblock_remove_region(&memblock.memory, i);

	for (i = start_rgn - 1; i >= 0; i--)
		if (!memblock_is_nomap(&memblock.memory.regions[i]))
			memblock_remove_region(&memblock.memory, i);

	/* truncate the reserved regions */
	memblock_remove_range(&memblock.reserved, 0, base);
	memblock_remove_range(&memblock.reserved,
			base + size, PHYS_ADDR_MAX);
}

void __init memblock_mem_limit_remove_map(phys_addr_t limit)
{
	phys_addr_t max_addr;

	if (!limit)
		return;

	max_addr = __find_max_addr(limit);

	/* @limit exceeds the total size of the memory, nothing to do */
	if (max_addr == PHYS_ADDR_MAX)
		return;

	memblock_cap_memory_range(0, max_addr);
}

static int __init_memblock memblock_search(struct memblock_type *type, phys_addr_t addr)
{
	unsigned int left = 0, right = type->cnt;

	do {
		unsigned int mid = (right + left) / 2;

		if (addr < type->regions[mid].base)
			right = mid;
		else if (addr >= (type->regions[mid].base +
				  type->regions[mid].size))
			left = mid + 1;
		else
			return mid;
	} while (left < right);
	return -1;
}

bool __init_memblock memblock_is_reserved(phys_addr_t addr)
{
	return memblock_search(&memblock.reserved, addr) != -1;
}

bool __init_memblock memblock_is_memory(phys_addr_t addr)
{
	return memblock_search(&memblock.memory, addr) != -1;
}

bool __init_memblock memblock_is_map_memory(phys_addr_t addr)
{
	int i = memblock_search(&memblock.memory, addr);

	if (i == -1)
		return false;
	return !memblock_is_nomap(&memblock.memory.regions[i]);
}

int __init_memblock memblock_search_pfn_nid(unsigned long pfn,
			 unsigned long *start_pfn, unsigned long *end_pfn)
{
	struct memblock_type *type = &memblock.memory;
	int mid = memblock_search(type, PFN_PHYS(pfn));

	if (mid == -1)
		return -1;

	*start_pfn = PFN_DOWN(type->regions[mid].base);
	*end_pfn = PFN_DOWN(type->regions[mid].base + type->regions[mid].size);

	return memblock_get_region_node(&type->regions[mid]);
}
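
/**
 * memblock_is_region_memory - check if a region is a subset of memory
 * @base: base of region to check
 * @size: size of region to check
 *
 * Check if the region [@base, @base + @size) is a subset of a memory block.
 *
 * Return:
 * 0 if false, non-zero if true
 */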
bool __init_memblock memblock_is_region_memory(phys_addr_t base, phys_addr_t size)
{
	int idx = memblock_search(&memblock.memory, base);
	phys_addr_t end = base + memblock_cap_size(base, &size);

	if (idx == -1)
		return false;
	return (memblock.memory.regions[idx].base +
		 memblock.memory.regions[idx].size) >= end;
}
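
/**
 * memblock_is_region_reserved - check if a region intersects reserved memory
 * @base: base of region to check
 * @size: size of region to check
 *
 * Check if the region [@base, @base + @size) intersects a reserved
 * memory block.
 *
 * Return:
 * True if they intersect, false if not.
 */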
bool __init_memblock memblock_is_region_reserved(phys_addr_t base, phys_addr_t size)
{
	return memblock_overlaps_region(&memblock.reserved, base, size);
}

void __init_memblock memblock_trim_memory(phys_addr_t align)
{
	phys_addr_t start, end, orig_start, orig_end;
	struct memblock_region *r;

	for_each_mem_region(r) {
		orig_start = r->base;
		orig_end = r->base + r->size;
		start = round_up(orig_start, align);
		end = round_down(orig_end, align);

		if (start == orig_start && end == orig_end)
			continue;

		if (start < end) {
			r->base = start;
			r->size = end - start;
		} else {
			memblock_remove_region(&memblock.memory,
					       r - memblock.memory.regions);
			r--;
		}
	}
}

void __init_memblock memblock_set_current_limit(phys_addr_t limit)
{
	memblock.current_limit = limit;
}

phys_addr_t __init_memblock memblock_get_current_limit(void)
{
	return memblock.current_limit;
}

static void __init_memblock memblock_dump(struct memblock_type *type)
{
	phys_addr_t base, end, size;
	enum memblock_flags flags;
	int idx;
	struct memblock_region *rgn;

	pr_info(" %s.cnt = 0x%lx\n", type->name, type->cnt);

	for_each_memblock_type(idx, type, rgn) {
		char nid_buf[32] = "";

		base = rgn->base;
		size = rgn->size;
		end = base + size - 1;
		flags = rgn->flags;
#ifdef CONFIG_NUMA
		if (memblock_get_region_node(rgn) != MAX_NUMNODES)
			snprintf(nid_buf, sizeof(nid_buf), " on node %d",
				 memblock_get_region_node(rgn));
#endif
		pr_info(" %s[%#x]\t[%pa-%pa], %pa bytes%s flags: %#x\n",
			type->name, idx, &base, &end, &size, nid_buf, flags);
	}
}

static void __init_memblock __memblock_dump_all(void)
{
	pr_info("MEMBLOCK configuration:\n");
	pr_info(" memory size = %pa reserved size = %pa\n",
		&memblock.memory.total_size,
		&memblock.reserved.total_size);

	memblock_dump(&memblock.memory);
	memblock_dump(&memblock.reserved);
#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
	memblock_dump(&physmem);
#endif
}

void __init_memblock memblock_dump_all(void)
{
	if (memblock_debug)
		__memblock_dump_all();
}

void __init memblock_allow_resize(void)
{
	memblock_can_resize = 1;
}

static int __init early_memblock(char *p)
{
	if (p && strstr(p, "debug"))
		memblock_debug = 1;
	return 0;
}
early_param("memblock", early_memblock);

static void __init free_memmap(unsigned long start_pfn, unsigned long end_pfn)
{
	struct page *start_pg, *end_pg;
	phys_addr_t pg, pgend;

	/*
	 * Convert start_pfn/end_pfn to a struct page pointer.
	 */
	start_pg = pfn_to_page(start_pfn - 1) + 1;
	end_pg = pfn_to_page(end_pfn - 1) + 1;

	/*
	 * Convert to physical addresses, and round start upwards and end
	 * downwards.
	 */
	pg = PAGE_ALIGN(__pa(start_pg));
	pgend = __pa(end_pg) & PAGE_MASK;

	/*
	 * If there are free pages between these, free the section of the
	 * memmap array.
	 */
	if (pg < pgend)
		memblock_free(pg, pgend - pg);
}

/*
 * The mem_map array can get very big.  Free the unused area of the memory map.
 */
static void __init free_unused_memmap(void)
{
	unsigned long start, end, prev_end = 0;
	int i;

	if (!IS_ENABLED(CONFIG_HAVE_ARCH_PFN_VALID) ||
	    IS_ENABLED(CONFIG_SPARSEMEM_VMEMMAP))
		return;

	/*
	 * This relies on each bank being in address order.
	 * The banks are sorted previously in bootmem_init().
	 */
	for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, NULL) {
#ifdef CONFIG_SPARSEMEM
		/*
		 * Take care not to free memmap entries that don't exist
		 * due to SPARSEMEM sections which aren't present.
		 */
		start = min(start, ALIGN(prev_end, PAGES_PER_SECTION));
#endif
		/*
		 * Align down here since many operations in VM subsystem
		 * presume that there are no holes in the memory map inside
		 * a pageblock
		 */
		start = round_down(start, pageblock_nr_pages);

		/*
		 * If we had a previous bank, and there is a space
		 * between the current bank and the previous, free it.
		 */
		if (prev_end && prev_end < start)
			free_memmap(prev_end, start);

		/*
		 * Align up here since many operations in VM subsystem
		 * presume that there are no holes in the memory map inside
		 * a pageblock
		 */
		prev_end = ALIGN(end, pageblock_nr_pages);
	}

#ifdef CONFIG_SPARSEMEM
	if (!IS_ALIGNED(prev_end, PAGES_PER_SECTION)) {
		prev_end = ALIGN(end, pageblock_nr_pages);
		free_memmap(prev_end, ALIGN(prev_end, PAGES_PER_SECTION));
	}
#endif
}

static void __init __free_pages_memory(unsigned long start, unsigned long end)
{
	int order;

	while (start < end) {
		order = min(MAX_ORDER - 1UL, __ffs(start));

		while (start + (1UL << order) > end)
			order--;

		memblock_free_pages(pfn_to_page(start), start, order);

		start += (1UL << order);
	}
}

static unsigned long __init __free_memory_core(phys_addr_t start,
				 phys_addr_t end)
{
	unsigned long start_pfn = PFN_UP(start);
	unsigned long end_pfn = min_t(unsigned long,
				      PFN_DOWN(end), max_low_pfn);

	if (start_pfn >= end_pfn)
		return 0;

	__free_pages_memory(start_pfn, end_pfn);

	return end_pfn - start_pfn;
}

static void __init memmap_init_reserved_pages(void)
{
	struct memblock_region *region;
	phys_addr_t start, end;
	u64 i;

	/* initialize struct pages for the reserved regions */
	for_each_reserved_mem_range(i, &start, &end)
		reserve_bootmem_region(start, end);

	/* and also treat struct pages for the NOMAP regions as PageReserved */
	for_each_mem_region(region) {
		if (memblock_is_nomap(region)) {
			start = region->base;
			end = start + region->size;
			reserve_bootmem_region(start, end);
		}
	}
}

static unsigned long __init free_low_memory_core_early(void)
{
	unsigned long count = 0;
	phys_addr_t start, end;
	u64 i;

	memblock_clear_hotplug(0, -1);

	memmap_init_reserved_pages();

	/*
	 * We need to use NUMA_NO_NODE instead of NODE_DATA(0)->node_id
	 * because in some case like Node0 doesn't have RAM installed
	 * low ram will be on Node1
	 */
	for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE, &start, &end,
				NULL)
		count += __free_memory_core(start, end);

	return count;
}

static int reset_managed_pages_done __initdata;

void reset_node_managed_pages(pg_data_t *pgdat)
{
	struct zone *z;

	for (z = pgdat->node_zones; z < pgdat->node_zones + MAX_NR_ZONES; z++)
		atomic_long_set(&z->managed_pages, 0);
}

void __init reset_all_zones_managed_pages(void)
{
	struct pglist_data *pgdat;

	if (reset_managed_pages_done)
		return;

	for_each_online_pgdat(pgdat)
		reset_node_managed_pages(pgdat);

	reset_managed_pages_done = 1;
}
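
/**
 * memblock_free_all - release free pages to the buddy allocator
 */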
void __init memblock_free_all(void)
{
	unsigned long pages;

	free_unused_memmap();
	reset_all_zones_managed_pages();

	pages = free_low_memory_core_early();
	totalram_pages_add(pages);
}

#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_ARCH_KEEP_MEMBLOCK)

static int memblock_debug_show(struct seq_file *m, void *private)
{
	struct memblock_type *type = m->private;
	struct memblock_region *reg;
	int i;
	phys_addr_t end;

	for (i = 0; i < type->cnt; i++) {
		reg = &type->regions[i];
		end = reg->base + reg->size - 1;

		seq_printf(m, "%4d: ", i);
		seq_printf(m, "%pa..%pa\n", &reg->base, &end);
	}
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(memblock_debug);

static int __init memblock_init_debugfs(void)
{
	struct dentry *root = debugfs_create_dir("memblock", NULL);

	debugfs_create_file("memory", 0444, root,
			    &memblock.memory, &memblock_debug_fops);
	debugfs_create_file("reserved", 0444, root,
			    &memblock.reserved, &memblock_debug_fops);
#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
	debugfs_create_file("physmem", 0444, root, &physmem,
			    &memblock_debug_fops);
#endif

	return 0;
}
__initcall(memblock_init_debugfs);

#endif /* CONFIG_DEBUG_FS */