1
2
3
4
5
6
7
8
9
10
11
12
13#include <linux/kernel.h>
14#include <linux/slab.h>
15#include <linux/init.h>
16#include <linux/bitops.h>
17#include <linux/poison.h>
18#include <linux/pfn.h>
19#include <linux/debugfs.h>
20#include <linux/kmemleak.h>
21#include <linux/seq_file.h>
22#include <linux/memblock.h>
23#include <linux/bootmem.h>
24
25#include <asm/sections.h>
26#include <linux/io.h>
27
28#include "internal.h"
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
/*
 * Static backing storage for the initial region arrays.  These are used
 * until/unless memblock_double_array() replaces them with dynamically
 * allocated arrays (it compares against these symbols to decide whether
 * the old array may be freed).
 */
static struct memblock_region memblock_memory_init_regions[INIT_MEMBLOCK_REGIONS] __initdata_memblock;
static struct memblock_region memblock_reserved_init_regions[INIT_MEMBLOCK_REGIONS] __initdata_memblock;
#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
static struct memblock_region memblock_physmem_init_regions[INIT_PHYSMEM_REGIONS] __initdata_memblock;
#endif
90
/*
 * The single global memblock instance, statically initialized so it is
 * usable before any allocator exists.  Each type starts with cnt == 1:
 * a memblock_type always holds at least one (possibly empty) region.
 */
struct memblock memblock __initdata_memblock = {
	.memory.regions = memblock_memory_init_regions,
	.memory.cnt = 1,	/* empty dummy entry */
	.memory.max = INIT_MEMBLOCK_REGIONS,
	.memory.name = "memory",

	.reserved.regions = memblock_reserved_init_regions,
	.reserved.cnt = 1,	/* empty dummy entry */
	.reserved.max = INIT_MEMBLOCK_REGIONS,
	.reserved.name = "reserved",

#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
	.physmem.regions = memblock_physmem_init_regions,
	.physmem.cnt = 1,	/* empty dummy entry */
	.physmem.max = INIT_PHYSMEM_REGIONS,
	.physmem.name = "physmem",
#endif

	.bottom_up = false,	/* default allocation direction: top-down */
	.current_limit = MEMBLOCK_ALLOC_ANYWHERE,
};
112
/* Non-zero presumably enables memblock_dbg() output -- set elsewhere; confirm against header. */
int memblock_debug __initdata_memblock;
/* True once any region has been marked MEMBLOCK_MIRROR (see memblock_mark_mirror()). */
static bool system_has_some_mirror __initdata_memblock = false;
/* Gate for memblock_double_array(): resizing is refused until this is set. */
static int memblock_can_resize __initdata_memblock;
/* Track whether each type's region array currently lives in the slab. */
static int memblock_memory_in_slab __initdata_memblock = 0;
static int memblock_reserved_in_slab __initdata_memblock = 0;
118
119enum memblock_flags __init_memblock choose_memblock_flags(void)
120{
121 return system_has_some_mirror ? MEMBLOCK_MIRROR : MEMBLOCK_NONE;
122}
123
124
125static inline phys_addr_t memblock_cap_size(phys_addr_t base, phys_addr_t *size)
126{
127 return *size = min(*size, PHYS_ADDR_MAX - base);
128}
129
130
131
132
133static unsigned long __init_memblock memblock_addrs_overlap(phys_addr_t base1, phys_addr_t size1,
134 phys_addr_t base2, phys_addr_t size2)
135{
136 return ((base1 < (base2 + size2)) && (base2 < (base1 + size1)));
137}
138
139bool __init_memblock memblock_overlaps_region(struct memblock_type *type,
140 phys_addr_t base, phys_addr_t size)
141{
142 unsigned long i;
143
144 for (i = 0; i < type->cnt; i++)
145 if (memblock_addrs_overlap(base, size, type->regions[i].base,
146 type->regions[i].size))
147 break;
148 return i < type->cnt;
149}
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
/*
 * __memblock_find_range_bottom_up - find free area from the lowest address up
 * @start: start of candidate range
 * @end: end of candidate range
 * @size: size of free area to find
 * @align: alignment of free area to find
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 * @flags: pick from blocks based on memory attributes
 *
 * Walks the free (memory minus reserved) ranges in ascending order and
 * returns the first aligned address with @size bytes available, clamped
 * to [@start, @end).  Returns 0 on failure.
 */
static phys_addr_t __init_memblock
__memblock_find_range_bottom_up(phys_addr_t start, phys_addr_t end,
				phys_addr_t size, phys_addr_t align, int nid,
				enum memblock_flags flags)
{
	phys_addr_t this_start, this_end, cand;
	u64 i;

	for_each_free_mem_range(i, nid, flags, &this_start, &this_end, NULL) {
		this_start = clamp(this_start, start, end);
		this_end = clamp(this_end, start, end);

		cand = round_up(this_start, align);
		if (cand < this_end && this_end - cand >= size)
			return cand;
	}

	return 0;
}
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
/*
 * __memblock_find_range_top_down - find free area from the highest address down
 * @start: start of candidate range
 * @end: end of candidate range
 * @size: size of free area to find
 * @align: alignment of free area to find
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 * @flags: pick from blocks based on memory attributes
 *
 * Walks the free ranges in descending order and returns the highest
 * aligned address with @size bytes available, clamped to [@start, @end).
 * Returns 0 on failure.
 */
static phys_addr_t __init_memblock
__memblock_find_range_top_down(phys_addr_t start, phys_addr_t end,
			       phys_addr_t size, phys_addr_t align, int nid,
			       enum memblock_flags flags)
{
	phys_addr_t this_start, this_end, cand;
	u64 i;

	for_each_free_mem_range_reverse(i, nid, flags, &this_start, &this_end,
					NULL) {
		this_start = clamp(this_start, start, end);
		this_end = clamp(this_end, start, end);

		/* range too small to hold the allocation at all */
		if (this_end < size)
			continue;

		cand = round_down(this_end - size, align);
		if (cand >= this_start)
			return cand;
	}

	return 0;
}
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
/*
 * memblock_find_in_range_node - find free area in given range and node
 * @size: size of free area to find
 * @align: alignment of free area to find
 * @start: start of candidate range
 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_ACCESSIBLE
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 * @flags: pick from blocks based on memory attributes
 *
 * When bottom-up allocation is enabled, allocation starts above the
 * kernel image so that low memory is preserved; if that fails, fall
 * back to a top-down search (with a warning, since falling back may
 * break memory hotremove expectations).
 *
 * Return: found address on success, 0 on failure.
 */
phys_addr_t __init_memblock memblock_find_in_range_node(phys_addr_t size,
					phys_addr_t align, phys_addr_t start,
					phys_addr_t end, int nid,
					enum memblock_flags flags)
{
	phys_addr_t kernel_end, ret;

	/* pump up @end to the accessible limit if requested */
	if (end == MEMBLOCK_ALLOC_ACCESSIBLE)
		end = memblock.current_limit;

	/* avoid allocating the first page and keep end >= start */
	start = max_t(phys_addr_t, start, PAGE_SIZE);
	end = max(start, end);
	kernel_end = __pa_symbol(_end);

	/*
	 * try bottom-up allocation only when bottom-up mode is set and
	 * the range permits allocating above the kernel image.
	 */
	if (memblock_bottom_up() && end > kernel_end) {
		phys_addr_t bottom_up_start;

		/* make sure we will allocate above the kernel */
		bottom_up_start = max(start, kernel_end);

		/* ok, try bottom-up allocation first */
		ret = __memblock_find_range_bottom_up(bottom_up_start, end,
						      size, align, nid, flags);
		if (ret)
			return ret;

		/*
		 * Bottom-up failed; warn because the top-down fallback may
		 * place boot-time allocations in hot-removable memory.
		 */
		WARN_ONCE(IS_ENABLED(CONFIG_MEMORY_HOTREMOVE),
			  "memblock: bottom-up allocation failed, memory hotremove may be affected\n");
	}

	return __memblock_find_range_top_down(start, end, size, align, nid,
					      flags);
}
297
298
299
300
301
302
303
304
305
306
307
308
309
310
/*
 * memblock_find_in_range - find free area in given range
 * @start: start of candidate range
 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_ACCESSIBLE
 * @size: size of free area to find
 * @align: alignment of free area to find
 *
 * Node-agnostic front end: tries with the default flags first and, if
 * mirrored memory was requested but unavailable, retries without the
 * mirror constraint.
 *
 * Return: found address on success, 0 on failure.
 */
phys_addr_t __init_memblock memblock_find_in_range(phys_addr_t start,
					phys_addr_t end, phys_addr_t size,
					phys_addr_t align)
{
	phys_addr_t ret;
	enum memblock_flags flags = choose_memblock_flags();

again:
	ret = memblock_find_in_range_node(size, align, start, end,
					  NUMA_NO_NODE, flags);

	if (!ret && (flags & MEMBLOCK_MIRROR)) {
		pr_warn("Could not allocate %pap bytes of mirrored memory\n",
			&size);
		flags &= ~MEMBLOCK_MIRROR;
		goto again;
	}

	return ret;
}
331
/*
 * Remove region @r from @type, compacting the array.  A memblock_type
 * never drops below one region: when the last one goes away, keep a
 * single empty placeholder instead (cnt stays 1).
 */
static void __init_memblock memblock_remove_region(struct memblock_type *type, unsigned long r)
{
	type->total_size -= type->regions[r].size;
	memmove(&type->regions[r], &type->regions[r + 1],
		(type->cnt - (r + 1)) * sizeof(type->regions[r]));
	type->cnt--;

	/* Special case for empty arrays */
	if (type->cnt == 0) {
		WARN_ON(type->total_size != 0);
		type->cnt = 1;
		type->regions[0].base = 0;
		type->regions[0].size = 0;
		type->regions[0].flags = 0;
		memblock_set_region_node(&type->regions[0], MAX_NUMNODES);
	}
}
349
#ifdef CONFIG_ARCH_DISCARD_MEMBLOCK
/**
 * memblock_discard - discard memory and reserved arrays if they were allocated
 *
 * Frees the region arrays back to the page allocator when they were
 * dynamically grown (see memblock_double_array()); the static
 * __initdata arrays are left untouched.
 */
void __init memblock_discard(void)
{
	phys_addr_t addr, size;

	if (memblock.reserved.regions != memblock_reserved_init_regions) {
		addr = __pa(memblock.reserved.regions);
		size = PAGE_ALIGN(sizeof(struct memblock_region) *
				  memblock.reserved.max);
		__memblock_free_late(addr, size);
	}

	if (memblock.memory.regions != memblock_memory_init_regions) {
		addr = __pa(memblock.memory.regions);
		size = PAGE_ALIGN(sizeof(struct memblock_region) *
				  memblock.memory.max);
		__memblock_free_late(addr, size);
	}
}
#endif
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
/**
 * memblock_double_array - double the size of the memblock regions array
 * @type: memblock type of the regions array being doubled
 * @new_area_start: starting address of memory range to avoid overlap with
 * @new_area_size: size of memory range to avoid overlap with
 *
 * Doubles @type->regions.  The new array comes from the slab when it is
 * available, otherwise from memblock itself.  In the latter case the
 * search avoids [@new_area_start, @new_area_start + @new_area_size),
 * which the caller is in the middle of inserting into the reserved type.
 *
 * Return: 0 on success, -1 on failure.
 */
static int __init_memblock memblock_double_array(struct memblock_type *type,
						phys_addr_t new_area_start,
						phys_addr_t new_area_size)
{
	struct memblock_region *new_array, *old_array;
	phys_addr_t old_alloc_size, new_alloc_size;
	phys_addr_t old_size, new_size, addr, new_end;
	int use_slab = slab_is_available();
	int *in_slab;

	/* resizing is disabled until the arch says it is safe */
	if (!memblock_can_resize)
		return -1;

	/* Calculate new doubled size */
	old_size = type->max * sizeof(struct memblock_region);
	new_size = old_size << 1;

	/* page-align the allocation sizes so a later free works cleanly */
	old_alloc_size = PAGE_ALIGN(old_size);
	new_alloc_size = PAGE_ALIGN(new_size);

	/* Retrieve the slab flag for this array */
	if (type == &memblock.memory)
		in_slab = &memblock_memory_in_slab;
	else
		in_slab = &memblock_reserved_in_slab;

	/*
	 * Prefer the slab when available.  Otherwise allocate from
	 * memblock: first above the in-flight new area, then, as a
	 * fallback, anywhere below it.
	 */
	if (use_slab) {
		new_array = kmalloc(new_size, GFP_KERNEL);
		addr = new_array ? __pa(new_array) : 0;
	} else {
		/* only exclude range when trying to double reserved.regions */
		if (type != &memblock.reserved)
			new_area_start = new_area_size = 0;

		addr = memblock_find_in_range(new_area_start + new_area_size,
						memblock.current_limit,
						new_alloc_size, PAGE_SIZE);
		if (!addr && new_area_size)
			addr = memblock_find_in_range(0,
				min(new_area_start, memblock.current_limit),
				new_alloc_size, PAGE_SIZE);

		new_array = addr ? __va(addr) : NULL;
	}
	if (!addr) {
		pr_err("memblock: Failed to double %s array from %ld to %ld entries !\n",
		       type->name, type->max, type->max * 2);
		return -1;
	}

	new_end = addr + new_size - 1;
	memblock_dbg("memblock: %s is doubled to %ld at [%pa-%pa]",
			type->name, type->max * 2, &addr, &new_end);

	/*
	 * Copy the old array over, zero the new tail, then switch the
	 * type to the new array before freeing/reserving anything --
	 * those operations may themselves walk the array.
	 */
	memcpy(new_array, type->regions, old_size);
	memset(new_array + type->max, 0, old_size);
	old_array = type->regions;
	type->regions = new_array;
	type->max <<= 1;

	/* Free the old array unless it was the static bootstrap storage */
	if (*in_slab)
		kfree(old_array);
	else if (old_array != memblock_memory_init_regions &&
		 old_array != memblock_reserved_init_regions)
		memblock_free(__pa(old_array), old_alloc_size);

	/*
	 * A memblock-carved array must be reserved; this cannot fail
	 * since the type can now hold the extra region.
	 */
	if (!use_slab)
		BUG_ON(memblock_reserve(addr, new_alloc_size));

	/* Update slab flag */
	*in_slab = use_slab;

	return 0;
}
490
491
492
493
494
495
496
/**
 * memblock_merge_regions - merge neighboring compatible regions
 * @type: memblock type to scan
 *
 * Coalesces regions that are physically adjacent and have the same node
 * id and flags.  Regions that merely touch but differ in node/flags are
 * kept separate; overlapping regions would indicate corruption (BUG).
 */
static void __init_memblock memblock_merge_regions(struct memblock_type *type)
{
	int i = 0;

	/* cnt never goes below 1 */
	while (i < type->cnt - 1) {
		struct memblock_region *this = &type->regions[i];
		struct memblock_region *next = &type->regions[i + 1];

		if (this->base + this->size != next->base ||
		    memblock_get_region_node(this) !=
		    memblock_get_region_node(next) ||
		    this->flags != next->flags) {
			BUG_ON(this->base + this->size > next->base);
			i++;
			continue;
		}

		this->size += next->size;
		/* move forward from next + 1, index of which is i + 2 */
		memmove(next, next + 1, (type->cnt - (i + 2)) * sizeof(*next));
		type->cnt--;
	}
}
521
522
523
524
525
526
527
528
529
530
531
532
533
/**
 * memblock_insert_region - insert new memblock region
 * @type: memblock type to insert into
 * @idx: index for the insertion point
 * @base: base address of the new region
 * @size: size of the new region
 * @nid: node id of the new region
 * @flags: flags of the new region
 *
 * Inserts a region at @idx, shifting the tail of the array up by one.
 * The caller must ensure the array has room (type->cnt < type->max).
 */
static void __init_memblock memblock_insert_region(struct memblock_type *type,
						   int idx, phys_addr_t base,
						   phys_addr_t size,
						   int nid,
						   enum memblock_flags flags)
{
	struct memblock_region *rgn = &type->regions[idx];

	BUG_ON(type->cnt >= type->max);
	memmove(rgn + 1, rgn, (type->cnt - idx) * sizeof(*rgn));
	rgn->base = base;
	rgn->size = size;
	rgn->flags = flags;
	memblock_set_region_node(rgn, nid);
	type->cnt++;
	type->total_size += size;
}
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
/**
 * memblock_add_range - add new memblock region
 * @type: memblock type to add new region into
 * @base: base address of the new region
 * @size: size of the new region
 * @nid: nid of the new region
 * @flags: flags of the new region
 *
 * Adds [@base, @base + @size) into @type.  Regions which already exist
 * are not re-added; only the missing portions are inserted, and
 * compatible neighbours are merged afterwards.
 *
 * Return: 0 on success, -%ENOMEM if the region array could not be grown.
 */
int __init_memblock memblock_add_range(struct memblock_type *type,
				phys_addr_t base, phys_addr_t size,
				int nid, enum memblock_flags flags)
{
	bool insert = false;
	phys_addr_t obase = base;
	phys_addr_t end = base + memblock_cap_size(base, &size);
	int idx, nr_new;
	struct memblock_region *rgn;

	if (!size)
		return 0;

	/* special case for empty array */
	if (type->regions[0].size == 0) {
		WARN_ON(type->cnt != 1 || type->total_size);
		type->regions[0].base = base;
		type->regions[0].size = size;
		type->regions[0].flags = flags;
		memblock_set_region_node(&type->regions[0], nid);
		type->total_size = size;
		return 0;
	}
repeat:
	/*
	 * The following is executed twice.  Once with %false @insert and
	 * then with %true.  The first pass only counts the number of
	 * regions needed to accommodate the new area (so the array can be
	 * resized up front).  The second pass actually inserts them.
	 */
	base = obase;
	nr_new = 0;

	for_each_memblock_type(idx, type, rgn) {
		phys_addr_t rbase = rgn->base;
		phys_addr_t rend = rbase + rgn->size;

		if (rbase >= end)
			break;
		if (rend <= base)
			continue;
		/*
		 * @rgn overlaps.  If it separates the lower part of the
		 * new area, insert that portion.
		 */
		if (rbase > base) {
#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
			WARN_ON(nid != memblock_get_region_node(rgn));
#endif
			WARN_ON(flags != rgn->flags);
			nr_new++;
			if (insert)
				memblock_insert_region(type, idx++, base,
						       rbase - base, nid,
						       flags);
		}
		/* area below @rend is dealt with, forget about it */
		base = min(rend, end);
	}

	/* insert the remaining portion */
	if (base < end) {
		nr_new++;
		if (insert)
			memblock_insert_region(type, idx, base, end - base,
					       nid, flags);
	}

	if (!nr_new)
		return 0;

	/*
	 * If this was the first round, resize array and repeat for actual
	 * insertions; otherwise, merge and return.
	 */
	if (!insert) {
		while (type->cnt + nr_new > type->max)
			if (memblock_double_array(type, obase, size) < 0)
				return -ENOMEM;
		insert = true;
		goto repeat;
	} else {
		memblock_merge_regions(type);
		return 0;
	}
}
653
654
655
656
657
658
659
660
661
662
663
664
665
/**
 * memblock_add_node - add new memblock region within a NUMA node
 * @base: base address of the new region
 * @size: size of the new region
 * @nid: nid of the new region
 *
 * Adds [@base, @base + @size) to the "memory" type with node @nid set.
 *
 * Return: 0 on success, -errno on failure.
 */
int __init_memblock memblock_add_node(phys_addr_t base, phys_addr_t size,
				       int nid)
{
	return memblock_add_range(&memblock.memory, base, size, nid, 0);
}
671
672
673
674
675
676
677
678
679
680
681
682
683int __init_memblock memblock_add(phys_addr_t base, phys_addr_t size)
684{
685 phys_addr_t end = base + size - 1;
686
687 memblock_dbg("memblock_add: [%pa-%pa] %pF\n",
688 &base, &end, (void *)_RET_IP_);
689
690 return memblock_add_range(&memblock.memory, base, size, MAX_NUMNODES, 0);
691}
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
/**
 * memblock_isolate_range - isolate given range into disjoint memblocks
 * @type: memblock type to isolate range for
 * @base: base of range to isolate
 * @size: size of range to isolate
 * @start_rgn: out parameter for the start of isolated region
 * @end_rgn: out parameter for the end of isolated region
 *
 * Walk @type and ensure that regions don't cross the boundaries defined
 * by [@base, @base + @size).  Crossing regions are split at the
 * boundaries, which may create at most two more regions (hence the
 * "+ 2" headroom check below).  The index of the first isolated region
 * is returned in *@start_rgn and the end of region index in *@end_rgn.
 *
 * Return: 0 on success, -%ENOMEM if the array could not be grown.
 */
static int __init_memblock memblock_isolate_range(struct memblock_type *type,
					phys_addr_t base, phys_addr_t size,
					int *start_rgn, int *end_rgn)
{
	phys_addr_t end = base + memblock_cap_size(base, &size);
	int idx;
	struct memblock_region *rgn;

	*start_rgn = *end_rgn = 0;

	if (!size)
		return 0;

	/* we'll create at most two more regions */
	while (type->cnt + 2 > type->max)
		if (memblock_double_array(type, base, size) < 0)
			return -ENOMEM;

	for_each_memblock_type(idx, type, rgn) {
		phys_addr_t rbase = rgn->base;
		phys_addr_t rend = rbase + rgn->size;

		if (rbase >= end)
			break;
		if (rend <= base)
			continue;

		if (rbase < base) {
			/*
			 * @rgn intersects from below.  Split and continue
			 * to process the next region - the new top half.
			 */
			rgn->base = base;
			rgn->size -= base - rbase;
			type->total_size -= base - rbase;
			memblock_insert_region(type, idx, rbase, base - rbase,
					       memblock_get_region_node(rgn),
					       rgn->flags);
		} else if (rend > end) {
			/*
			 * @rgn intersects from above.  Split and redo the
			 * current region - the new bottom half.
			 */
			rgn->base = end;
			rgn->size -= end - rbase;
			type->total_size -= end - rbase;
			memblock_insert_region(type, idx--, rbase, end - rbase,
					       memblock_get_region_node(rgn),
					       rgn->flags);
		} else {
			/* @rgn is fully contained, record it */
			if (!*end_rgn)
				*start_rgn = idx;
			*end_rgn = idx + 1;
		}
	}

	return 0;
}
768
/*
 * Remove [base, base + size) from @type: isolate the range into whole
 * regions, then delete them back-to-front so the recorded indices stay
 * valid as the array is compacted.
 */
static int __init_memblock memblock_remove_range(struct memblock_type *type,
					  phys_addr_t base, phys_addr_t size)
{
	int start_rgn, end_rgn;
	int i, ret;

	ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
	if (ret)
		return ret;

	for (i = end_rgn - 1; i >= start_rgn; i--)
		memblock_remove_region(type, i);
	return 0;
}
783
784int __init_memblock memblock_remove(phys_addr_t base, phys_addr_t size)
785{
786 phys_addr_t end = base + size - 1;
787
788 memblock_dbg("memblock_remove: [%pa-%pa] %pS\n",
789 &base, &end, (void *)_RET_IP_);
790
791 return memblock_remove_range(&memblock.memory, base, size);
792}
793
794
795int __init_memblock memblock_free(phys_addr_t base, phys_addr_t size)
796{
797 phys_addr_t end = base + size - 1;
798
799 memblock_dbg(" memblock_free: [%pa-%pa] %pF\n",
800 &base, &end, (void *)_RET_IP_);
801
802 kmemleak_free_part_phys(base, size);
803 return memblock_remove_range(&memblock.reserved, base, size);
804}
805
806int __init_memblock memblock_reserve(phys_addr_t base, phys_addr_t size)
807{
808 phys_addr_t end = base + size - 1;
809
810 memblock_dbg("memblock_reserve: [%pa-%pa] %pF\n",
811 &base, &end, (void *)_RET_IP_);
812
813 return memblock_add_range(&memblock.reserved, base, size, MAX_NUMNODES, 0);
814}
815
816
817
818
819
820
821
822
823
824
825
826
/**
 * memblock_setclr_flag - set or clear flag for a memory region
 * @base: base address of the region
 * @size: size of the region
 * @set: set or clear the flag
 * @flag: the flag to update
 *
 * Isolates [@base, @base + @size) in the "memory" type, applies the
 * flag change to every isolated region, then re-merges neighbours.
 *
 * Return: 0 on success, -errno on failure.
 */
static int __init_memblock memblock_setclr_flag(phys_addr_t base,
				phys_addr_t size, int set, int flag)
{
	struct memblock_type *type = &memblock.memory;
	int i, ret, start_rgn, end_rgn;

	ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
	if (ret)
		return ret;

	for (i = start_rgn; i < end_rgn; i++)
		if (set)
			memblock_set_region_flags(&type->regions[i], flag);
		else
			memblock_clear_region_flags(&type->regions[i], flag);

	memblock_merge_regions(type);
	return 0;
}
846
847
848
849
850
851
852
853
/* Mark [base, base + size) as hotpluggable (MEMBLOCK_HOTPLUG). */
int __init_memblock memblock_mark_hotplug(phys_addr_t base, phys_addr_t size)
{
	return memblock_setclr_flag(base, size, 1, MEMBLOCK_HOTPLUG);
}
858
859
860
861
862
863
864
865
/* Clear the MEMBLOCK_HOTPLUG flag on [base, base + size). */
int __init_memblock memblock_clear_hotplug(phys_addr_t base, phys_addr_t size)
{
	return memblock_setclr_flag(base, size, 0, MEMBLOCK_HOTPLUG);
}
870
871
872
873
874
875
876
877
/*
 * Mark [base, base + size) as mirrored (MEMBLOCK_MIRROR) and remember
 * globally that mirrored memory exists (see choose_memblock_flags()).
 */
int __init_memblock memblock_mark_mirror(phys_addr_t base, phys_addr_t size)
{
	system_has_some_mirror = true;

	return memblock_setclr_flag(base, size, 1, MEMBLOCK_MIRROR);
}
884
885
886
887
888
889
890
891
/* Mark [base, base + size) with MEMBLOCK_NOMAP. */
int __init_memblock memblock_mark_nomap(phys_addr_t base, phys_addr_t size)
{
	return memblock_setclr_flag(base, size, 1, MEMBLOCK_NOMAP);
}
896
897
898
899
900
901
902
903
/* Clear the MEMBLOCK_NOMAP flag on [base, base + size). */
int __init_memblock memblock_clear_nomap(phys_addr_t base, phys_addr_t size)
{
	return memblock_setclr_flag(base, size, 0, MEMBLOCK_NOMAP);
}
908
909
910
911
912
913
914
915
916
/**
 * __next_reserved_mem_region - next function for for_each_reserved_region()
 * @idx: pointer to u64 loop variable
 * @out_start: ptr to phys_addr_t for start address of the region, can be %NULL
 * @out_end: ptr to phys_addr_t for end address of the region, can be %NULL
 *
 * Iterates over all reserved regions.  Note that @out_end is the
 * *inclusive* end (base + size - 1).  When the regions are exhausted,
 * *@idx is set to ULLONG_MAX to terminate the iteration.
 */
void __init_memblock __next_reserved_mem_region(u64 *idx,
					   phys_addr_t *out_start,
					   phys_addr_t *out_end)
{
	struct memblock_type *type = &memblock.reserved;

	if (*idx < type->cnt) {
		struct memblock_region *r = &type->regions[*idx];
		phys_addr_t base = r->base;
		phys_addr_t size = r->size;

		if (out_start)
			*out_start = base;
		if (out_end)
			*out_end = base + size - 1;

		*idx += 1;
		return;
	}

	/* signal end of iteration */
	*idx = ULLONG_MAX;
}
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
/**
 * __next_mem_range - next function for for_each_free_mem_range() etc.
 * @idx: pointer to u64 loop variable; packs two indices, type_a in the
 *       low 32 bits and type_b in the high 32 bits
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @type_a: pointer to memblock_type from where the range is taken
 * @type_b: pointer to memblock_type which excludes memory from being taken
 * @out_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @out_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @out_nid: ptr to int for nid of the range, can be %NULL
 *
 * Finds the first area of *@type_a which is not covered by *@type_b
 * (i.e. a "free" area when @type_b is the reserved type).  If @type_b
 * is %NULL, the regions of @type_a are returned as-is.  Hotpluggable
 * regions are skipped when movable_node is enabled; mirror/nomap
 * filtering follows @flags.  *@idx is advanced so repeated calls walk
 * forward, and is set to ULLONG_MAX when iteration is complete.
 */
void __init_memblock __next_mem_range(u64 *idx, int nid,
				      enum memblock_flags flags,
				      struct memblock_type *type_a,
				      struct memblock_type *type_b,
				      phys_addr_t *out_start,
				      phys_addr_t *out_end, int *out_nid)
{
	int idx_a = *idx & 0xffffffff;
	int idx_b = *idx >> 32;

	if (WARN_ONCE(nid == MAX_NUMNODES,
	"Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n"))
		nid = NUMA_NO_NODE;

	for (; idx_a < type_a->cnt; idx_a++) {
		struct memblock_region *m = &type_a->regions[idx_a];

		phys_addr_t m_start = m->base;
		phys_addr_t m_end = m->base + m->size;
		int	    m_nid = memblock_get_region_node(m);

		/* only memory regions are associated with nodes, check it */
		if (nid != NUMA_NO_NODE && nid != m_nid)
			continue;

		/* skip hotpluggable memory regions if needed */
		if (movable_node_is_enabled() && memblock_is_hotpluggable(m))
			continue;

		/* if we want mirror memory skip non-mirror memory regions */
		if ((flags & MEMBLOCK_MIRROR) && !memblock_is_mirror(m))
			continue;

		/* skip nomap memory unless we were asked for it explicitly */
		if (!(flags & MEMBLOCK_NOMAP) && memblock_is_nomap(m))
			continue;

		if (!type_b) {
			if (out_start)
				*out_start = m_start;
			if (out_end)
				*out_end = m_end;
			if (out_nid)
				*out_nid = m_nid;
			idx_a++;
			*idx = (u32)idx_a | (u64)idx_b << 32;
			return;
		}

		/* scan areas before each reservation */
		for (; idx_b < type_b->cnt + 1; idx_b++) {
			struct memblock_region *r;
			phys_addr_t r_start;
			phys_addr_t r_end;

			/* gap before region idx_b; idx_b == cnt is the tail gap */
			r = &type_b->regions[idx_b];
			r_start = idx_b ? r[-1].base + r[-1].size : 0;
			r_end = idx_b < type_b->cnt ?
				r->base : PHYS_ADDR_MAX;

			/*
			 * if idx_b advanced past idx_a,
			 * break out to advance idx_a
			 */
			if (r_start >= m_end)
				break;
			/* if the two regions intersect, we're done */
			if (m_start < r_end) {
				if (out_start)
					*out_start =
						max(m_start, r_start);
				if (out_end)
					*out_end = min(m_end, r_end);
				if (out_nid)
					*out_nid = m_nid;
				/*
				 * The region which ends first is
				 * advanced for the next iteration.
				 */
				if (m_end <= r_end)
					idx_a++;
				else
					idx_b++;
				*idx = (u32)idx_a | (u64)idx_b << 32;
				return;
			}
		}
	}

	/* signal end of iteration */
	*idx = ULLONG_MAX;
}
1059
1060
1061
1062
1063
1064
1065
1066
1067
1068
1069
1070
1071
1072
1073
1074
1075
1076
/**
 * __next_mem_range_rev - generic next function for for_each_*_range_rev()
 * @idx: pointer to u64 loop variable; packs two indices, type_a in the
 *       low 32 bits and type_b in the high 32 bits
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @type_a: pointer to memblock_type from where the range is taken
 * @type_b: pointer to memblock_type which excludes memory from being taken
 * @out_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @out_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @out_nid: ptr to int for nid of the range, can be %NULL
 *
 * Reverse of __next_mem_range(): walks the areas of @type_a not covered
 * by @type_b from the highest address down.  *@idx == ULLONG_MAX on
 * entry means "start from the end"; it is set back to ULLONG_MAX when
 * iteration is complete.
 */
void __init_memblock __next_mem_range_rev(u64 *idx, int nid,
					  enum memblock_flags flags,
					  struct memblock_type *type_a,
					  struct memblock_type *type_b,
					  phys_addr_t *out_start,
					  phys_addr_t *out_end, int *out_nid)
{
	int idx_a = *idx & 0xffffffff;
	int idx_b = *idx >> 32;

	if (WARN_ONCE(nid == MAX_NUMNODES, "Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n"))
		nid = NUMA_NO_NODE;

	/* first call: start from the last regions */
	if (*idx == (u64)ULLONG_MAX) {
		idx_a = type_a->cnt - 1;
		if (type_b != NULL)
			idx_b = type_b->cnt;
		else
			idx_b = 0;
	}

	for (; idx_a >= 0; idx_a--) {
		struct memblock_region *m = &type_a->regions[idx_a];

		phys_addr_t m_start = m->base;
		phys_addr_t m_end = m->base + m->size;
		int m_nid = memblock_get_region_node(m);

		/* only memory regions are associated with nodes, check it */
		if (nid != NUMA_NO_NODE && nid != m_nid)
			continue;

		/* skip hotpluggable memory regions if needed */
		if (movable_node_is_enabled() && memblock_is_hotpluggable(m))
			continue;

		/* if we want mirror memory skip non-mirror memory regions */
		if ((flags & MEMBLOCK_MIRROR) && !memblock_is_mirror(m))
			continue;

		/* skip nomap memory unless we were asked for it explicitly */
		if (!(flags & MEMBLOCK_NOMAP) && memblock_is_nomap(m))
			continue;

		if (!type_b) {
			if (out_start)
				*out_start = m_start;
			if (out_end)
				*out_end = m_end;
			if (out_nid)
				*out_nid = m_nid;
			idx_a--;
			*idx = (u32)idx_a | (u64)idx_b << 32;
			return;
		}

		/* scan areas before each reservation */
		for (; idx_b >= 0; idx_b--) {
			struct memblock_region *r;
			phys_addr_t r_start;
			phys_addr_t r_end;

			/* gap before region idx_b; idx_b == cnt is the tail gap */
			r = &type_b->regions[idx_b];
			r_start = idx_b ? r[-1].base + r[-1].size : 0;
			r_end = idx_b < type_b->cnt ?
				r->base : PHYS_ADDR_MAX;
			/*
			 * if idx_b advanced past idx_a,
			 * break out to advance idx_a
			 */

			if (r_end <= m_start)
				break;
			/* if the two regions intersect, we're done */
			if (m_end > r_start) {
				if (out_start)
					*out_start = max(m_start, r_start);
				if (out_end)
					*out_end = min(m_end, r_end);
				if (out_nid)
					*out_nid = m_nid;
				/* advance whichever region begins later */
				if (m_start >= r_start)
					idx_a--;
				else
					idx_b--;
				*idx = (u32)idx_a | (u64)idx_b << 32;
				return;
			}
		}
	}
	/* signal end of iteration */
	*idx = ULLONG_MAX;
}
1170
1171#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
1172
1173
1174
/*
 * Common iterator interface used to define for_each_mem_pfn_range().
 * Advances *@idx to the next "memory" region that matches @nid
 * (MAX_NUMNODES matches any) and contains at least one whole page
 * (regions whose rounded-up start meets the rounded-down end are
 * skipped).  *@idx is reset to -1 at the end of iteration.
 */
void __init_memblock __next_mem_pfn_range(int *idx, int nid,
				unsigned long *out_start_pfn,
				unsigned long *out_end_pfn, int *out_nid)
{
	struct memblock_type *type = &memblock.memory;
	struct memblock_region *r;

	while (++*idx < type->cnt) {
		r = &type->regions[*idx];

		if (PFN_UP(r->base) >= PFN_DOWN(r->base + r->size))
			continue;
		if (nid == MAX_NUMNODES || nid == r->nid)
			break;
	}
	if (*idx >= type->cnt) {
		*idx = -1;
		return;
	}

	if (out_start_pfn)
		*out_start_pfn = PFN_UP(r->base);
	if (out_end_pfn)
		*out_end_pfn = PFN_DOWN(r->base + r->size);
	if (out_nid)
		*out_nid = r->nid;
}
1202
1203
1204
1205
1206
1207
1208
1209
1210
1211
1212
1213
1214
1215
/**
 * memblock_set_node - set node ID on memblock regions
 * @base: base of area to set node ID for
 * @size: size of area to set node ID for
 * @type: memblock type to set node ID for
 * @nid: node ID to set
 *
 * Isolates [@base, @base + @size) in @type, sets @nid on every isolated
 * region, then re-merges compatible neighbours.
 *
 * Return: 0 on success, -errno on failure.
 */
int __init_memblock memblock_set_node(phys_addr_t base, phys_addr_t size,
				      struct memblock_type *type, int nid)
{
	int start_rgn, end_rgn;
	int i, ret;

	ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
	if (ret)
		return ret;

	for (i = start_rgn; i < end_rgn; i++)
		memblock_set_region_node(&type->regions[i], nid);

	memblock_merge_regions(type);
	return 0;
}
1232#endif
1233
/*
 * Core physical allocator: find a free range matching the constraints
 * and reserve it.  A zero @align defaults to SMP_CACHE_BYTES.  Returns
 * the physical address on success, 0 on failure.
 */
static phys_addr_t __init memblock_alloc_range_nid(phys_addr_t size,
					phys_addr_t align, phys_addr_t start,
					phys_addr_t end, int nid,
					enum memblock_flags flags)
{
	phys_addr_t found;

	if (!align)
		align = SMP_CACHE_BYTES;

	found = memblock_find_in_range_node(size, align, start, end, nid,
					    flags);
	if (found && !memblock_reserve(found, size)) {
		/*
		 * Inform kmemleak with min_count 0: the block is not
		 * expected to be referenced, so never report it as leaked.
		 */
		kmemleak_alloc_phys(found, size, 0, 0);
		return found;
	}
	return 0;
}
1256
/* Allocate @size bytes within [@start, @end), any node. */
phys_addr_t __init memblock_alloc_range(phys_addr_t size, phys_addr_t align,
					phys_addr_t start, phys_addr_t end,
					enum memblock_flags flags)
{
	return memblock_alloc_range_nid(size, align, start, end, NUMA_NO_NODE,
					flags);
}
1264
/* Allocate @size bytes below @max_addr, preferring node @nid. */
phys_addr_t __init memblock_alloc_base_nid(phys_addr_t size,
					phys_addr_t align, phys_addr_t max_addr,
					int nid, enum memblock_flags flags)
{
	return memblock_alloc_range_nid(size, align, 0, max_addr, nid, flags);
}
1271
/*
 * Allocate @size bytes on node @nid, anywhere accessible.  If mirrored
 * memory was preferred but unavailable, silently retry without the
 * mirror constraint.  Returns 0 on failure.
 */
phys_addr_t __init memblock_alloc_nid(phys_addr_t size, phys_addr_t align, int nid)
{
	enum memblock_flags flags = choose_memblock_flags();
	phys_addr_t ret;

again:
	ret = memblock_alloc_base_nid(size, align, MEMBLOCK_ALLOC_ACCESSIBLE,
				      nid, flags);

	if (!ret && (flags & MEMBLOCK_MIRROR)) {
		flags &= ~MEMBLOCK_MIRROR;
		goto again;
	}
	return ret;
}
1287
/* Allocate @size bytes below @max_addr, any node, no flag constraints. */
phys_addr_t __init __memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr)
{
	return memblock_alloc_base_nid(size, align, max_addr, NUMA_NO_NODE,
				       MEMBLOCK_NONE);
}
1293
1294phys_addr_t __init memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr)
1295{
1296 phys_addr_t alloc;
1297
1298 alloc = __memblock_alloc_base(size, align, max_addr);
1299
1300 if (alloc == 0)
1301 panic("ERROR: Failed to allocate %pa bytes below %pa.\n",
1302 &size, &max_addr);
1303
1304 return alloc;
1305}
1306
/* Allocate @size bytes anywhere accessible; panics on failure. */
phys_addr_t __init memblock_alloc(phys_addr_t size, phys_addr_t align)
{
	return memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ACCESSIBLE);
}
1311
1312phys_addr_t __init memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid)
1313{
1314 phys_addr_t res = memblock_alloc_nid(size, align, nid);
1315
1316 if (res)
1317 return res;
1318 return memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ACCESSIBLE);
1319}
1320
1321#if defined(CONFIG_NO_BOOTMEM)
1322
1323
1324
1325
1326
1327
1328
1329
1330
1331
1332
1333
1334
1335
1336
1337
1338
1339
1340
1341
1342
1343
1344
1345
1346
1347
1348
/**
 * memblock_virt_alloc_internal - allocate boot memory block
 * @size: size of memory block to be allocated in bytes
 * @align: alignment of the region and block's size
 * @min_addr: the lower bound of the memory region to allocate
 * @max_addr: the upper bound of the memory region to allocate
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 *
 * Allocation is attempted with progressively relaxed constraints:
 * exact node -> any node -> without @min_addr -> without the mirror
 * flag.  If the slab is already up, the allocation is (with a warning)
 * handed to kzalloc_node() instead.
 *
 * Return: virtual address of the allocated block, or %NULL on failure.
 */
static void * __init memblock_virt_alloc_internal(
				phys_addr_t size, phys_addr_t align,
				phys_addr_t min_addr, phys_addr_t max_addr,
				int nid)
{
	phys_addr_t alloc;
	void *ptr;
	enum memblock_flags flags = choose_memblock_flags();

	if (WARN_ONCE(nid == MAX_NUMNODES, "Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n"))
		nid = NUMA_NO_NODE;

	/*
	 * Callers should not be here once the slab allocator works;
	 * warn and delegate rather than corrupting memblock state.
	 */
	if (WARN_ON_ONCE(slab_is_available()))
		return kzalloc_node(size, GFP_NOWAIT, nid);

	if (!align)
		align = SMP_CACHE_BYTES;

	if (max_addr > memblock.current_limit)
		max_addr = memblock.current_limit;
again:
	alloc = memblock_find_in_range_node(size, align, min_addr, max_addr,
					    nid, flags);
	if (alloc && !memblock_reserve(alloc, size))
		goto done;

	/* retry without the node constraint */
	if (nid != NUMA_NO_NODE) {
		alloc = memblock_find_in_range_node(size, align, min_addr,
						    max_addr, NUMA_NO_NODE,
						    flags);
		if (alloc && !memblock_reserve(alloc, size))
			goto done;
	}

	/* retry without the lower bound */
	if (min_addr) {
		min_addr = 0;
		goto again;
	}

	/* finally, retry without requiring mirrored memory */
	if (flags & MEMBLOCK_MIRROR) {
		flags &= ~MEMBLOCK_MIRROR;
		pr_warn("Could not allocate %pap bytes of mirrored memory\n",
			&size);
		goto again;
	}

	return NULL;
done:
	ptr = phys_to_virt(alloc);

	/*
	 * Inform kmemleak with min_count 0: the block is not expected
	 * to be referenced, so never report it as leaked.
	 */
	kmemleak_alloc(ptr, size, 0, 0);

	return ptr;
}
1414
1415
1416
1417
1418
1419
1420
1421
1422
1423
1424
1425
1426
1427
1428
1429
1430
1431
1432
1433
1434void * __init memblock_virt_alloc_try_nid_raw(
1435 phys_addr_t size, phys_addr_t align,
1436 phys_addr_t min_addr, phys_addr_t max_addr,
1437 int nid)
1438{
1439 void *ptr;
1440
1441 memblock_dbg("%s: %llu bytes align=0x%llx nid=%d from=%pa max_addr=%pa %pF\n",
1442 __func__, (u64)size, (u64)align, nid, &min_addr,
1443 &max_addr, (void *)_RET_IP_);
1444
1445 ptr = memblock_virt_alloc_internal(size, align,
1446 min_addr, max_addr, nid);
1447#ifdef CONFIG_DEBUG_VM
1448 if (ptr && size > 0)
1449 memset(ptr, PAGE_POISON_PATTERN, size);
1450#endif
1451 return ptr;
1452}
1453
1454
1455
1456
1457
1458
1459
1460
1461
1462
1463
1464
1465
1466
1467
1468
1469
1470
1471void * __init memblock_virt_alloc_try_nid_nopanic(
1472 phys_addr_t size, phys_addr_t align,
1473 phys_addr_t min_addr, phys_addr_t max_addr,
1474 int nid)
1475{
1476 void *ptr;
1477
1478 memblock_dbg("%s: %llu bytes align=0x%llx nid=%d from=%pa max_addr=%pa %pF\n",
1479 __func__, (u64)size, (u64)align, nid, &min_addr,
1480 &max_addr, (void *)_RET_IP_);
1481
1482 ptr = memblock_virt_alloc_internal(size, align,
1483 min_addr, max_addr, nid);
1484 if (ptr)
1485 memset(ptr, 0, size);
1486 return ptr;
1487}
1488
1489
1490
1491
1492
1493
1494
1495
1496
1497
1498
1499
1500
1501
1502
1503
1504
1505
1506
1507void * __init memblock_virt_alloc_try_nid(
1508 phys_addr_t size, phys_addr_t align,
1509 phys_addr_t min_addr, phys_addr_t max_addr,
1510 int nid)
1511{
1512 void *ptr;
1513
1514 memblock_dbg("%s: %llu bytes align=0x%llx nid=%d from=%pa max_addr=%pa %pF\n",
1515 __func__, (u64)size, (u64)align, nid, &min_addr,
1516 &max_addr, (void *)_RET_IP_);
1517 ptr = memblock_virt_alloc_internal(size, align,
1518 min_addr, max_addr, nid);
1519 if (ptr) {
1520 memset(ptr, 0, size);
1521 return ptr;
1522 }
1523
1524 panic("%s: Failed to allocate %llu bytes align=0x%llx nid=%d from=%pa max_addr=%pa\n",
1525 __func__, (u64)size, (u64)align, nid, &min_addr, &max_addr);
1526 return NULL;
1527}
1528#endif
1529
1530
1531
1532
1533
1534
1535
1536
1537
1538void __init __memblock_free_early(phys_addr_t base, phys_addr_t size)
1539{
1540 phys_addr_t end = base + size - 1;
1541
1542 memblock_dbg("%s: [%pa-%pa] %pF\n",
1543 __func__, &base, &end, (void *)_RET_IP_);
1544 kmemleak_free_part_phys(base, size);
1545 memblock_remove_range(&memblock.reserved, base, size);
1546}
1547
1548
1549
1550
1551
1552
1553
1554
1555
1556
1557void __init __memblock_free_late(phys_addr_t base, phys_addr_t size)
1558{
1559 phys_addr_t cursor, end;
1560
1561 end = base + size - 1;
1562 memblock_dbg("%s: [%pa-%pa] %pF\n",
1563 __func__, &base, &end, (void *)_RET_IP_);
1564 kmemleak_free_part_phys(base, size);
1565 cursor = PFN_UP(base);
1566 end = PFN_DOWN(base + size);
1567
1568 for (; cursor < end; cursor++) {
1569 __free_pages_bootmem(pfn_to_page(cursor), cursor, 0);
1570 totalram_pages++;
1571 }
1572}
1573
1574
1575
1576
1577
/* Total size of all "memory" regions. */
phys_addr_t __init_memblock memblock_phys_mem_size(void)
{
	return memblock.memory.total_size;
}
1582
/* Total size of all "reserved" regions. */
phys_addr_t __init_memblock memblock_reserved_size(void)
{
	return memblock.reserved.total_size;
}
1587
1588phys_addr_t __init memblock_mem_size(unsigned long limit_pfn)
1589{
1590 unsigned long pages = 0;
1591 struct memblock_region *r;
1592 unsigned long start_pfn, end_pfn;
1593
1594 for_each_memblock(memory, r) {
1595 start_pfn = memblock_region_memory_base_pfn(r);
1596 end_pfn = memblock_region_memory_end_pfn(r);
1597 start_pfn = min_t(unsigned long, start_pfn, limit_pfn);
1598 end_pfn = min_t(unsigned long, end_pfn, limit_pfn);
1599 pages += end_pfn - start_pfn;
1600 }
1601
1602 return PFN_PHYS(pages);
1603}
1604
1605
/* Lowest registered memory address (base of the first memory region). */
phys_addr_t __init_memblock memblock_start_of_DRAM(void)
{
	return memblock.memory.regions[0].base;
}
1610
1611phys_addr_t __init_memblock memblock_end_of_DRAM(void)
1612{
1613 int idx = memblock.memory.cnt - 1;
1614
1615 return (memblock.memory.regions[idx].base + memblock.memory.regions[idx].size);
1616}
1617
/*
 * Translate a memory-amount @limit into an address cap: walk the memory
 * regions in order, consuming @limit, and return the address at which
 * @limit bytes of actual memory have been covered.  Returns
 * PHYS_ADDR_MAX when the limit exceeds total memory (i.e. no cap).
 */
static phys_addr_t __init_memblock __find_max_addr(phys_addr_t limit)
{
	phys_addr_t max_addr = PHYS_ADDR_MAX;
	struct memblock_region *r;

	/*
	 * translate the memory @limit size into the max address within one of
	 * the memory memblock regions, if the @limit exceeds the total size
	 * of those regions, max_addr will keep original value PHYS_ADDR_MAX
	 */
	for_each_memblock(memory, r) {
		if (limit <= r->size) {
			max_addr = r->base + limit;
			break;
		}
		limit -= r->size;
	}

	return max_addr;
}
1638
/*
 * Truncate both memory and reserved types so that at most @limit bytes
 * of memory remain (counted from the lowest region up).  @limit == 0
 * means no limit.
 */
void __init memblock_enforce_memory_limit(phys_addr_t limit)
{
	phys_addr_t max_addr = PHYS_ADDR_MAX;

	if (!limit)
		return;

	max_addr = __find_max_addr(limit);

	/* @limit exceeds the total size of the memory, do nothing */
	if (max_addr == PHYS_ADDR_MAX)
		return;

	/* truncate both memory and reserved regions */
	memblock_remove_range(&memblock.memory, max_addr,
			      PHYS_ADDR_MAX);
	memblock_remove_range(&memblock.reserved, max_addr,
			      PHYS_ADDR_MAX);
}
1658
/*
 * Keep only memory within [@base, @base + @size): remove all other
 * memory regions (except MEMBLOCK_NOMAP ones, which are preserved) and
 * truncate the reserved type to the same window.
 */
void __init memblock_cap_memory_range(phys_addr_t base, phys_addr_t size)
{
	int start_rgn, end_rgn;
	int i, ret;

	if (!size)
		return;

	ret = memblock_isolate_range(&memblock.memory, base, size,
				     &start_rgn, &end_rgn);
	if (ret)
		return;

	/* remove all the MAP regions above the limit */
	for (i = memblock.memory.cnt - 1; i >= end_rgn; i--)
		if (!memblock_is_nomap(&memblock.memory.regions[i]))
			memblock_remove_region(&memblock.memory, i);

	/* ... and below the limit */
	for (i = start_rgn - 1; i >= 0; i--)
		if (!memblock_is_nomap(&memblock.memory.regions[i]))
			memblock_remove_region(&memblock.memory, i);

	/* truncate the reserved regions */
	memblock_remove_range(&memblock.reserved, 0, base);
	memblock_remove_range(&memblock.reserved,
			base + size, PHYS_ADDR_MAX);
}
1686
1687void __init memblock_mem_limit_remove_map(phys_addr_t limit)
1688{
1689 phys_addr_t max_addr;
1690
1691 if (!limit)
1692 return;
1693
1694 max_addr = __find_max_addr(limit);
1695
1696
1697 if (max_addr == PHYS_ADDR_MAX)
1698 return;
1699
1700 memblock_cap_memory_range(0, max_addr);
1701}
1702
1703static int __init_memblock memblock_search(struct memblock_type *type, phys_addr_t addr)
1704{
1705 unsigned int left = 0, right = type->cnt;
1706
1707 do {
1708 unsigned int mid = (right + left) / 2;
1709
1710 if (addr < type->regions[mid].base)
1711 right = mid;
1712 else if (addr >= (type->regions[mid].base +
1713 type->regions[mid].size))
1714 left = mid + 1;
1715 else
1716 return mid;
1717 } while (left < right);
1718 return -1;
1719}
1720
/* Return true if @addr lies inside any reserved region. */
bool __init memblock_is_reserved(phys_addr_t addr)
{
	return memblock_search(&memblock.reserved, addr) != -1;
}
1725
/* Return true if @addr lies inside any registered memory region. */
bool __init_memblock memblock_is_memory(phys_addr_t addr)
{
	return memblock_search(&memblock.memory, addr) != -1;
}
1730
1731bool __init_memblock memblock_is_map_memory(phys_addr_t addr)
1732{
1733 int i = memblock_search(&memblock.memory, addr);
1734
1735 if (i == -1)
1736 return false;
1737 return !memblock_is_nomap(&memblock.memory.regions[i]);
1738}
1739
#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
/*
 * memblock_search_pfn_nid - find the memory region containing @pfn
 * @pfn: page frame number to look up
 * @start_pfn: out: first PFN of the containing region
 * @end_pfn: out: one past the last PFN of the containing region
 *
 * Return: the node id of the containing region, or -1 if @pfn is not in
 * any memory region (in which case the out parameters are untouched).
 */
int __init_memblock memblock_search_pfn_nid(unsigned long pfn,
			 unsigned long *start_pfn, unsigned long *end_pfn)
{
	struct memblock_type *type = &memblock.memory;
	struct memblock_region *rgn;
	int idx = memblock_search(type, PFN_PHYS(pfn));

	if (idx == -1)
		return -1;

	rgn = &type->regions[idx];
	*start_pfn = PFN_DOWN(rgn->base);
	*end_pfn = PFN_DOWN(rgn->base + rgn->size);

	return rgn->nid;
}
#endif
1756
1757
1758
1759
1760
1761
1762
1763
1764
1765
1766
1767bool __init_memblock memblock_is_region_memory(phys_addr_t base, phys_addr_t size)
1768{
1769 int idx = memblock_search(&memblock.memory, base);
1770 phys_addr_t end = base + memblock_cap_size(base, &size);
1771
1772 if (idx == -1)
1773 return false;
1774 return (memblock.memory.regions[idx].base +
1775 memblock.memory.regions[idx].size) >= end;
1776}
1777
1778
1779
1780
1781
1782
1783
1784
1785
1786
1787
1788
/*
 * memblock_is_region_reserved - check if an area intersects reserved memory
 * @base: base of the area to check
 * @size: size of the area to check
 *
 * Return: true if any part of [@base, @base + @size) overlaps a reserved
 * region, false otherwise.
 */
bool __init_memblock memblock_is_region_reserved(phys_addr_t base, phys_addr_t size)
{
	/* caps @size in place — presumably to avoid address wrap; see helper */
	memblock_cap_size(base, &size);
	return memblock_overlaps_region(&memblock.reserved, base, size);
}
1794
/*
 * memblock_trim_memory - trim every memory region to @align
 * @align: required alignment, in bytes (round_up/round_down imply a
 *         power of two)
 *
 * Rounds each region's base up and its end down to @align.  Regions that
 * have no aligned bytes left are removed entirely.
 */
void __init_memblock memblock_trim_memory(phys_addr_t align)
{
	phys_addr_t start, end, orig_start, orig_end;
	struct memblock_region *r;

	for_each_memblock(memory, r) {
		orig_start = r->base;
		orig_end = r->base + r->size;
		start = round_up(orig_start, align);
		end = round_down(orig_end, align);

		/* Already aligned: leave the region untouched. */
		if (start == orig_start && end == orig_end)
			continue;

		if (start < end) {
			/* Shrink the region in place to its aligned bounds. */
			r->base = start;
			r->size = end - start;
		} else {
			/* No aligned bytes remain: drop the region. */
			memblock_remove_region(&memblock.memory,
					       r - memblock.memory.regions);
			/*
			 * Removal shifts later entries down one slot; step
			 * back so the iterator's increment revisits this
			 * index instead of skipping a region.
			 */
			r--;
		}
	}
}
1819
/* Set the upper address bound used by memblock allocations. */
void __init_memblock memblock_set_current_limit(phys_addr_t limit)
{
	memblock.current_limit = limit;
}
1824
/* Return the current upper address bound for memblock allocations. */
phys_addr_t __init_memblock memblock_get_current_limit(void)
{
	return memblock.current_limit;
}
1829
1830static void __init_memblock memblock_dump(struct memblock_type *type)
1831{
1832 phys_addr_t base, end, size;
1833 enum memblock_flags flags;
1834 int idx;
1835 struct memblock_region *rgn;
1836
1837 pr_info(" %s.cnt = 0x%lx\n", type->name, type->cnt);
1838
1839 for_each_memblock_type(idx, type, rgn) {
1840 char nid_buf[32] = "";
1841
1842 base = rgn->base;
1843 size = rgn->size;
1844 end = base + size - 1;
1845 flags = rgn->flags;
1846#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
1847 if (memblock_get_region_node(rgn) != MAX_NUMNODES)
1848 snprintf(nid_buf, sizeof(nid_buf), " on node %d",
1849 memblock_get_region_node(rgn));
1850#endif
1851 pr_info(" %s[%#x]\t[%pa-%pa], %pa bytes%s flags: %#x\n",
1852 type->name, idx, &base, &end, &size, nid_buf, flags);
1853 }
1854}
1855
/* Dump the totals and all regions of every memblock type. */
void __init_memblock __memblock_dump_all(void)
{
	pr_info("MEMBLOCK configuration:\n");
	pr_info(" memory size = %pa reserved size = %pa\n",
		&memblock.memory.total_size,
		&memblock.reserved.total_size);

	memblock_dump(&memblock.memory);
	memblock_dump(&memblock.reserved);
#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
	memblock_dump(&memblock.physmem);
#endif
}
1869
/* Enable dynamic resizing of the memblock region arrays. */
void __init memblock_allow_resize(void)
{
	memblock_can_resize = 1;
}
1874
1875static int __init early_memblock(char *p)
1876{
1877 if (p && strstr(p, "debug"))
1878 memblock_debug = 1;
1879 return 0;
1880}
1881early_param("memblock", early_memblock);
1882
1883#if defined(CONFIG_DEBUG_FS) && !defined(CONFIG_ARCH_DISCARD_MEMBLOCK)
1884
1885static int memblock_debug_show(struct seq_file *m, void *private)
1886{
1887 struct memblock_type *type = m->private;
1888 struct memblock_region *reg;
1889 int i;
1890 phys_addr_t end;
1891
1892 for (i = 0; i < type->cnt; i++) {
1893 reg = &type->regions[i];
1894 end = reg->base + reg->size - 1;
1895
1896 seq_printf(m, "%4d: ", i);
1897 seq_printf(m, "%pa..%pa\n", ®->base, &end);
1898 }
1899 return 0;
1900}
1901DEFINE_SHOW_ATTRIBUTE(memblock_debug);
1902
1903static int __init memblock_init_debugfs(void)
1904{
1905 struct dentry *root = debugfs_create_dir("memblock", NULL);
1906 if (!root)
1907 return -ENXIO;
1908 debugfs_create_file("memory", 0444, root,
1909 &memblock.memory, &memblock_debug_fops);
1910 debugfs_create_file("reserved", 0444, root,
1911 &memblock.reserved, &memblock_debug_fops);
1912#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
1913 debugfs_create_file("physmem", 0444, root,
1914 &memblock.physmem, &memblock_debug_fops);
1915#endif
1916
1917 return 0;
1918}
1919__initcall(memblock_init_debugfs);
1920
1921#endif
1922