/*
 * Procedures for maintaining information about logical memory blocks.
 *
 * Peter Bergner, IBM Corp.	June 2001.
 * Copyright (C) 2001 Peter Bergner.
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/poison.h>
#include <linux/pfn.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/memblock.h>

#include <asm/sections.h>
#include <linux/io.h>

#include "internal.h"

static struct memblock_region memblock_memory_init_regions[INIT_MEMBLOCK_REGIONS] __initdata_memblock;
static struct memblock_region memblock_reserved_init_regions[INIT_MEMBLOCK_REGIONS] __initdata_memblock;
#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
static struct memblock_region memblock_physmem_init_regions[INIT_PHYSMEM_REGIONS] __initdata_memblock;
#endif

struct memblock memblock __initdata_memblock = {
	.memory.regions		= memblock_memory_init_regions,
	.memory.cnt		= 1,	/* empty dummy entry */
	.memory.max		= INIT_MEMBLOCK_REGIONS,
	.memory.name		= "memory",

	.reserved.regions	= memblock_reserved_init_regions,
	.reserved.cnt		= 1,	/* empty dummy entry */
	.reserved.max		= INIT_MEMBLOCK_REGIONS,
	.reserved.name		= "reserved",

#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
	.physmem.regions	= memblock_physmem_init_regions,
	.physmem.cnt		= 1,	/* empty dummy entry */
	.physmem.max		= INIT_PHYSMEM_REGIONS,
	.physmem.name		= "physmem",
#endif

	.bottom_up		= false,
	.current_limit		= MEMBLOCK_ALLOC_ANYWHERE,
};

int memblock_debug __initdata_memblock;
static bool system_has_some_mirror __initdata_memblock = false;
static int memblock_can_resize __initdata_memblock;
static int memblock_memory_in_slab __initdata_memblock = 0;
static int memblock_reserved_in_slab __initdata_memblock = 0;

ulong __init_memblock choose_memblock_flags(void)
{
	return system_has_some_mirror ? MEMBLOCK_MIRROR : MEMBLOCK_NONE;
}

/* adjust *@size so that (@base + *@size) doesn't overflow, return new size */
static inline phys_addr_t memblock_cap_size(phys_addr_t base, phys_addr_t *size)
{
	return *size = min(*size, (phys_addr_t)ULLONG_MAX - base);
}

/*
 * Address comparison utilities
 */
static unsigned long __init_memblock memblock_addrs_overlap(phys_addr_t base1, phys_addr_t size1,
				       phys_addr_t base2, phys_addr_t size2)
{
	return ((base1 < (base2 + size2)) && (base2 < (base1 + size1)));
}

bool __init_memblock memblock_overlaps_region(struct memblock_type *type,
					phys_addr_t base, phys_addr_t size)
{
	unsigned long i;

	for (i = 0; i < type->cnt; i++)
		if (memblock_addrs_overlap(base, size, type->regions[i].base,
					   type->regions[i].size))
			break;
	return i < type->cnt;
}

/**
 * __memblock_find_range_bottom_up - find free area utility in bottom-up
 * @start: start of candidate range
 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_{ANYWHERE|ACCESSIBLE}
 * @size: size of free area to find
 * @align: alignment of free area to find
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 * @flags: pick from blocks based on memory attributes
 *
 * Utility called from memblock_find_in_range_node(), find free area bottom-up.
 *
 * RETURNS:
 * Found address on success, 0 on failure.
 */
static phys_addr_t __init_memblock
__memblock_find_range_bottom_up(phys_addr_t start, phys_addr_t end,
				phys_addr_t size, phys_addr_t align, int nid,
				ulong flags)
{
	phys_addr_t this_start, this_end, cand;
	u64 i;

	for_each_free_mem_range(i, nid, flags, &this_start, &this_end, NULL) {
		this_start = clamp(this_start, start, end);
		this_end = clamp(this_end, start, end);

		cand = round_up(this_start, align);
		if (cand < this_end && this_end - cand >= size)
			return cand;
	}

	return 0;
}

/**
 * __memblock_find_range_top_down - find free area utility, in top-down
 * @start: start of candidate range
 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_{ANYWHERE|ACCESSIBLE}
 * @size: size of free area to find
 * @align: alignment of free area to find
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 * @flags: pick from blocks based on memory attributes
 *
 * Utility called from memblock_find_in_range_node(), find free area top-down.
 *
 * RETURNS:
 * Found address on success, 0 on failure.
 */
static phys_addr_t __init_memblock
__memblock_find_range_top_down(phys_addr_t start, phys_addr_t end,
			       phys_addr_t size, phys_addr_t align, int nid,
			       ulong flags)
{
	phys_addr_t this_start, this_end, cand;
	u64 i;

	for_each_free_mem_range_reverse(i, nid, flags, &this_start, &this_end,
					NULL) {
		this_start = clamp(this_start, start, end);
		this_end = clamp(this_end, start, end);

		if (this_end < size)
			continue;

		cand = round_down(this_end - size, align);
		if (cand >= this_start)
			return cand;
	}

	return 0;
}

/**
 * memblock_find_in_range_node - find free area in given range and node
 * @size: size of free area to find
 * @align: alignment of free area to find
 * @start: start of candidate range
 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_{ANYWHERE|ACCESSIBLE}
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 * @flags: pick from blocks based on memory attributes
 *
 * Find @size free area aligned to @align in the specified range and node.
 *
 * When allocation direction is bottom-up, the @start should be greater
 * than the end of the kernel image. Otherwise, it will be trimmed. The
 * reason is that we want the bottom-up allocation just near the kernel
 * image so it is highly likely that the allocation is successful.
 *
 * If bottom-up allocation failed, will try to allocate memory top-down.
 *
 * RETURNS:
 * Found address on success, 0 on failure.
 */
phys_addr_t __init_memblock memblock_find_in_range_node(phys_addr_t size,
					phys_addr_t align, phys_addr_t start,
					phys_addr_t end, int nid, ulong flags)
{
	phys_addr_t kernel_end, ret;

	/* pump up @end */
	if (end == MEMBLOCK_ALLOC_ACCESSIBLE)
		end = memblock.current_limit;

	/* avoid allocating the first page */
	start = max_t(phys_addr_t, start, PAGE_SIZE);
	end = max(start, end);
	kernel_end = __pa_symbol(_end);

	/*
	 * try bottom-up allocation only when bottom-up mode
	 * is set and @end is above the kernel image.
	 */
	if (memblock_bottom_up() && end > kernel_end) {
		phys_addr_t bottom_up_start;

		/* make sure we will allocate above the kernel */
		bottom_up_start = max(start, kernel_end);

		/* ok, try bottom-up allocation first */
		ret = __memblock_find_range_bottom_up(bottom_up_start, end,
						      size, align, nid, flags);
		if (ret)
			return ret;

		/*
		 * The bottom-up pass is always limited to addresses above
		 * the kernel, but the top-down fallback below is not, so it
		 * may place the allocation below the kernel image.  That can
		 * break memory hot-remove expectations, hence the one-time
		 * warning before falling back.
		 */
		WARN_ONCE(1, "memblock: bottom-up allocation failed, memory hotunplug may be affected\n");
	}

	return __memblock_find_range_top_down(start, end, size, align, nid,
					      flags);
}
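
/*
 * Illustrative sketch, not part of this file: an arch that wants early
 * allocations placed just above the kernel image (e.g. to keep later
 * memory hot-remove viable) flips the direction before searching.  The
 * size/alignment values below are hypothetical.
 *
 *	memblock_set_bottom_up(true);
 *	phys_addr_t addr = memblock_find_in_range_node(SZ_16M, SZ_2M, 0,
 *					MEMBLOCK_ALLOC_ACCESSIBLE,
 *					NUMA_NO_NODE, MEMBLOCK_NONE);
 */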

/**
 * memblock_find_in_range - find free area in given range
 * @start: start of candidate range
 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_{ANYWHERE|ACCESSIBLE}
 * @size: size of free area to find
 * @align: alignment of free area to find
 *
 * Find @size free area aligned to @align in the specified range.  If
 * mirrored memory was requested but cannot satisfy the allocation, the
 * mirror restriction is dropped and the search is retried.
 *
 * RETURNS:
 * Found address on success, 0 on failure.
 */
phys_addr_t __init_memblock memblock_find_in_range(phys_addr_t start,
					phys_addr_t end, phys_addr_t size,
					phys_addr_t align)
{
	phys_addr_t ret;
	ulong flags = choose_memblock_flags();

again:
	ret = memblock_find_in_range_node(size, align, start, end,
					  NUMA_NO_NODE, flags);

	if (!ret && (flags & MEMBLOCK_MIRROR)) {
		pr_warn("Could not allocate %pap bytes of mirrored memory\n",
			&size);
		flags &= ~MEMBLOCK_MIRROR;
		goto again;
	}

	return ret;
}
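
/*
 * Usage sketch (illustrative): find-then-reserve is the usual pattern,
 * since memblock_find_in_range() only locates space without claiming it.
 * The range and variables below are hypothetical.
 *
 *	phys_addr_t crash_base = memblock_find_in_range(0, SZ_4G,
 *							crash_size, SZ_1M);
 *	if (crash_base)
 *		memblock_reserve(crash_base, crash_size);
 */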

static void __init_memblock memblock_remove_region(struct memblock_type *type, unsigned long r)
{
	type->total_size -= type->regions[r].size;
	memmove(&type->regions[r], &type->regions[r + 1],
		(type->cnt - (r + 1)) * sizeof(type->regions[r]));
	type->cnt--;

	/* Special case for empty arrays */
	if (type->cnt == 0) {
		WARN_ON(type->total_size != 0);
		type->cnt = 1;
		type->regions[0].base = 0;
		type->regions[0].size = 0;
		type->regions[0].flags = 0;
		memblock_set_region_node(&type->regions[0], MAX_NUMNODES);
	}
}

#ifdef CONFIG_ARCH_DISCARD_MEMBLOCK
/**
 * Discard memory and reserved arrays if they were allocated
 */
void __init memblock_discard(void)
{
	phys_addr_t addr, size;

	if (memblock.reserved.regions != memblock_reserved_init_regions) {
		addr = __pa(memblock.reserved.regions);
		size = PAGE_ALIGN(sizeof(struct memblock_region) *
				  memblock.reserved.max);
		__memblock_free_late(addr, size);
	}

	if (memblock.memory.regions != memblock_memory_init_regions) {
		addr = __pa(memblock.memory.regions);
		size = PAGE_ALIGN(sizeof(struct memblock_region) *
				  memblock.memory.max);
		__memblock_free_late(addr, size);
	}
}
#endif

/**
 * memblock_double_array - double the size of the memblock regions array
 * @type: memblock type of the regions array being doubled
 * @new_area_start: starting address of memory range to avoid overlap with
 * @new_area_size: size of memory range to avoid overlap with
 *
 * Double the size of the @type regions array. If memblock is being used to
 * allocate memory for a new reserved regions array and there is a previously
 * allocated memory range [@new_area_start,@new_area_start+@new_area_size]
 * waiting to be reserved, ensure the memory used by the new array does
 * not overlap.
 *
 * RETURNS:
 * 0 on success, -1 on failure.
 */
static int __init_memblock memblock_double_array(struct memblock_type *type,
						phys_addr_t new_area_start,
						phys_addr_t new_area_size)
{
	struct memblock_region *new_array, *old_array;
	phys_addr_t old_alloc_size, new_alloc_size;
	phys_addr_t old_size, new_size, addr;
	int use_slab = slab_is_available();
	int *in_slab;

	/* We don't allow resizing until we know about the reserved regions
	 * of memory that aren't suitable for allocation
	 */
	if (!memblock_can_resize)
		return -1;

	/* Calculate new doubled size */
	old_size = type->max * sizeof(struct memblock_region);
	new_size = old_size << 1;
	/*
	 * We need to allocate the new array aligned to PAGE_SIZE,
	 * so that it can be freed completely later.
	 */
	old_alloc_size = PAGE_ALIGN(old_size);
	new_alloc_size = PAGE_ALIGN(new_size);

	/* Retrieve the slab flag */
	if (type == &memblock.memory)
		in_slab = &memblock_memory_in_slab;
	else
		in_slab = &memblock_reserved_in_slab;

	/* Try to find some space for it.
	 *
	 * WARNING: We assume that either slab_is_available() and we use it or
	 * we use MEMBLOCK for allocations. That means that this is unsafe to
	 * use when bootmem is currently active (unless bootmem itself is
	 * implemented on top of MEMBLOCK which isn't the case yet)
	 *
	 * This should however not be an issue for now, as we currently only
	 * call into MEMBLOCK while it's still active, or much later when slab
	 * is active for memory hotplug operations
	 */
	if (use_slab) {
		new_array = kmalloc(new_size, GFP_KERNEL);
		addr = new_array ? __pa(new_array) : 0;
	} else {
		/* only exclude range when trying to double reserved.regions */
		if (type != &memblock.reserved)
			new_area_start = new_area_size = 0;

		addr = memblock_find_in_range(new_area_start + new_area_size,
						memblock.current_limit,
						new_alloc_size, PAGE_SIZE);
		if (!addr && new_area_size)
			addr = memblock_find_in_range(0,
				min(new_area_start, memblock.current_limit),
				new_alloc_size, PAGE_SIZE);

		new_array = addr ? __va(addr) : NULL;
	}
	if (!addr) {
		pr_err("memblock: Failed to double %s array from %ld to %ld entries !\n",
		       type->name, type->max, type->max * 2);
		return -1;
	}

	memblock_dbg("memblock: %s is doubled to %ld at [%#010llx-%#010llx]",
			type->name, type->max * 2, (u64)addr,
			(u64)addr + new_size - 1);

	/*
	 * Found space, we now need to move the array over before we add the
	 * reserved region since it may be our reserved array itself that is
	 * full.
	 */
	memcpy(new_array, type->regions, old_size);
	memset(new_array + type->max, 0, old_size);
	old_array = type->regions;
	type->regions = new_array;
	type->max <<= 1;

	/* Free old array. We needn't free it if the array is the static one */
	if (*in_slab)
		kfree(old_array);
	else if (old_array != memblock_memory_init_regions &&
		 old_array != memblock_reserved_init_regions)
		memblock_free(__pa(old_array), old_alloc_size);

	/*
	 * Reserve the new array if that comes from the memblock.  Otherwise,
	 * we needn't do it.
	 */
	if (!use_slab)
		BUG_ON(memblock_reserve(addr, new_alloc_size));

	/* Update slab flag */
	*in_slab = use_slab;

	return 0;
}

/**
 * memblock_merge_regions - merge neighboring compatible regions
 * @type: memblock type to scan
 *
 * Scan @type and merge neighboring compatible regions.
 */
static void __init_memblock memblock_merge_regions(struct memblock_type *type)
{
	int i = 0;

	/* cnt never goes below 1 */
	while (i < type->cnt - 1) {
		struct memblock_region *this = &type->regions[i];
		struct memblock_region *next = &type->regions[i + 1];

		if (this->base + this->size != next->base ||
		    memblock_get_region_node(this) !=
		    memblock_get_region_node(next) ||
		    this->flags != next->flags) {
			BUG_ON(this->base + this->size > next->base);
			i++;
			continue;
		}

		this->size += next->size;
		/* move forward from next + 1, index of which is i + 2 */
		memmove(next, next + 1, (type->cnt - (i + 2)) * sizeof(*next));
		type->cnt--;
	}
}

/**
 * memblock_insert_region - insert new memblock region
 * @type:	memblock type to insert into
 * @idx:	index for the insertion point
 * @base:	base address of the new region
 * @size:	size of the new region
 * @nid:	node id of the new region
 * @flags:	flags of the new region
 *
 * Insert new memblock region [@base,@base+@size) into @type at @idx.
 * @type must already have extra room to accommodate the new region.
 */
static void __init_memblock memblock_insert_region(struct memblock_type *type,
						   int idx, phys_addr_t base,
						   phys_addr_t size,
						   int nid, unsigned long flags)
{
	struct memblock_region *rgn = &type->regions[idx];

	BUG_ON(type->cnt >= type->max);
	memmove(rgn + 1, rgn, (type->cnt - idx) * sizeof(*rgn));
	rgn->base = base;
	rgn->size = size;
	rgn->flags = flags;
	memblock_set_region_node(rgn, nid);
	type->cnt++;
	type->total_size += size;
}

/**
 * memblock_add_range - add new memblock region
 * @type: memblock type to add new region into
 * @base: base address of the new region
 * @size: size of the new region
 * @nid: nid of the new region
 * @flags: flags of the new region
 *
 * Add new memblock region [@base,@base+@size) into @type.  The new region
 * is allowed to overlap with existing ones - overlaps don't affect already
 * existing regions.  @type is guaranteed to be minimal (all neighbouring
 * compatible regions are merged) after the addition.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int __init_memblock memblock_add_range(struct memblock_type *type,
				phys_addr_t base, phys_addr_t size,
				int nid, unsigned long flags)
{
	bool insert = false;
	phys_addr_t obase = base;
	phys_addr_t end = base + memblock_cap_size(base, &size);
	int idx, nr_new;
	struct memblock_region *rgn;

	if (!size)
		return 0;

	/* special case for empty array */
	if (type->regions[0].size == 0) {
		WARN_ON(type->cnt != 1 || type->total_size);
		type->regions[0].base = base;
		type->regions[0].size = size;
		type->regions[0].flags = flags;
		memblock_set_region_node(&type->regions[0], nid);
		type->total_size = size;
		return 0;
	}
repeat:
	/*
	 * The following is executed twice.  Once with %false @insert and
	 * then with %true.  The first counts the number of regions needed
	 * to accommodate the new area.  The second actually inserts them.
	 */
	base = obase;
	nr_new = 0;

	for_each_memblock_type(type, rgn) {
		phys_addr_t rbase = rgn->base;
		phys_addr_t rend = rbase + rgn->size;

		if (rbase >= end)
			break;
		if (rend <= base)
			continue;
		/*
		 * @rgn overlaps.  If it separates the lower part of new
		 * area, insert that portion.
		 */
		if (rbase > base) {
#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
			WARN_ON(nid != memblock_get_region_node(rgn));
#endif
			WARN_ON(flags != rgn->flags);
			nr_new++;
			if (insert)
				memblock_insert_region(type, idx++, base,
						       rbase - base, nid,
						       flags);
		}
		/* area below @rend is dealt with, forget about it */
		base = min(rend, end);
	}

	/* insert the remaining portion */
	if (base < end) {
		nr_new++;
		if (insert)
			memblock_insert_region(type, idx, base, end - base,
					       nid, flags);
	}

	if (!nr_new)
		return 0;

	/*
	 * If this was the first round, resize array and repeat for actual
	 * insertions; otherwise, merge and return.
	 */
	if (!insert) {
		while (type->cnt + nr_new > type->max)
			if (memblock_double_array(type, obase, size) < 0)
				return -ENOMEM;
		insert = true;
		goto repeat;
	} else {
		memblock_merge_regions(type);
		return 0;
	}
}

int __init_memblock memblock_add_node(phys_addr_t base, phys_addr_t size,
				      int nid)
{
	return memblock_add_range(&memblock.memory, base, size, nid, 0);
}

int __init_memblock memblock_add(phys_addr_t base, phys_addr_t size)
{
	phys_addr_t end = base + size - 1;

	memblock_dbg("memblock_add: [%pa-%pa] %pF\n",
		     &base, &end, (void *)_RET_IP_);

	return memblock_add_range(&memblock.memory, base, size, MAX_NUMNODES, 0);
}
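
/*
 * Usage sketch (illustrative, hypothetical iterator and fields): early
 * arch code typically registers each RAM bank reported by firmware, and
 * this is where it all enters memblock:
 *
 *	for_each_fw_bank(bank)			// hypothetical
 *		memblock_add(bank->start, bank->size);
 */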

/**
 * memblock_isolate_range - isolate given range into disjoint memblocks
 * @type: memblock type to isolate range for
 * @base: base of range to isolate
 * @size: size of range to isolate
 * @start_rgn: out parameter for the start of isolated region
 * @end_rgn: out parameter for the end of isolated region
 *
 * Walk @type and ensure that regions don't cross the boundaries defined by
 * [@base,@base+@size).  Crossing regions are split at the boundaries,
 * which may create at most two more regions.  The index of the first
 * region inside the range is returned in *@start_rgn and end in *@end_rgn.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
static int __init_memblock memblock_isolate_range(struct memblock_type *type,
					phys_addr_t base, phys_addr_t size,
					int *start_rgn, int *end_rgn)
{
	phys_addr_t end = base + memblock_cap_size(base, &size);
	int idx;
	struct memblock_region *rgn;

	*start_rgn = *end_rgn = 0;

	if (!size)
		return 0;

	/* we'll create at most two more regions */
	while (type->cnt + 2 > type->max)
		if (memblock_double_array(type, base, size) < 0)
			return -ENOMEM;

	for_each_memblock_type(type, rgn) {
		phys_addr_t rbase = rgn->base;
		phys_addr_t rend = rbase + rgn->size;

		if (rbase >= end)
			break;
		if (rend <= base)
			continue;

		if (rbase < base) {
			/*
			 * @rgn intersects from below.  Split and continue
			 * to process the next region - the new top half.
			 */
			rgn->base = base;
			rgn->size -= base - rbase;
			type->total_size -= base - rbase;
			memblock_insert_region(type, idx, rbase, base - rbase,
					       memblock_get_region_node(rgn),
					       rgn->flags);
		} else if (rend > end) {
			/*
			 * @rgn intersects from above.  Split and redo the
			 * current region - the new bottom half.
			 */
			rgn->base = end;
			rgn->size -= end - rbase;
			type->total_size -= end - rbase;
			memblock_insert_region(type, idx--, rbase, end - rbase,
					       memblock_get_region_node(rgn),
					       rgn->flags);
		} else {
			/* @rgn is fully contained, record it */
			if (!*end_rgn)
				*start_rgn = idx;
			*end_rgn = idx + 1;
		}
	}

	return 0;
}
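
/*
 * Worked example: isolating [8M, 24M) from a single region [0, 32M)
 * splits it into [0, 8M) [8M, 24M) [24M, 32M); *start_rgn and *end_rgn
 * then delimit the middle region, so callers can remove it or change
 * its flags/nid without touching the rest.
 */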

static int __init_memblock memblock_remove_range(struct memblock_type *type,
					  phys_addr_t base, phys_addr_t size)
{
	int start_rgn, end_rgn;
	int i, ret;

	ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
	if (ret)
		return ret;

	for (i = end_rgn - 1; i >= start_rgn; i--)
		memblock_remove_region(type, i);
	return 0;
}

int __init_memblock memblock_remove(phys_addr_t base, phys_addr_t size)
{
	return memblock_remove_range(&memblock.memory, base, size);
}


int __init_memblock memblock_free(phys_addr_t base, phys_addr_t size)
{
	phys_addr_t end = base + size - 1;

	memblock_dbg("   memblock_free: [%pa-%pa] %pF\n",
		     &base, &end, (void *)_RET_IP_);

	kmemleak_free_part_phys(base, size);
	return memblock_remove_range(&memblock.reserved, base, size);
}

int __init_memblock memblock_reserve(phys_addr_t base, phys_addr_t size)
{
	phys_addr_t end = base + size - 1;

	memblock_dbg("memblock_reserve: [%pa-%pa] %pF\n",
		     &base, &end, (void *)_RET_IP_);

	return memblock_add_range(&memblock.reserved, base, size, MAX_NUMNODES, 0);
}
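
/*
 * Usage sketch (illustrative): reserving the kernel image itself so the
 * early allocator never hands it out.  _text/_end come from
 * <asm/sections.h>, already included above; the cast is for clarity.
 *
 *	memblock_reserve(__pa_symbol(_text),
 *			 (phys_addr_t)(_end - _text));
 */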

/**
 * memblock_setclr_flag - set or clear flag for a memory region
 * @base: base address of the region
 * @size: size of the region
 * @set: set or clear the flag
 * @flag: the flag to update
 *
 * This function isolates region [@base, @base + @size), and sets/clears flag
 *
 * Return 0 on success, -errno on failure.
 */
static int __init_memblock memblock_setclr_flag(phys_addr_t base,
				phys_addr_t size, int set, int flag)
{
	struct memblock_type *type = &memblock.memory;
	int i, ret, start_rgn, end_rgn;

	ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
	if (ret)
		return ret;

	for (i = start_rgn; i < end_rgn; i++)
		if (set)
			memblock_set_region_flags(&type->regions[i], flag);
		else
			memblock_clear_region_flags(&type->regions[i], flag);

	memblock_merge_regions(type);
	return 0;
}

/**
 * memblock_mark_hotplug - Mark hotpluggable memory with flag MEMBLOCK_HOTPLUG.
 * @base: the base phys addr of the region
 * @size: the size of the region
 *
 * Return 0 on success, -errno on failure.
 */
int __init_memblock memblock_mark_hotplug(phys_addr_t base, phys_addr_t size)
{
	return memblock_setclr_flag(base, size, 1, MEMBLOCK_HOTPLUG);
}

/**
 * memblock_clear_hotplug - Clear flag MEMBLOCK_HOTPLUG for a specified region.
 * @base: the base phys addr of the region
 * @size: the size of the region
 *
 * Return 0 on success, -errno on failure.
 */
int __init_memblock memblock_clear_hotplug(phys_addr_t base, phys_addr_t size)
{
	return memblock_setclr_flag(base, size, 0, MEMBLOCK_HOTPLUG);
}

/**
 * memblock_mark_mirror - Mark mirrored memory with flag MEMBLOCK_MIRROR.
 * @base: the base phys addr of the region
 * @size: the size of the region
 *
 * Return 0 on success, -errno on failure.
 */
int __init_memblock memblock_mark_mirror(phys_addr_t base, phys_addr_t size)
{
	system_has_some_mirror = true;

	return memblock_setclr_flag(base, size, 1, MEMBLOCK_MIRROR);
}

/**
 * memblock_mark_nomap - Mark a memory region with flag MEMBLOCK_NOMAP.
 * @base: the base phys addr of the region
 * @size: the size of the region
 *
 * Return 0 on success, -errno on failure.
 */
int __init_memblock memblock_mark_nomap(phys_addr_t base, phys_addr_t size)
{
	return memblock_setclr_flag(base, size, 1, MEMBLOCK_NOMAP);
}

/**
 * memblock_clear_nomap - Clear flag MEMBLOCK_NOMAP for a specified region.
 * @base: the base phys addr of the region
 * @size: the size of the region
 *
 * Return 0 on success, -errno on failure.
 */
int __init_memblock memblock_clear_nomap(phys_addr_t base, phys_addr_t size)
{
	return memblock_setclr_flag(base, size, 0, MEMBLOCK_NOMAP);
}

/**
 * __next_reserved_mem_region - next function for for_each_reserved_region()
 * @idx: pointer to u64 loop variable
 * @out_start: ptr to phys_addr_t for start address of the region, can be %NULL
 * @out_end: ptr to phys_addr_t for end address of the region, can be %NULL
 *
 * Iterate over all reserved memory regions.
 */
void __init_memblock __next_reserved_mem_region(u64 *idx,
					   phys_addr_t *out_start,
					   phys_addr_t *out_end)
{
	struct memblock_type *type = &memblock.reserved;

	if (*idx < type->cnt) {
		struct memblock_region *r = &type->regions[*idx];
		phys_addr_t base = r->base;
		phys_addr_t size = r->size;

		if (out_start)
			*out_start = base;
		if (out_end)
			*out_end = base + size - 1;

		*idx += 1;
		return;
	}

	/* signal end of iteration */
	*idx = ULLONG_MAX;
}
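
/*
 * This is the backend of for_each_reserved_mem_region().  A typical
 * loop (illustrative) walks every reserved block, e.g. to log it:
 *
 *	phys_addr_t start, end;
 *	u64 i;
 *
 *	for_each_reserved_mem_region(i, &start, &end)
 *		pr_info("reserved: [%pa-%pa]\n", &start, &end);
 */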

/**
 * __next_mem_range - next function for for_each_free_mem_range() etc.
 * @idx: pointer to u64 loop variable
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @type_a: pointer to memblock_type from where the range is taken
 * @type_b: pointer to memblock_type which excludes memory from being taken
 * @out_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @out_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @out_nid: ptr to int for nid of the range, can be %NULL
 *
 * Find the first area from *@idx which matches @nid, fill the out
 * parameters, and update *@idx for the next iteration.  The lower 32bit of
 * *@idx contains index into type_a and the upper 32bit indexes the
 * areas before each region in type_b.  For example, if type_b regions
 * look like the following,
 *
 *	0:[0-16), 1:[32-48), 2:[128-130)
 *
 * The upper 32bit indexes the following regions.
 *
 *	0:[0-0), 1:[16-32), 2:[48-128), 3:[130-MAX)
 *
 * As both region arrays are sorted, the function advances the two indices
 * in lockstep and returns each intersection.
 */
void __init_memblock __next_mem_range(u64 *idx, int nid, ulong flags,
				      struct memblock_type *type_a,
				      struct memblock_type *type_b,
				      phys_addr_t *out_start,
				      phys_addr_t *out_end, int *out_nid)
{
	int idx_a = *idx & 0xffffffff;
	int idx_b = *idx >> 32;

	if (WARN_ONCE(nid == MAX_NUMNODES,
	    "Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n"))
		nid = NUMA_NO_NODE;

	for (; idx_a < type_a->cnt; idx_a++) {
		struct memblock_region *m = &type_a->regions[idx_a];

		phys_addr_t m_start = m->base;
		phys_addr_t m_end = m->base + m->size;
		int	    m_nid = memblock_get_region_node(m);

		/* only memory regions are associated with nodes, check it */
		if (nid != NUMA_NO_NODE && nid != m_nid)
			continue;

		/* skip hotpluggable memory regions if needed */
		if (movable_node_is_enabled() && memblock_is_hotpluggable(m))
			continue;

		/* if we want mirror memory skip non-mirror memory regions */
		if ((flags & MEMBLOCK_MIRROR) && !memblock_is_mirror(m))
			continue;

		/* skip nomap memory unless we were asked for it explicitly */
		if (!(flags & MEMBLOCK_NOMAP) && memblock_is_nomap(m))
			continue;

		if (!type_b) {
			if (out_start)
				*out_start = m_start;
			if (out_end)
				*out_end = m_end;
			if (out_nid)
				*out_nid = m_nid;
			idx_a++;
			*idx = (u32)idx_a | (u64)idx_b << 32;
			return;
		}

		/* scan areas before each reservation */
		for (; idx_b < type_b->cnt + 1; idx_b++) {
			struct memblock_region *r;
			phys_addr_t r_start;
			phys_addr_t r_end;

			r = &type_b->regions[idx_b];
			r_start = idx_b ? r[-1].base + r[-1].size : 0;
			r_end = idx_b < type_b->cnt ?
				r->base : ULLONG_MAX;

			/*
			 * if idx_b advanced past idx_a,
			 * break out to advance idx_a
			 */
			if (r_start >= m_end)
				break;
			/* if the two regions intersect, we're done */
			if (m_start < r_end) {
				if (out_start)
					*out_start =
						max(m_start, r_start);
				if (out_end)
					*out_end = min(m_end, r_end);
				if (out_nid)
					*out_nid = m_nid;
				/*
				 * The region which ends first is
				 * advanced for the next iteration.
				 */
				if (m_end <= r_end)
					idx_a++;
				else
					idx_b++;
				*idx = (u32)idx_a | (u64)idx_b << 32;
				return;
			}
		}
	}

	/* signal end of iteration */
	*idx = ULLONG_MAX;
}
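
/*
 * This is the backend of for_each_free_mem_range() and friends, with
 * @type_a = memory and @type_b = reserved.  Illustrative use:
 *
 *	phys_addr_t start, end;
 *	u64 i;
 *
 *	for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE,
 *				&start, &end, NULL)
 *		pr_info("free: [%pa-%pa]\n", &start, &end);
 */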

/**
 * __next_mem_range_rev - generic next function for for_each_*_range_rev()
 *
 * Finds the next range from type_a which is not marked as unsuitable
 * in type_b.
 *
 * @idx: pointer to u64 loop variable
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @type_a: pointer to memblock_type from where the range is taken
 * @type_b: pointer to memblock_type which excludes memory from being taken
 * @out_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @out_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @out_nid: ptr to int for nid of the range, can be %NULL
 *
 * Reverse of __next_mem_range().
 */
void __init_memblock __next_mem_range_rev(u64 *idx, int nid, ulong flags,
					  struct memblock_type *type_a,
					  struct memblock_type *type_b,
					  phys_addr_t *out_start,
					  phys_addr_t *out_end, int *out_nid)
{
	int idx_a = *idx & 0xffffffff;
	int idx_b = *idx >> 32;

	if (WARN_ONCE(nid == MAX_NUMNODES, "Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n"))
		nid = NUMA_NO_NODE;

	if (*idx == (u64)ULLONG_MAX) {
		idx_a = type_a->cnt - 1;
		if (type_b != NULL)
			idx_b = type_b->cnt;
		else
			idx_b = 0;
	}

	for (; idx_a >= 0; idx_a--) {
		struct memblock_region *m = &type_a->regions[idx_a];

		phys_addr_t m_start = m->base;
		phys_addr_t m_end = m->base + m->size;
		int m_nid = memblock_get_region_node(m);

		/* only memory regions are associated with nodes, check it */
		if (nid != NUMA_NO_NODE && nid != m_nid)
			continue;

		/* skip hotpluggable memory regions if needed */
		if (movable_node_is_enabled() && memblock_is_hotpluggable(m))
			continue;

		/* if we want mirror memory skip non-mirror memory regions */
		if ((flags & MEMBLOCK_MIRROR) && !memblock_is_mirror(m))
			continue;

		/* skip nomap memory unless we were asked for it explicitly */
		if (!(flags & MEMBLOCK_NOMAP) && memblock_is_nomap(m))
			continue;

		if (!type_b) {
			if (out_start)
				*out_start = m_start;
			if (out_end)
				*out_end = m_end;
			if (out_nid)
				*out_nid = m_nid;
			idx_a--;
			*idx = (u32)idx_a | (u64)idx_b << 32;
			return;
		}

		/* scan areas before each reservation */
		for (; idx_b >= 0; idx_b--) {
			struct memblock_region *r;
			phys_addr_t r_start;
			phys_addr_t r_end;

			r = &type_b->regions[idx_b];
			r_start = idx_b ? r[-1].base + r[-1].size : 0;
			r_end = idx_b < type_b->cnt ?
				r->base : ULLONG_MAX;
			/*
			 * if idx_b advanced past idx_a,
			 * break out to advance idx_a
			 */
			if (r_end <= m_start)
				break;
			/* if the two regions intersect, we're done */
			if (m_end > r_start) {
				if (out_start)
					*out_start = max(m_start, r_start);
				if (out_end)
					*out_end = min(m_end, r_end);
				if (out_nid)
					*out_nid = m_nid;
				if (m_start >= r_start)
					idx_a--;
				else
					idx_b--;
				*idx = (u32)idx_a | (u64)idx_b << 32;
				return;
			}
		}
	}
	/* signal end of iteration */
	*idx = ULLONG_MAX;
}

#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
/*
 * Common iterator interface used to define for_each_mem_range().
 */
void __init_memblock __next_mem_pfn_range(int *idx, int nid,
				unsigned long *out_start_pfn,
				unsigned long *out_end_pfn, int *out_nid)
{
	struct memblock_type *type = &memblock.memory;
	struct memblock_region *r;

	while (++*idx < type->cnt) {
		r = &type->regions[*idx];

		if (PFN_UP(r->base) >= PFN_DOWN(r->base + r->size))
			continue;
		if (nid == MAX_NUMNODES || nid == r->nid)
			break;
	}
	if (*idx >= type->cnt) {
		*idx = -1;
		return;
	}

	if (out_start_pfn)
		*out_start_pfn = PFN_UP(r->base);
	if (out_end_pfn)
		*out_end_pfn = PFN_DOWN(r->base + r->size);
	if (out_nid)
		*out_nid = r->nid;
}
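
/*
 * Backend of for_each_mem_pfn_range().  Illustrative use, e.g. when
 * sizing per-node data during paging init:
 *
 *	unsigned long start_pfn, end_pfn;
 *	int i, nid;
 *
 *	for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid)
 *		pr_info("node %d: pfns [%lx-%lx)\n", nid, start_pfn, end_pfn);
 */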

unsigned long __init_memblock memblock_next_valid_pfn(unsigned long pfn,
						      unsigned long max_pfn)
{
	struct memblock_type *type = &memblock.memory;
	unsigned int right = type->cnt;
	unsigned int mid, left = 0;
	phys_addr_t addr = PFN_PHYS(pfn + 1);

	do {
		mid = (right + left) / 2;

		if (addr < type->regions[mid].base)
			right = mid;
		else if (addr >= (type->regions[mid].base +
				  type->regions[mid].size))
			left = mid + 1;
		else {
			/* addr is within the region, so pfn + 1 is valid */
			return min(pfn + 1, max_pfn);
		}
	} while (left < right);

	if (right == type->cnt)
		return max_pfn;
	else
		return min(PHYS_PFN(type->regions[right].base), max_pfn);
}

/**
 * memblock_set_node - set node ID on memblock regions
 * @base: base of area to set node ID for
 * @size: size of area to set node ID for
 * @type: memblock type to set node ID for
 * @nid: node ID to set
 *
 * Set the nid of memblock @type regions in [@base,@base+@size) to @nid.
 * Regions which cross the area boundaries are split as necessary.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int __init_memblock memblock_set_node(phys_addr_t base, phys_addr_t size,
				      struct memblock_type *type, int nid)
{
	int start_rgn, end_rgn;
	int i, ret;

	ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
	if (ret)
		return ret;

	for (i = start_rgn; i < end_rgn; i++)
		memblock_set_region_node(&type->regions[i], nid);

	memblock_merge_regions(type);
	return 0;
}
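
/*
 * Usage sketch (illustrative, hypothetical affinity entry): NUMA init
 * code calls this once per memory-affinity record parsed from firmware:
 *
 *	memblock_set_node(ma->base, ma->length,
 *			  &memblock.memory, ma->nid);
 */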
#endif

static phys_addr_t __init memblock_alloc_range_nid(phys_addr_t size,
					phys_addr_t align, phys_addr_t start,
					phys_addr_t end, int nid, ulong flags)
{
	phys_addr_t found;

	if (!align)
		align = SMP_CACHE_BYTES;

	found = memblock_find_in_range_node(size, align, start, end, nid,
					    flags);
	if (found && !memblock_reserve(found, size)) {
		/*
		 * The min_count is set to 0 so that memblock allocations are
		 * never reported as leaks.
		 */
		kmemleak_alloc_phys(found, size, 0, 0);
		return found;
	}
	return 0;
}

phys_addr_t __init memblock_alloc_range(phys_addr_t size, phys_addr_t align,
					phys_addr_t start, phys_addr_t end,
					ulong flags)
{
	return memblock_alloc_range_nid(size, align, start, end, NUMA_NO_NODE,
					flags);
}

static phys_addr_t __init memblock_alloc_base_nid(phys_addr_t size,
					phys_addr_t align, phys_addr_t max_addr,
					int nid, ulong flags)
{
	return memblock_alloc_range_nid(size, align, 0, max_addr, nid, flags);
}

phys_addr_t __init memblock_alloc_nid(phys_addr_t size, phys_addr_t align, int nid)
{
	ulong flags = choose_memblock_flags();
	phys_addr_t ret;

again:
	ret = memblock_alloc_base_nid(size, align, MEMBLOCK_ALLOC_ACCESSIBLE,
				      nid, flags);

	if (!ret && (flags & MEMBLOCK_MIRROR)) {
		flags &= ~MEMBLOCK_MIRROR;
		goto again;
	}
	return ret;
}

phys_addr_t __init __memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr)
{
	return memblock_alloc_base_nid(size, align, max_addr, NUMA_NO_NODE,
				       MEMBLOCK_NONE);
}

phys_addr_t __init memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr)
{
	phys_addr_t alloc;

	alloc = __memblock_alloc_base(size, align, max_addr);

	if (alloc == 0)
		panic("ERROR: Failed to allocate %pa bytes below %pa.\n",
		      &size, &max_addr);

	return alloc;
}

phys_addr_t __init memblock_alloc(phys_addr_t size, phys_addr_t align)
{
	return memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ACCESSIBLE);
}
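
/*
 * Usage sketch (illustrative): a physically-addressed early allocation,
 * which panics on failure like all memblock_alloc_base() users:
 *
 *	phys_addr_t pgd_phys = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
 */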

phys_addr_t __init memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid)
{
	phys_addr_t res = memblock_alloc_nid(size, align, nid);

	if (res)
		return res;
	return memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ACCESSIBLE);
}

/**
 * memblock_virt_alloc_internal - allocate boot memory block
 * @size: size of memory block to be allocated in bytes
 * @align: alignment of the region and block's size
 * @min_addr: the lower bound of the memory region to allocate (phys address)
 * @max_addr: the upper bound of the memory region to allocate (phys address)
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 *
 * The @min_addr limit is dropped if it can not be satisfied and the allocation
 * will fall back to memory below @min_addr. Also, allocation may fall back
 * to any node in the system if the specified node can not
 * hold the requested memory.
 *
 * The allocation is performed from memory region limited by
 * memblock.current_limit if @max_addr == %BOOTMEM_ALLOC_ACCESSIBLE.
 *
 * The memory block is aligned on SMP_CACHE_BYTES if @align == 0.
 *
 * The phys address of allocated boot memory block is converted to virtual and
 * allocated memory is reset to 0.
 *
 * In addition, function sets the min_count to 0 using kmemleak_alloc for
 * allocated boot memory block, so that it is never reported as leaks.
 *
 * RETURNS:
 * Virtual address of allocated memory block on success, NULL on failure.
 */
static void * __init memblock_virt_alloc_internal(
				phys_addr_t size, phys_addr_t align,
				phys_addr_t min_addr, phys_addr_t max_addr,
				int nid)
{
	phys_addr_t alloc;
	void *ptr;
	ulong flags = choose_memblock_flags();

	if (WARN_ONCE(nid == MAX_NUMNODES, "Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n"))
		nid = NUMA_NO_NODE;

	/*
	 * Detect any accidental use of these APIs after slab is ready, as at
	 * this moment memblock may be deinitialized already and its
	 * internal data may be destroyed (after execution of free_all_bootmem)
	 */
	if (WARN_ON_ONCE(slab_is_available()))
		return kzalloc_node(size, GFP_NOWAIT, nid);

	if (!align)
		align = SMP_CACHE_BYTES;

	if (max_addr > memblock.current_limit)
		max_addr = memblock.current_limit;
again:
	alloc = memblock_find_in_range_node(size, align, min_addr, max_addr,
					    nid, flags);
	if (alloc && !memblock_reserve(alloc, size))
		goto done;

	if (nid != NUMA_NO_NODE) {
		alloc = memblock_find_in_range_node(size, align, min_addr,
						    max_addr, NUMA_NO_NODE,
						    flags);
		if (alloc && !memblock_reserve(alloc, size))
			goto done;
	}

	if (min_addr) {
		min_addr = 0;
		goto again;
	}

	if (flags & MEMBLOCK_MIRROR) {
		flags &= ~MEMBLOCK_MIRROR;
		pr_warn("Could not allocate %pap bytes of mirrored memory\n",
			&size);
		goto again;
	}

	return NULL;
done:
	ptr = phys_to_virt(alloc);
	memset(ptr, 0, size);

	/*
	 * The min_count is set to 0 so that bootmem allocated blocks
	 * are never reported as leaks. This is because many of these blocks
	 * are only referred via the physical address which is not
	 * looked up by kmemleak.
	 */
	kmemleak_alloc(ptr, size, 0, 0);

	return ptr;
}

/**
 * memblock_virt_alloc_try_nid_nopanic - allocate boot memory block
 * @size: size of memory block to be allocated in bytes
 * @align: alignment of the region and block's size
 * @min_addr: the lower bound of the memory region from where the allocation
 *	  is preferred (phys address)
 * @max_addr: the upper bound of the memory region from where the allocation
 *	      is preferred (phys address), or %BOOTMEM_ALLOC_ACCESSIBLE to
 *	      allocate only from memory limited by memblock.current_limit value
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 *
 * Public version of memblock_virt_alloc_internal() which provides additional
 * debug information (including caller info), if enabled.
 *
 * RETURNS:
 * Virtual address of allocated memory block on success, NULL on failure.
 */
void * __init memblock_virt_alloc_try_nid_nopanic(
				phys_addr_t size, phys_addr_t align,
				phys_addr_t min_addr, phys_addr_t max_addr,
				int nid)
{
	memblock_dbg("%s: %llu bytes align=0x%llx nid=%d from=0x%llx max_addr=0x%llx %pF\n",
		     __func__, (u64)size, (u64)align, nid, (u64)min_addr,
		     (u64)max_addr, (void *)_RET_IP_);
	return memblock_virt_alloc_internal(size, align, min_addr,
					     max_addr, nid);
}

/**
 * memblock_virt_alloc_try_nid - allocate boot memory block with panicking
 * @size: size of memory block to be allocated in bytes
 * @align: alignment of the region and block's size
 * @min_addr: the lower bound of the memory region from where the allocation
 *	  is preferred (phys address)
 * @max_addr: the upper bound of the memory region from where the allocation
 *	      is preferred (phys address), or %BOOTMEM_ALLOC_ACCESSIBLE to
 *	      allocate only from memory limited by memblock.current_limit value
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 *
 * Public panicking version of memblock_virt_alloc_try_nid_nopanic()
 * which provides debug information (including caller info), if enabled,
 * and panics if the request can not be satisfied.
 *
 * RETURNS:
 * Virtual address of allocated memory block on success, NULL on failure.
 */
void * __init memblock_virt_alloc_try_nid(
			phys_addr_t size, phys_addr_t align,
			phys_addr_t min_addr, phys_addr_t max_addr,
			int nid)
{
	void *ptr;

	memblock_dbg("%s: %llu bytes align=0x%llx nid=%d from=0x%llx max_addr=0x%llx %pF\n",
		     __func__, (u64)size, (u64)align, nid, (u64)min_addr,
		     (u64)max_addr, (void *)_RET_IP_);
	ptr = memblock_virt_alloc_internal(size, align,
					   min_addr, max_addr, nid);
	if (ptr)
		return ptr;

	panic("%s: Failed to allocate %llu bytes align=0x%llx nid=%d from=0x%llx max_addr=0x%llx\n",
	      __func__, (u64)size, (u64)align, nid, (u64)min_addr,
	      (u64)max_addr);
	return NULL;
}
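
/*
 * Usage sketch (illustrative, hypothetical variables): the virtual
 * address API is what early table setup usually wants, since the result
 * is already mapped and zeroed:
 *
 *	table = memblock_virt_alloc_try_nid(size, SMP_CACHE_BYTES, 0,
 *					    BOOTMEM_ALLOC_ACCESSIBLE,
 *					    NUMA_NO_NODE);
 */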

/**
 * __memblock_free_early - free boot memory block
 * @base: phys starting address of the  boot memory block
 * @size: size of the boot memory block in bytes
 *
 * Free boot memory block previously allocated by memblock_virt_alloc_xx() API.
 * The freeing memory will not be released to the buddy allocator.
 */
void __init __memblock_free_early(phys_addr_t base, phys_addr_t size)
{
	memblock_dbg("%s: [%#016llx-%#016llx] %pF\n",
		     __func__, (u64)base, (u64)base + size - 1,
		     (void *)_RET_IP_);
	kmemleak_free_part_phys(base, size);
	memblock_remove_range(&memblock.reserved, base, size);
}

/*
 * __memblock_free_late - free bootmem block pages directly to buddy allocator
 * @base: phys starting address of the  boot memory block
 * @size: size of the boot memory block in bytes
 *
 * This is only useful when the bootmem allocator has already been torn
 * down, but we are still initializing the system.  Pages are released directly
 * to the buddy allocator, no bootmem metadata is updated because it is gone.
 */
void __init __memblock_free_late(phys_addr_t base, phys_addr_t size)
{
	u64 cursor, end;

	memblock_dbg("%s: [%#016llx-%#016llx] %pF\n",
		     __func__, (u64)base, (u64)base + size - 1,
		     (void *)_RET_IP_);
	kmemleak_free_part_phys(base, size);
	cursor = PFN_UP(base);
	end = PFN_DOWN(base + size);

	for (; cursor < end; cursor++) {
		__free_pages_bootmem(pfn_to_page(cursor), cursor, 0);
		totalram_pages++;
	}
}

/*
 * Remaining API functions
 */

phys_addr_t __init_memblock memblock_phys_mem_size(void)
{
	return memblock.memory.total_size;
}

phys_addr_t __init_memblock memblock_reserved_size(void)
{
	return memblock.reserved.total_size;
}

phys_addr_t __init memblock_mem_size(unsigned long limit_pfn)
{
	unsigned long pages = 0;
	struct memblock_region *r;
	unsigned long start_pfn, end_pfn;

	for_each_memblock(memory, r) {
		start_pfn = memblock_region_memory_base_pfn(r);
		end_pfn = memblock_region_memory_end_pfn(r);
		start_pfn = min_t(unsigned long, start_pfn, limit_pfn);
		end_pfn = min_t(unsigned long, end_pfn, limit_pfn);
		pages += end_pfn - start_pfn;
	}

	return PFN_PHYS(pages);
}

/* lowest address */
phys_addr_t __init_memblock memblock_start_of_DRAM(void)
{
	return memblock.memory.regions[0].base;
}

phys_addr_t __init_memblock memblock_end_of_DRAM(void)
{
	int idx = memblock.memory.cnt - 1;

	return (memblock.memory.regions[idx].base + memblock.memory.regions[idx].size);
}

static phys_addr_t __init_memblock __find_max_addr(phys_addr_t limit)
{
	phys_addr_t max_addr = (phys_addr_t)ULLONG_MAX;
	struct memblock_region *r;

	/*
	 * translate the memory @limit size into the max address within one of
	 * the memory memblock regions, if the @limit exceeds the total size
	 * of those regions, max_addr will keep original value ULLONG_MAX
	 */
	for_each_memblock(memory, r) {
		if (limit <= r->size) {
			max_addr = r->base + limit;
			break;
		}
		limit -= r->size;
	}

	return max_addr;
}

void __init memblock_enforce_memory_limit(phys_addr_t limit)
{
	phys_addr_t max_addr = (phys_addr_t)ULLONG_MAX;

	if (!limit)
		return;

	max_addr = __find_max_addr(limit);

	/* @limit exceeds the total size of the memory, do nothing */
	if (max_addr == (phys_addr_t)ULLONG_MAX)
		return;

	/* truncate both memory and reserved regions */
	memblock_remove_range(&memblock.memory, max_addr,
			      (phys_addr_t)ULLONG_MAX);
	memblock_remove_range(&memblock.reserved, max_addr,
			      (phys_addr_t)ULLONG_MAX);
}

void __init memblock_cap_memory_range(phys_addr_t base, phys_addr_t size)
{
	int start_rgn, end_rgn;
	int i, ret;

	if (!size)
		return;

	ret = memblock_isolate_range(&memblock.memory, base, size,
				     &start_rgn, &end_rgn);
	if (ret)
		return;

	/* remove all the MAP regions above and below the kept range */
	for (i = memblock.memory.cnt - 1; i >= end_rgn; i--)
		if (!memblock_is_nomap(&memblock.memory.regions[i]))
			memblock_remove_region(&memblock.memory, i);

	for (i = start_rgn - 1; i >= 0; i--)
		if (!memblock_is_nomap(&memblock.memory.regions[i]))
			memblock_remove_region(&memblock.memory, i);

	/* truncate the reserved regions */
	memblock_remove_range(&memblock.reserved, 0, base);
	memblock_remove_range(&memblock.reserved,
			      base + size, (phys_addr_t)ULLONG_MAX);
}

void __init memblock_mem_limit_remove_map(phys_addr_t limit)
{
	phys_addr_t max_addr;

	if (!limit)
		return;

	max_addr = __find_max_addr(limit);

	/* @limit exceeds the total size of the memory, do nothing */
	if (max_addr == (phys_addr_t)ULLONG_MAX)
		return;

	memblock_cap_memory_range(0, max_addr);
}

static int __init_memblock memblock_search(struct memblock_type *type, phys_addr_t addr)
{
	unsigned int left = 0, right = type->cnt;

	do {
		unsigned int mid = (right + left) / 2;

		if (addr < type->regions[mid].base)
			right = mid;
		else if (addr >= (type->regions[mid].base +
				  type->regions[mid].size))
			left = mid + 1;
		else
			return mid;
	} while (left < right);
	return -1;
}

bool __init memblock_is_reserved(phys_addr_t addr)
{
	return memblock_search(&memblock.reserved, addr) != -1;
}

bool __init_memblock memblock_is_memory(phys_addr_t addr)
{
	return memblock_search(&memblock.memory, addr) != -1;
}

int __init_memblock memblock_is_map_memory(phys_addr_t addr)
{
	int i = memblock_search(&memblock.memory, addr);

	if (i == -1)
		return false;
	return !memblock_is_nomap(&memblock.memory.regions[i]);
}

#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
int __init_memblock memblock_search_pfn_nid(unsigned long pfn,
			 unsigned long *start_pfn, unsigned long *end_pfn)
{
	struct memblock_type *type = &memblock.memory;
	int mid = memblock_search(type, PFN_PHYS(pfn));

	if (mid == -1)
		return -1;

	*start_pfn = PFN_DOWN(type->regions[mid].base);
	*end_pfn = PFN_DOWN(type->regions[mid].base + type->regions[mid].size);

	return type->regions[mid].nid;
}
#endif

/**
 * memblock_is_region_memory - check if a region is a subset of memory
 * @base: base of region to check
 * @size: size of region to check
 *
 * Check if the region [@base, @base+@size) is a subset of a memory block.
 *
 * RETURNS:
 * 0 if false, non-zero if true
 */
int __init_memblock memblock_is_region_memory(phys_addr_t base, phys_addr_t size)
{
	int idx = memblock_search(&memblock.memory, base);
	phys_addr_t end = base + memblock_cap_size(base, &size);

	if (idx == -1)
		return 0;
	return (memblock.memory.regions[idx].base +
		memblock.memory.regions[idx].size) >= end;
}

/**
 * memblock_is_region_reserved - check if a region intersects reserved memory
 * @base: base of region to check
 * @size: size of region to check
 *
 * Check if the region [@base, @base+@size) intersects a reserved memory block.
 *
 * RETURNS:
 * True if they intersect, false if not.
 */
bool __init_memblock memblock_is_region_reserved(phys_addr_t base, phys_addr_t size)
{
	memblock_cap_size(base, &size);
	return memblock_overlaps_region(&memblock.reserved, base, size);
}

void __init_memblock memblock_trim_memory(phys_addr_t align)
{
	phys_addr_t start, end, orig_start, orig_end;
	struct memblock_region *r;

	for_each_memblock(memory, r) {
		orig_start = r->base;
		orig_end = r->base + r->size;
		start = round_up(orig_start, align);
		end = round_down(orig_end, align);

		if (start == orig_start && end == orig_end)
			continue;

		if (start < end) {
			r->base = start;
			r->size = end - start;
		} else {
			memblock_remove_region(&memblock.memory,
					       r - memblock.memory.regions);
			r--;
		}
	}
}

void __init_memblock memblock_set_current_limit(phys_addr_t limit)
{
	memblock.current_limit = limit;
}

phys_addr_t __init_memblock memblock_get_current_limit(void)
{
	return memblock.current_limit;
}

static void __init_memblock memblock_dump(struct memblock_type *type)
{
	phys_addr_t base, end, size;
	unsigned long flags;
	int idx;
	struct memblock_region *rgn;

	pr_info(" %s.cnt  = 0x%lx\n", type->name, type->cnt);

	for_each_memblock_type(type, rgn) {
		char nid_buf[32] = "";

		base = rgn->base;
		size = rgn->size;
		end = base + size - 1;
		flags = rgn->flags;
#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
		if (memblock_get_region_node(rgn) != MAX_NUMNODES)
			snprintf(nid_buf, sizeof(nid_buf), " on node %d",
				 memblock_get_region_node(rgn));
#endif
		pr_info(" %s[%#x]\t[%pa-%pa], %pa bytes%s flags: %#lx\n",
			type->name, idx, &base, &end, &size, nid_buf, flags);
	}
}

extern unsigned long __init_memblock
memblock_reserved_memory_within(phys_addr_t start_addr, phys_addr_t end_addr)
{
	struct memblock_region *rgn;
	unsigned long size = 0;
	int idx;

	/* sum the full size of every reserved region touching the range */
	for_each_memblock_type((&memblock.reserved), rgn) {
		phys_addr_t start, end;

		if (rgn->base + rgn->size < start_addr)
			continue;
		if (rgn->base > end_addr)
			continue;

		start = rgn->base;
		end = start + rgn->size;
		size += end - start;
	}

	return size;
}

void __init_memblock __memblock_dump_all(void)
{
	pr_info("MEMBLOCK configuration:\n");
	pr_info(" memory size = %pa reserved size = %pa\n",
		&memblock.memory.total_size,
		&memblock.reserved.total_size);

	memblock_dump(&memblock.memory);
	memblock_dump(&memblock.reserved);
#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
	memblock_dump(&memblock.physmem);
#endif
}

void __init memblock_allow_resize(void)
{
	memblock_can_resize = 1;
}

static int __init early_memblock(char *p)
{
	if (p && strstr(p, "debug"))
		memblock_debug = 1;
	return 0;
}
early_param("memblock", early_memblock);

#if defined(CONFIG_DEBUG_FS) && !defined(CONFIG_ARCH_DISCARD_MEMBLOCK)

static int memblock_debug_show(struct seq_file *m, void *private)
{
	struct memblock_type *type = m->private;
	struct memblock_region *reg;
	int i;
	phys_addr_t end;

	for (i = 0; i < type->cnt; i++) {
		reg = &type->regions[i];
		end = reg->base + reg->size - 1;

		seq_printf(m, "%4d: ", i);
		seq_printf(m, "%pa..%pa\n", &reg->base, &end);
	}
	return 0;
}

static int memblock_debug_open(struct inode *inode, struct file *file)
{
	return single_open(file, memblock_debug_show, inode->i_private);
}

static const struct file_operations memblock_debug_fops = {
	.open = memblock_debug_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int __init memblock_init_debugfs(void)
{
	struct dentry *root = debugfs_create_dir("memblock", NULL);

	if (!root)
		return -ENXIO;
	debugfs_create_file("memory", S_IRUGO, root, &memblock.memory, &memblock_debug_fops);
	debugfs_create_file("reserved", S_IRUGO, root, &memblock.reserved, &memblock_debug_fops);
#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
	debugfs_create_file("physmem", S_IRUGO, root, &memblock.physmem, &memblock_debug_fops);
#endif

	return 0;
}
__initcall(memblock_init_debugfs);

#endif /* CONFIG_DEBUG_FS */