// SPDX-License-Identifier: GPL-2.0-only
/*
 * Based on arch/arm/mm/init.c
 *
 * Copyright (C) 1995-2005 Russell King
 * Copyright (C) 2012 ARM Ltd.
 */

#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/errno.h>
#include <linux/swap.h>
#include <linux/init.h>
#include <linux/cache.h>
#include <linux/mman.h>
#include <linux/nodemask.h>
#include <linux/initrd.h>
#include <linux/gfp.h>
#include <linux/memblock.h>
#include <linux/sort.h>
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/dma-direct.h>
#include <linux/dma-map-ops.h>
#include <linux/efi.h>
#include <linux/swiotlb.h>
#include <linux/sizes.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/kexec.h>
#include <linux/crash_dump.h>
#include <linux/hugetlb.h>
#include <linux/acpi_iort.h>
#include <linux/kmemleak.h>

#include <asm/boot.h>
#include <asm/fixmap.h>
#include <asm/kasan.h>
#include <asm/kernel-pgtable.h>
#include <asm/kvm_host.h>
#include <asm/memory.h>
#include <asm/numa.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/tlb.h>
#include <asm/alternative.h>
#include <asm/xen/swiotlb-xen.h>

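/*
 * We need to be able to catch inadvertent references to memstart_addr
 * that occur (potentially in generic code) before arm64_memblock_init()
 * executes, which assigns it its actual value. So use a default value
 * that cannot be mistaken for a real physical address.
 */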
s64 memstart_addr __ro_after_init = -1;
EXPORT_SYMBOL(memstart_addr);

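/*
 * Upper bound of the physical address range that device DMA must be
 * able to reach: the ZONE_DMA limit when CONFIG_ZONE_DMA is enabled,
 * else the ZONE_DMA32 limit, else the top of the physical address
 * space. Set up in zone_sizes_init() and used below when sizing
 * SWIOTLB and the CMA area.
 */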
phys_addr_t arm64_dma_phys_limit __ro_after_init;

#ifdef CONFIG_KEXEC_CORE
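/*
 * reserve_crashkernel() - reserves memory for crash kernel
 *
 * This function reserves memory area given in "crashkernel=" kernel command
 * line parameter. The memory reserved is used by dump capture kernel when
 * primary kernel is crashing.
 */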
static void __init reserve_crashkernel(void)
{
	unsigned long long crash_base, crash_size;
	unsigned long long crash_max = arm64_dma_phys_limit;
	int ret;

	ret = parse_crashkernel(boot_command_line, memblock_phys_mem_size(),
				&crash_size, &crash_base);
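	/* no crashkernel= or invalid value specified */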
	if (ret || !crash_size)
		return;

	crash_size = PAGE_ALIGN(crash_size);

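	/* User specifies base address explicitly. */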
	if (crash_base)
		crash_max = crash_base + crash_size;

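	/* Current arm64 boot protocol requires 2MB alignment */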
	crash_base = memblock_phys_alloc_range(crash_size, SZ_2M,
					       crash_base, crash_max);
	if (!crash_base) {
		pr_warn("cannot allocate crashkernel (size:0x%llx)\n",
			crash_size);
		return;
	}

	pr_info("crashkernel reserved: 0x%016llx - 0x%016llx (%lld MB)\n",
		crash_base, crash_base + crash_size, crash_size >> 20);

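	/*
	 * The crashkernel memory will be removed from the kernel linear
	 * map. Inform kmemleak so that it won't try to access it.
	 */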
	kmemleak_ignore_phys(crash_base);
	crashk_res.start = crash_base;
	crashk_res.end = crash_base + crash_size - 1;
}
#else
static void __init reserve_crashkernel(void)
{
}
#endif

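/*
 * Return the maximum physical address for a zone accessible by the given bits
 * limit. If DRAM starts above 32-bit, expand the zone to the maximum
 * available memory, otherwise cap it at 32-bit.
 */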
static phys_addr_t __init max_zone_phys(unsigned int zone_bits)
{
	phys_addr_t zone_mask = DMA_BIT_MASK(zone_bits);
	phys_addr_t phys_start = memblock_start_of_DRAM();

	if (phys_start > U32_MAX)
		zone_mask = PHYS_ADDR_MAX;
	else if (phys_start > zone_mask)
		zone_mask = U32_MAX;

	return min(zone_mask, memblock_end_of_DRAM() - 1) + 1;
}

static void __init zone_sizes_init(unsigned long min, unsigned long max)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES] = {0};
	unsigned int __maybe_unused acpi_zone_dma_bits;
	unsigned int __maybe_unused dt_zone_dma_bits;
	phys_addr_t __maybe_unused dma32_phys_limit = max_zone_phys(32);

#ifdef CONFIG_ZONE_DMA
	acpi_zone_dma_bits = fls64(acpi_iort_dma_get_max_cpu_address());
	dt_zone_dma_bits = fls64(of_dma_get_max_cpu_address(NULL));
	zone_dma_bits = min3(32U, dt_zone_dma_bits, acpi_zone_dma_bits);
	arm64_dma_phys_limit = max_zone_phys(zone_dma_bits);
	max_zone_pfns[ZONE_DMA] = PFN_DOWN(arm64_dma_phys_limit);
#endif
#ifdef CONFIG_ZONE_DMA32
	max_zone_pfns[ZONE_DMA32] = PFN_DOWN(dma32_phys_limit);
	if (!arm64_dma_phys_limit)
		arm64_dma_phys_limit = dma32_phys_limit;
#endif
	if (!arm64_dma_phys_limit)
		arm64_dma_phys_limit = PHYS_MASK + 1;
	max_zone_pfns[ZONE_NORMAL] = max;

	free_area_init(max_zone_pfns);
}

int pfn_valid(unsigned long pfn)
{
	phys_addr_t addr = PFN_PHYS(pfn);
	struct mem_section *ms;

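	/*
	 * Ensure the upper PAGE_SHIFT bits are clear in the
	 * pfn. Else it might lead to false positives when
	 * some of the upper bits are set, but the lower bits
	 * match a valid pfn.
	 */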
	if (PHYS_PFN(addr) != pfn)
		return 0;

	if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS)
		return 0;

	ms = __pfn_to_section(pfn);
	if (!valid_section(ms))
		return 0;

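	/*
	 * ZONE_DEVICE memory does not have the memblock entries.
	 * memblock_is_map_memory() check for ZONE_DEVICE based
	 * addresses will always fail. Even the normal hotplugged
	 * memory will never have MEMBLOCK_NOMAP flag set in their
	 * memblock entries. Skip memblock search for all non early
	 * memory sections covering all of hotplug memory including
	 * both normal and ZONE_DEVICE based.
	 */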
	if (!early_section(ms))
		return pfn_section_valid(ms, pfn);

	return memblock_is_memory(addr);
}
EXPORT_SYMBOL(pfn_valid);

int pfn_is_map_memory(unsigned long pfn)
{
	phys_addr_t addr = PFN_PHYS(pfn);

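	/* avoid false positives for bogus PFNs, see comment in pfn_valid() */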
	if (PHYS_PFN(addr) != pfn)
		return 0;

	return memblock_is_map_memory(addr);
}
EXPORT_SYMBOL(pfn_is_map_memory);

static phys_addr_t memory_limit = PHYS_ADDR_MAX;

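/*
 * Limit the memory size that was specified via FDT.
 */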
static int __init early_mem(char *p)
{
	if (!p)
		return 1;

	memory_limit = memparse(p, &p) & PAGE_MASK;
	pr_notice("Memory limited to %lldMB\n", memory_limit >> 20);

	return 0;
}
early_param("mem", early_mem);

void __init arm64_memblock_init(void)
{
	s64 linear_region_size = PAGE_END - _PAGE_OFFSET(vabits_actual);

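	/*
	 * Corner case: 52-bit VA capable systems running KVM in nVHE mode may
	 * be limited in their ability to support a linear map that exceeds 51
	 * bits of VA space, depending on the placement of the ID map. Given
	 * that the placement of the ID map may be randomized, let's cap the
	 * linear map to 51 bits as well, in this case.
	 */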
	if (IS_ENABLED(CONFIG_KVM) && vabits_actual == 52 &&
	    is_hyp_mode_available() && !is_kernel_in_hyp_mode()) {
		pr_info("Capping linear region to 51 bits for KVM in nVHE mode on LVA capable hardware.\n");
		linear_region_size = min_t(u64, linear_region_size, BIT(51));
	}

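	/* Remove memory above our supported physical address size */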
	memblock_remove(1ULL << PHYS_MASK_SHIFT, ULLONG_MAX);

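	/*
	 * Select a suitable value for the base of physical memory.
	 */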
	memstart_addr = round_down(memblock_start_of_DRAM(),
				   ARM64_MEMSTART_ALIGN);

	if ((memblock_end_of_DRAM() - memstart_addr) > linear_region_size)
		pr_warn("Memory doesn't fit in the linear mapping, VA_BITS too small\n");

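	/*
	 * Remove the memory that we will not be able to cover with the
	 * linear mapping. Take care not to clip the kernel which may be
	 * high in memory.
	 */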
	memblock_remove(max_t(u64, memstart_addr + linear_region_size,
			__pa_symbol(_end)), ULLONG_MAX);
	if (memstart_addr + linear_region_size < memblock_end_of_DRAM()) {
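		/* ensure that memstart_addr remains sufficiently aligned */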
		memstart_addr = round_up(memblock_end_of_DRAM() - linear_region_size,
					 ARM64_MEMSTART_ALIGN);
		memblock_remove(0, memstart_addr);
	}

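	/*
	 * If we are running with a 52-bit kernel VA config on a system that
	 * does not support it, we have to place the available physical
	 * memory in the 48-bit addressable part of the linear region, i.e.,
	 * we have to move it upward. Since memstart_addr represents the
	 * physical address of PAGE_OFFSET, we have to *subtract* from it.
	 */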
	if (IS_ENABLED(CONFIG_ARM64_VA_BITS_52) && (vabits_actual != 52))
		memstart_addr -= _PAGE_OFFSET(48) - _PAGE_OFFSET(52);

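	/*
	 * Apply the memory limit if it was set. Since the kernel may be loaded
	 * high up in memory, add back the kernel region that must be accessible
	 * via the linear mapping.
	 */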
	if (memory_limit != PHYS_ADDR_MAX) {
		memblock_mem_limit_remove_map(memory_limit);
		memblock_add(__pa_symbol(_text), (u64)(_end - _text));
	}

	if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) && phys_initrd_size) {
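		/*
		 * Add back the memory we just removed if it results in the
		 * initrd to become inaccessible via the linear mapping.
		 * Otherwise, this is a no-op
		 */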
		u64 base = phys_initrd_start & PAGE_MASK;
		u64 size = PAGE_ALIGN(phys_initrd_start + phys_initrd_size) - base;

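		/*
		 * We can only add back the initrd memory if we don't end up
		 * with more memory than we can address via the linear mapping.
		 * It is up to the bootloader to position the kernel and the
		 * initrd reasonably close to each other (i.e., within 32 GB of
		 * each other) so that all granule/#levels combinations can
		 * always access both.
		 */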
		if (WARN(base < memblock_start_of_DRAM() ||
			 base + size > memblock_start_of_DRAM() +
				       linear_region_size,
			"initrd not fully accessible via the linear mapping -- please check your bootloader ...\n")) {
			phys_initrd_size = 0;
		} else {
			memblock_remove(base, size); /* clear MEMBLOCK_ flags */
			memblock_add(base, size);
			memblock_reserve(base, size);
		}
	}

	if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) {
		extern u16 memstart_offset_seed;
		u64 mmfr0 = read_cpuid(ID_AA64MMFR0_EL1);
		int parange = cpuid_feature_extract_unsigned_field(
					mmfr0, ID_AA64MMFR0_PARANGE_SHIFT);
		s64 range = linear_region_size -
			    BIT(id_aa64mmfr0_parange_to_phys_shift(parange));

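		/*
		 * If the size of the linear region exceeds, by a sufficient
		 * margin, the size of the region that the physical memory can
		 * span, randomize the linear region as well.
		 */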
		if (memstart_offset_seed > 0 && range >= (s64)ARM64_MEMSTART_ALIGN) {
			range /= ARM64_MEMSTART_ALIGN;
			memstart_addr -= ARM64_MEMSTART_ALIGN *
					 ((range * memstart_offset_seed) >> 16);
		}
	}

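	/*
	 * Register the kernel text, kernel data, initrd, and initial
	 * pagetables with memblock.
	 */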
	memblock_reserve(__pa_symbol(_stext), _end - _stext);
	if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) && phys_initrd_size) {
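		/* the generic initrd code expects virtual addresses */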
		initrd_start = __phys_to_virt(phys_initrd_start);
		initrd_end = initrd_start + phys_initrd_size;
	}

	early_init_fdt_scan_reserved_mem();

	high_memory = __va(memblock_end_of_DRAM() - 1) + 1;
}

void __init bootmem_init(void)
{
	unsigned long min, max;

	min = PFN_UP(memblock_start_of_DRAM());
	max = PFN_DOWN(memblock_end_of_DRAM());

	early_memtest(min << PAGE_SHIFT, max << PAGE_SHIFT);

	max_pfn = max_low_pfn = max;
	min_low_pfn = min;

	arch_numa_init();

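	/*
	 * must be done after arch_numa_init() which calls numa_init() to
	 * initialize node_online_map that gets used in hugetlb_cma_reserve()
	 * while allocating required CMA size across online nodes.
	 */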
#if defined(CONFIG_HUGETLB_PAGE) && defined(CONFIG_CMA)
	arm64_hugetlb_cma_reserve();
#endif

	dma_pernuma_cma_reserve();

	kvm_hyp_reserve();

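	/*
	 * sparse_init() tries to allocate memory from memblock, so must be
	 * done after the fixed reservations
	 */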
	sparse_init();
	zone_sizes_init(min, max);

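	/*
	 * Reserve the CMA area after arm64_dma_phys_limit was initialised.
	 */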
	dma_contiguous_reserve(arm64_dma_phys_limit);

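	/*
	 * request_standard_resources() depends on crashkernel's memory being
	 * reserved, so do it here.
	 */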
	reserve_crashkernel();

	memblock_dump_all();
}

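/*
 * mem_init() marks the free areas in the mem_map and tells us how much memory
 * is free.  This is done after various parts of the system have claimed their
 * memory after the kernel image.
 */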
void __init mem_init(void)
{
	if (swiotlb_force == SWIOTLB_FORCE ||
	    max_pfn > PFN_DOWN(arm64_dma_phys_limit))
		swiotlb_init(1);
	else if (!xen_swiotlb_detect())
		swiotlb_force = SWIOTLB_NO_FORCE;

	set_max_mapnr(max_pfn - PHYS_PFN_OFFSET);

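	/* this will put all unused low memory onto the freelists */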
	memblock_free_all();

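	/*
	 * Check boundaries twice: Some fundamental inconsistencies can be
	 * detected at build time already.
	 */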
#ifdef CONFIG_COMPAT
	BUILD_BUG_ON(TASK_SIZE_32 > DEFAULT_MAP_WINDOW_64);
#endif

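	/*
	 * Selected page table levels should match when derived from
	 * scratch using the virtual address range and page size.
	 */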
	BUILD_BUG_ON(ARM64_HW_PGTABLE_LEVELS(CONFIG_ARM64_VA_BITS) !=
		     CONFIG_PGTABLE_LEVELS);

	if (PAGE_SIZE >= 16384 && get_num_physpages() <= 128) {
		extern int sysctl_overcommit_memory;
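		/*
		 * On a machine this small we won't get anywhere without
		 * overcommit, so turn it on by default.
		 */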
		sysctl_overcommit_memory = OVERCOMMIT_ALWAYS;
	}
}

void free_initmem(void)
{
	free_reserved_area(lm_alias(__init_begin),
			   lm_alias(__init_end),
			   POISON_FREE_INITMEM, "unused kernel");

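	/*
	 * Unmap the __init region but leave the VM area in place. This
	 * prevents the region from being reused for kernel modules, which
	 * is not supported by kallsyms.
	 */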
	vunmap_range((u64)__init_begin, (u64)__init_end);
}

void dump_mem_limit(void)
{
	if (memory_limit != PHYS_ADDR_MAX) {
		pr_emerg("Memory Limit: %llu MB\n", memory_limit >> 20);
	} else {
		pr_emerg("Memory Limit: none\n");
	}
}