// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * PowerPC64 memory management initialization.
 *
 * Handles vmemmap population and teardown under CONFIG_SPARSEMEM_VMEMMAP,
 * and early MMU selection (hash vs. radix) on Book3S 64.
 */

#undef DEBUG

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/stddef.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/highmem.h>
#include <linux/idr.h>
#include <linux/nodemask.h>
#include <linux/module.h>
#include <linux/poison.h>
#include <linux/memblock.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>
#include <linux/of_fdt.h>
#include <linux/libfdt.h>
#include <linux/memremap.h>

#include <asm/pgalloc.h>
#include <asm/page.h>
#include <asm/prom.h>
#include <asm/rtas.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/mmu.h>
#include <linux/uaccess.h>
#include <asm/smp.h>
#include <asm/machdep.h>
#include <asm/tlb.h>
#include <asm/eeh.h>
#include <asm/processor.h>
#include <asm/mmzone.h>
#include <asm/cputable.h>
#include <asm/sections.h>
#include <asm/iommu.h>
#include <asm/vdso.h>

#include <mm/mmu_decl.h>

#ifdef CONFIG_SPARSEMEM_VMEMMAP
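/*
 * Given an address within the vmemmap, determine the start of the
 * struct page array for the subsection containing it. This has to be
 * done by hand, since the address handed in may not be subsection
 * aligned and subtracting unaligned struct page pointers is undefined.
 */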
static struct page * __meminit vmemmap_subsection_start(unsigned long vmemmap_addr)
{
	unsigned long start_pfn;
	unsigned long offset = vmemmap_addr - ((unsigned long)(vmemmap));

	/* Return the pfn of the start of the subsection. */
	start_pfn = (offset / sizeof(struct page)) & PAGE_SUBSECTION_MASK;
	return pfn_to_page(start_pfn);
}
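/*
 * A single vmemmap mapping page can back the struct pages of more than
 * one sub-section, and memory is hot-added and removed in sub-section
 * chunks. Before creating a new mapping, check whether any sub-section
 * in the range already has a populated memmap.
 */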
static int __meminit vmemmap_populated(unsigned long vmemmap_addr, int vmemmap_map_size)
{
	struct page *start;
	unsigned long vmemmap_end = vmemmap_addr + vmemmap_map_size;

	start = vmemmap_subsection_start(vmemmap_addr);

	for (; (unsigned long)start < vmemmap_end; start += PAGES_PER_SUBSECTION)
		/*
		 * pfn valid check here is intended to really check
		 * whether we have any subsection already initialized
		 * in this range.
		 */
		if (pfn_valid(page_to_pfn(start)))
			return 1;

	return 0;
}
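/*
 * vmemmap virtual address space management does not have a traditional
 * page table to track which virtual struct pages are backed by a
 * physical mapping. The virtual-to-physical mappings are instead
 * recorded in a simple linked list of vmemmap_backing structures:
 * 'vmemmap_list' heads the list of active mappings, and 'next' heads a
 * freelist of recycled entries.
 */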
struct vmemmap_backing *vmemmap_list;
static struct vmemmap_backing *next;
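/*
 * 'next' also serves as a bump pointer into the most recently allocated
 * page of vmemmap_backing entries: 'num_left' counts the unused entries
 * remaining in that page, while 'num_freed' counts the recycled entries
 * on the freelist.
 */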
static int num_left;
static int num_freed;

static __meminit struct vmemmap_backing * vmemmap_list_alloc(int node)
{
	struct vmemmap_backing *vmem_back;

	/* get from freed entries first */
	if (num_freed) {
		num_freed--;
		vmem_back = next;
		next = next->list;

		return vmem_back;
	}

	/* allocate a page when required and hand out chunks */
	if (!num_left) {
		next = vmemmap_alloc_block(PAGE_SIZE, node);
		if (unlikely(!next)) {
			WARN_ON(1);
			return NULL;
		}
		num_left = PAGE_SIZE / sizeof(struct vmemmap_backing);
	}

	num_left--;

	return next++;
}

static __meminit int vmemmap_list_populate(unsigned long phys,
					   unsigned long start,
					   int node)
{
	struct vmemmap_backing *vmem_back;

	vmem_back = vmemmap_list_alloc(node);
	if (unlikely(!vmem_back)) {
		pr_debug("vmemmap list allocation failed\n");
		return -ENOMEM;
	}

	vmem_back->phys = phys;
	vmem_back->virt_addr = start;
	vmem_back->list = vmemmap_list;

	vmemmap_list = vmem_back;
	return 0;
}
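/*
 * The vmemmap chunk at 'start' holds the struct pages for
 * page_size / sizeof(struct page) pfns. Return true if any of those
 * pfns fall outside the altmap's range, in which case the chunk must
 * not be allocated from the altmap.
 */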
static bool altmap_cross_boundary(struct vmem_altmap *altmap, unsigned long start,
				  unsigned long page_size)
{
	unsigned long nr_pfn = page_size / sizeof(struct page);
	unsigned long start_pfn = page_to_pfn((struct page *)start);

	if ((start_pfn + nr_pfn) > altmap->end_pfn)
		return true;

	if (start_pfn < altmap->base_pfn)
		return true;

	return false;
}
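/*
 * Populate the vmemmap for [start, end): allocate backing blocks (from
 * the altmap when possible), record each block on vmemmap_list, and
 * create the kernel mapping for it.
 */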
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
			       struct vmem_altmap *altmap)
{
	bool altmap_alloc;
	unsigned long page_size = 1 << mmu_psize_defs[mmu_vmemmap_psize].shift;

	/* Align to the page size of the linear mapping. */
	start = ALIGN_DOWN(start, page_size);

	pr_debug("vmemmap_populate %lx..%lx, node %d\n", start, end, node);

	for (; start < end; start += page_size) {
		void *p = NULL;
		int rc;

		/*
		 * This vmemmap range is backing different subsections. If any
		 * of those subsections is marked valid, that means we already
		 * have initialized a page table covering this range and hence
		 * the vmemmap range is populated.
		 */
		if (vmemmap_populated(start, page_size))
			continue;

		/*
		 * Allocate from the altmap first if we have one. This may
		 * fail due to alignment issues when using 16MB hugepages, so
		 * fall back to system memory if the altmap allocation fails.
		 */
		if (altmap && !altmap_cross_boundary(altmap, start, page_size)) {
			p = vmemmap_alloc_block_buf(page_size, node, altmap);
			if (!p)
				pr_debug("altmap block allocation failed, falling back to system memory\n");
			else
				altmap_alloc = true;
		}
		if (!p) {
			p = vmemmap_alloc_block_buf(page_size, node, NULL);
			altmap_alloc = false;
		}
		if (!p)
			return -ENOMEM;

		if (vmemmap_list_populate(__pa(p), start, node)) {
			/*
			 * If we don't populate the vmemmap list, we don't
			 * have the ability to free the allocated vmemmap
			 * pages in section_deactivate. Hence free them
			 * here.
			 */
			int nr_pfns = page_size >> PAGE_SHIFT;
			unsigned long page_order = get_order(page_size);

			if (altmap_alloc)
				vmem_altmap_free(altmap, nr_pfns);
			else
				free_pages((unsigned long)p, page_order);
			return -ENOMEM;
		}

		pr_debug(" * %016lx..%016lx allocated at %p\n",
			 start, start + page_size, p);

		rc = vmemmap_create_mapping(start, page_size, __pa(p));
		if (rc < 0) {
			pr_warn("%s: Unable to create vmemmap mapping: %d\n",
				__func__, rc);
			return -EFAULT;
		}
	}

	return 0;
}

#ifdef CONFIG_MEMORY_HOTPLUG
static unsigned long vmemmap_list_free(unsigned long start)
{
	struct vmemmap_backing *vmem_back, *vmem_back_prev;

	vmem_back_prev = vmem_back = vmemmap_list;

	/* look for it with prev pointer recorded */
	for (; vmem_back; vmem_back = vmem_back->list) {
		if (vmem_back->virt_addr == start)
			break;
		vmem_back_prev = vmem_back;
	}

	if (unlikely(!vmem_back))
		return 0;

	/* remove it from vmemmap_list */
	if (vmem_back == vmemmap_list)
		vmemmap_list = vmem_back->list;
	else
		vmem_back_prev->list = vmem_back->list;

	/* next points to this freed entry */
	vmem_back->list = next;
	next = vmem_back;
	num_freed++;

	return vmem_back->phys;
}

void __ref vmemmap_free(unsigned long start, unsigned long end,
			struct vmem_altmap *altmap)
{
	unsigned long page_size = 1 << mmu_psize_defs[mmu_vmemmap_psize].shift;
	unsigned long page_order = get_order(page_size);
	unsigned long alt_start = ~0, alt_end = ~0;
	unsigned long base_pfn;

	start = ALIGN_DOWN(start, page_size);
	if (altmap) {
		alt_start = altmap->base_pfn;
		alt_end = altmap->base_pfn + altmap->reserve +
			  altmap->free + altmap->alloc + altmap->align;
	}

	pr_debug("vmemmap_free %lx...%lx\n", start, end);

	for (; start < end; start += page_size) {
		unsigned long nr_pages, addr;
		struct page *page;

		/*
		 * We have already marked the subsection we are trying to
		 * remove invalid. So if we want to remove the vmemmap range,
		 * we need to make sure there is no subsection marked valid
		 * in this range.
		 */
		if (vmemmap_populated(start, page_size))
			continue;

		addr = vmemmap_list_free(start);
		if (!addr)
			continue;

		page = pfn_to_page(addr >> PAGE_SHIFT);
		nr_pages = 1 << page_order;
		base_pfn = PHYS_PFN(addr);

		if (base_pfn >= alt_start && base_pfn < alt_end) {
			vmem_altmap_free(altmap, nr_pages);
		} else if (PageReserved(page)) {
			/* allocated from bootmem */
			if (page_size < PAGE_SIZE) {
				/*
				 * this shouldn't happen, but if it is
				 * the case, leave the memory there
				 */
				WARN_ON_ONCE(1);
			} else {
				while (nr_pages--)
					free_reserved_page(page++);
			}
		} else {
			free_pages((unsigned long)(__va(addr)), page_order);
		}

		vmemmap_remove_mapping(start, page_size);
	}
}
#endif /* CONFIG_MEMORY_HOTPLUG */

void register_page_bootmem_memmap(unsigned long section_nr,
				  struct page *start_page, unsigned long size)
{
}

#endif /* CONFIG_SPARSEMEM_VMEMMAP */

#ifdef CONFIG_PPC_BOOK3S_64
static bool disable_radix = !IS_ENABLED(CONFIG_PPC_RADIX_MMU_DEFAULT);

static int __init parse_disable_radix(char *p)
{
	bool val;

	/* a bare "disable_radix" with no argument means true */
	if (!p)
		val = true;
	else if (kstrtobool(p, &val))
		return -EINVAL;

	disable_radix = val;

	return 0;
}
early_param("disable_radix", parse_disable_radix);
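/*
 * If we're running under a hypervisor, check the contents of
 * /chosen/ibm,architecture-vec-5 to see whether the hypervisor is
 * willing to do radix. If not, clear the radix feature bit so we fall
 * back to hash.
 */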
static void __init early_check_vec5(void)
{
	unsigned long root, chosen;
	int size;
	const u8 *vec5;
	u8 mmu_supported;

	root = of_get_flat_dt_root();
	chosen = of_get_flat_dt_subnode_by_name(root, "chosen");
	if (chosen == -FDT_ERR_NOTFOUND) {
		cur_cpu_spec->mmu_features &= ~MMU_FTR_TYPE_RADIX;
		return;
	}
	vec5 = of_get_flat_dt_prop(chosen, "ibm,architecture-vec-5", &size);
	if (!vec5) {
		cur_cpu_spec->mmu_features &= ~MMU_FTR_TYPE_RADIX;
		return;
	}
	if (size <= OV5_INDX(OV5_MMU_SUPPORT)) {
		cur_cpu_spec->mmu_features &= ~MMU_FTR_TYPE_RADIX;
		return;
	}

	/* Check for supported configuration */
	mmu_supported = vec5[OV5_INDX(OV5_MMU_SUPPORT)] &
			OV5_FEAT(OV5_MMU_SUPPORT);
	if (mmu_supported == OV5_FEAT(OV5_MMU_RADIX)) {
		/* Hypervisor only supports radix - check enabled && GTSE */
		if (!early_radix_enabled()) {
			pr_warn("WARNING: Ignoring cmdline option disable_radix\n");
		}
		if (!(vec5[OV5_INDX(OV5_RADIX_GTSE)] &
		      OV5_FEAT(OV5_RADIX_GTSE))) {
			cur_cpu_spec->mmu_features &= ~MMU_FTR_GTSE;
		} else
			cur_cpu_spec->mmu_features |= MMU_FTR_GTSE;
		/* Do radix anyway - the hypervisor said we had to */
		cur_cpu_spec->mmu_features |= MMU_FTR_TYPE_RADIX;
	} else if (mmu_supported == OV5_FEAT(OV5_MMU_HASH)) {
		/* Hypervisor only supports hash - disable radix */
		cur_cpu_spec->mmu_features &= ~MMU_FTR_TYPE_RADIX;
		cur_cpu_spec->mmu_features &= ~MMU_FTR_GTSE;
	}
}

void __init mmu_early_init_devtree(void)
{
	/* Disable radix mode based on kernel command line. */
	if (disable_radix)
		cur_cpu_spec->mmu_features &= ~MMU_FTR_TYPE_RADIX;

	/*
	 * Check /chosen/ibm,architecture-vec-5 if running as a guest.
	 * When running bare-metal, we can use radix if we like even
	 * though the ibm,architecture-vec-5 property may not have the
	 * necessary bits set.
	 */
	if (!(mfmsr() & MSR_HV))
		early_check_vec5();

	if (early_radix_enabled()) {
		radix__early_init_devtree();

		/*
		 * We have finalized the translation we are going to use by
		 * now. Radix mode is not limited by RMA / VRMA addressing,
		 * so don't limit memblock allocations.
		 */
		ppc64_rma_size = ULONG_MAX;
		memblock_set_current_limit(MEMBLOCK_ALLOC_ANYWHERE);
	} else
		hash__early_init_devtree();
}
#endif /* CONFIG_PPC_BOOK3S_64 */