/*
 * PowerPC64 memory management initialisation.
 *
 * Sets up the virtual memory map (vmemmap) used under
 * CONFIG_SPARSEMEM_VMEMMAP and performs early MMU feature selection
 * (hash vs. radix) from the flattened device tree.
 */
#undef DEBUG

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/stddef.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/highmem.h>
#include <linux/idr.h>
#include <linux/nodemask.h>
#include <linux/module.h>
#include <linux/poison.h>
#include <linux/memblock.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>
#include <linux/of_fdt.h>
#include <linux/libfdt.h>
#include <linux/memremap.h>

#include <asm/pgalloc.h>
#include <asm/page.h>
#include <asm/prom.h>
#include <asm/rtas.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <linux/uaccess.h>
#include <asm/smp.h>
#include <asm/machdep.h>
#include <asm/tlb.h>
#include <asm/eeh.h>
#include <asm/processor.h>
#include <asm/mmzone.h>
#include <asm/cputable.h>
#include <asm/sections.h>
#include <asm/iommu.h>
#include <asm/vdso.h>

#include "mmu_decl.h"

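/*
 * Physical address of the start of system memory and of the kernel
 * image, filled in during early boot and exported for modules.
 */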
phys_addr_t memstart_addr = ~0;
EXPORT_SYMBOL_GPL(memstart_addr);
phys_addr_t kernstart_addr;
EXPORT_SYMBOL_GPL(kernstart_addr);

#ifdef CONFIG_SPARSEMEM_VMEMMAP

/*
 * Given an address within the vmemmap, determine the page frame of the
 * start of the section it falls in.  This is done by hand because the
 * given address may not be correctly aligned, and subtraction of
 * non-aligned pointers produces undefined results.
 */
static unsigned long __meminit vmemmap_section_start(unsigned long page)
{
	unsigned long offset = page - ((unsigned long)(vmemmap));

	/* Return the pfn of the start of the section. */
	return (offset / sizeof(struct page)) & PAGE_SECTION_MASK;
}

/*
 * Check if this vmemmap page is already initialised.  If any section
 * which overlaps this vmemmap page is initialised then this page is
 * initialised already.
 */
static int __meminit vmemmap_populated(unsigned long start, int page_size)
{
	unsigned long end = start + page_size;
	start = (unsigned long)(pfn_to_page(vmemmap_section_start(start)));

	for (; start < end; start += (PAGES_PER_SECTION * sizeof(struct page)))
		if (pfn_valid(page_to_pfn((struct page *)start)))
			return 1;

	return 0;
}

/*
 * vmemmap virtual address space management does not have a traditional
 * page table to track which virtual struct pages are backed by a
 * physical mapping.  The virtual-to-physical mappings are instead
 * tracked in a simple linked list: 'vmemmap_list' maintains the entire
 * vmemmap physical mapping at all times, while 'next' heads a list of
 * vmemmap_backing entries that have been freed and may be reused.
 */
struct vmemmap_backing *vmemmap_list;
static struct vmemmap_backing *next;

/*
 * The same pointer 'next' also tracks individual chunks inside the
 * allocated full page during boot.  This is racy in principle, but the
 * two uses are separated by the boot process, so they never overlap;
 * it would break if memory hotplug could happen during boot.
 */
static int num_left;
static int num_freed;

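/*
 * Allocate a vmemmap_backing entry, preferring previously freed
 * entries and otherwise carving chunks out of a page-sized block.
 */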
static __meminit struct vmemmap_backing *vmemmap_list_alloc(int node)
{
	struct vmemmap_backing *vmem_back;

	/* get from freed entries first */
	if (num_freed) {
		num_freed--;
		vmem_back = next;
		next = next->list;

		return vmem_back;
	}

	/* allocate a page when required and hand out chunks */
	if (!num_left) {
		next = vmemmap_alloc_block(PAGE_SIZE, node);
		if (unlikely(!next)) {
			WARN_ON(1);
			return NULL;
		}
		num_left = PAGE_SIZE / sizeof(struct vmemmap_backing);
	}

	num_left--;

	return next++;
}

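/*
 * Record a new virtual->physical vmemmap backing in vmemmap_list so
 * that vmemmap_free() can later find the backing block for a range.
 */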
static __meminit void vmemmap_list_populate(unsigned long phys,
					    unsigned long start,
					    int node)
{
	struct vmemmap_backing *vmem_back;

	vmem_back = vmemmap_list_alloc(node);
	if (unlikely(!vmem_back)) {
		WARN_ON(1);
		return;
	}

	vmem_back->phys = phys;
	vmem_back->virt_addr = start;
	vmem_back->list = vmemmap_list;

	vmemmap_list = vmem_back;
}

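/*
 * Populate the vmemmap for [start, end): for each vmemmap page that is
 * not already backed, allocate a block (from the altmap if provided),
 * record it in vmemmap_list and create the kernel mapping for it.
 */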
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
		struct vmem_altmap *altmap)
{
	unsigned long page_size = 1 << mmu_psize_defs[mmu_vmemmap_psize].shift;

	/* Align to the page size of the linear mapping. */
	start = _ALIGN_DOWN(start, page_size);

	pr_debug("vmemmap_populate %lx..%lx, node %d\n", start, end, node);

	for (; start < end; start += page_size) {
		void *p;
		int rc;

		if (vmemmap_populated(start, page_size))
			continue;

		if (altmap)
			p = altmap_alloc_block_buf(page_size, altmap);
		else
			p = vmemmap_alloc_block_buf(page_size, node);
		if (!p)
			return -ENOMEM;

		vmemmap_list_populate(__pa(p), start, node);

		pr_debug("      * %016lx..%016lx allocated at %p\n",
			 start, start + page_size, p);

		rc = vmemmap_create_mapping(start, page_size, __pa(p));
		if (rc < 0) {
			pr_warn("%s: Unable to create vmemmap mapping: %d\n",
				__func__, rc);
			return -EFAULT;
		}
	}

	return 0;
}

#ifdef CONFIG_MEMORY_HOTPLUG
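/*
 * Unlink the vmemmap_list entry for 'start', move it to the freed list
 * for reuse, and return the physical address of its backing block
 * (0 if no entry is found).
 */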
static unsigned long vmemmap_list_free(unsigned long start)
{
	struct vmemmap_backing *vmem_back, *vmem_back_prev;

	vmem_back_prev = vmem_back = vmemmap_list;

	/* look for it with prev pointer recorded */
	for (; vmem_back; vmem_back = vmem_back->list) {
		if (vmem_back->virt_addr == start)
			break;
		vmem_back_prev = vmem_back;
	}

	if (unlikely(!vmem_back)) {
		WARN_ON(1);
		return 0;
	}

	/* remove it from vmemmap_list */
	if (vmem_back == vmemmap_list)
		vmemmap_list = vmem_back->list;
	else
		vmem_back_prev->list = vmem_back->list;

	/* next points to this freed entry */
	vmem_back->list = next;
	next = vmem_back;
	num_freed++;

	return vmem_back->phys;
}

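/*
 * Tear down the vmemmap for [start, end): for every vmemmap page whose
 * overlapping sections are all gone, return the backing block to the
 * allocator it came from (altmap, reserved bootmem or the buddy
 * allocator) and remove the kernel mapping.
 */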
void __ref vmemmap_free(unsigned long start, unsigned long end,
		struct vmem_altmap *altmap)
{
	unsigned long page_size = 1 << mmu_psize_defs[mmu_vmemmap_psize].shift;
	unsigned long page_order = get_order(page_size);

	start = _ALIGN_DOWN(start, page_size);

	pr_debug("vmemmap_free %lx...%lx\n", start, end);

	for (; start < end; start += page_size) {
		unsigned long nr_pages, addr;
		struct page *section_base;
		struct page *page;

		/*
		 * The section has already been marked invalid, so if
		 * vmemmap_populated() still returns true, another valid
		 * section shares this vmemmap page and the backing
		 * block must be kept.
		 */
		if (vmemmap_populated(start, page_size))
			continue;

		addr = vmemmap_list_free(start);
		if (!addr)
			continue;

		page = pfn_to_page(addr >> PAGE_SHIFT);
		section_base = pfn_to_page(vmemmap_section_start(start));
		nr_pages = 1 << page_order;

		if (altmap) {
			vmem_altmap_free(altmap, nr_pages);
		} else if (PageReserved(page)) {
			/* allocated from bootmem */
			if (page_size < PAGE_SIZE) {
				/*
				 * this shouldn't happen, but if it is
				 * the case, leave the memory there
				 */
				WARN_ON_ONCE(1);
			} else {
				while (nr_pages--)
					free_reserved_page(page++);
			}
		} else {
			free_pages((unsigned long)(__va(addr)), page_order);
		}

		vmemmap_remove_mapping(start, page_size);
	}
}
#endif /* CONFIG_MEMORY_HOTPLUG */
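/*
 * register_page_bootmem_memmap() is part of the sparse vmemmap
 * interface; powerpc has nothing to record, so this is a no-op.
 */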
void register_page_bootmem_memmap(unsigned long section_nr,
				  struct page *start_page, unsigned long size)
{
}

#endif /* CONFIG_SPARSEMEM_VMEMMAP */

#ifdef CONFIG_PPC_BOOK3S_64
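/*
 * "disable_radix" command line parameter: force the hash MMU even on
 * radix-capable CPUs.  A bare "disable_radix" means true; an explicit
 * value is parsed as a boolean.
 */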
static bool disable_radix = !IS_ENABLED(CONFIG_PPC_RADIX_MMU_DEFAULT);

static int __init parse_disable_radix(char *p)
{
	bool val;

	if (!p)
		val = true;
	else if (kstrtobool(p, &val))
		return -EINVAL;

	disable_radix = val;

	return 0;
}
early_param("disable_radix", parse_disable_radix);

/*
 * If we're running under a hypervisor, we need to check the contents of
 * /chosen/ibm,architecture-vec-5 to see if the hypervisor is willing to do
 * radix.  If not, we clear the radix feature bit so we fall back to hash.
 */
static void __init early_check_vec5(void)
{
	unsigned long root, chosen;
	int size;
	const u8 *vec5;
	u8 mmu_supported;

	root = of_get_flat_dt_root();
	chosen = of_get_flat_dt_subnode_by_name(root, "chosen");
	if (chosen == -FDT_ERR_NOTFOUND) {
		cur_cpu_spec->mmu_features &= ~MMU_FTR_TYPE_RADIX;
		return;
	}
	vec5 = of_get_flat_dt_prop(chosen, "ibm,architecture-vec-5", &size);
	if (!vec5) {
		cur_cpu_spec->mmu_features &= ~MMU_FTR_TYPE_RADIX;
		return;
	}
	if (size <= OV5_INDX(OV5_MMU_SUPPORT)) {
		cur_cpu_spec->mmu_features &= ~MMU_FTR_TYPE_RADIX;
		return;
	}

	/* Check for supported configuration */
	mmu_supported = vec5[OV5_INDX(OV5_MMU_SUPPORT)] &
			OV5_FEAT(OV5_MMU_SUPPORT);
	if (mmu_supported == OV5_FEAT(OV5_MMU_RADIX)) {
		/* Hypervisor only supports radix - check enabled && GTSE */
		if (!early_radix_enabled()) {
			pr_warn("WARNING: Ignoring cmdline option disable_radix\n");
		}
		if (!(vec5[OV5_INDX(OV5_RADIX_GTSE)] &
		      OV5_FEAT(OV5_RADIX_GTSE))) {
			pr_warn("WARNING: Hypervisor doesn't support RADIX with GTSE\n");
		}
		/* Do radix anyway - the hypervisor said we had to */
		cur_cpu_spec->mmu_features |= MMU_FTR_TYPE_RADIX;
	} else if (mmu_supported == OV5_FEAT(OV5_MMU_HASH)) {
		/* Hypervisor only supports hash - disable radix */
		cur_cpu_spec->mmu_features &= ~MMU_FTR_TYPE_RADIX;
	}
}

void __init mmu_early_init_devtree(void)
{
	/* Disable radix mode based on kernel command line. */
	if (disable_radix)
		cur_cpu_spec->mmu_features &= ~MMU_FTR_TYPE_RADIX;

	/*
	 * Check /chosen/ibm,architecture-vec-5 if running as a guest.
	 * When running bare-metal, we can use radix if we like
	 * even though the ibm,architecture-vec-5 property created by
	 * skiboot doesn't have the necessary bits set.
	 */
	if (!(mfmsr() & MSR_HV))
		early_check_vec5();

	if (early_radix_enabled())
		radix__early_init_devtree();
	else
		hash__early_init_devtree();
}
#endif /* CONFIG_PPC_BOOK3S_64 */