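/*
 * Memory-management initialization for 64-bit PowerPC: sparse vmemmap
 * population and teardown, real-mode pfn-to-page translation, and early
 * MMU (hash vs. radix) selection from the device tree.
 */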
#undef DEBUG

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/stddef.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/highmem.h>
#include <linux/idr.h>
#include <linux/nodemask.h>
#include <linux/module.h>
#include <linux/poison.h>
#include <linux/memblock.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>
#include <linux/of_fdt.h>
#include <linux/libfdt.h>
#include <linux/memremap.h>
#include <linux/uaccess.h>

#include <asm/pgalloc.h>
#include <asm/page.h>
#include <asm/prom.h>
#include <asm/rtas.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/smp.h>
#include <asm/machdep.h>
#include <asm/tlb.h>
#include <asm/eeh.h>
#include <asm/processor.h>
#include <asm/mmzone.h>
#include <asm/cputable.h>
#include <asm/sections.h>
#include <asm/iommu.h>
#include <asm/vdso.h>

#include "mmu_decl.h"

#ifdef CONFIG_PPC_STD_MMU_64
#if H_PGTABLE_RANGE > USER_VSID_RANGE
#warning Limited user VSID range means pagetable space is wasted
#endif
#endif

phys_addr_t memstart_addr = ~0;
EXPORT_SYMBOL_GPL(memstart_addr);
phys_addr_t kernstart_addr;
EXPORT_SYMBOL_GPL(kernstart_addr);

#ifdef CONFIG_SPARSEMEM_VMEMMAP
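/*
 * Given an address within the vmemmap, determine the pfn of the page that
 * represents the start of the section it is within.  We do this by hand
 * as the passed address may not be correctly aligned, and subtraction of
 * non-aligned pointers produces undefined results.
 */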
static unsigned long __meminit vmemmap_section_start(unsigned long page)
{
	unsigned long offset = page - ((unsigned long)(vmemmap));

	/* Return the pfn of the start of the section. */
	return (offset / sizeof(struct page)) & PAGE_SECTION_MASK;
}

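/*
 * Check if this vmemmap page is already initialised.  If any section
 * which overlaps this vmemmap page is initialised then this page is
 * initialised already.
 */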
static int __meminit vmemmap_populated(unsigned long start, int page_size)
{
	unsigned long end = start + page_size;
	start = (unsigned long)(pfn_to_page(vmemmap_section_start(start)));

	for (; start < end; start += (PAGES_PER_SECTION * sizeof(struct page)))
		if (pfn_valid(page_to_pfn((struct page *)start)))
			return 1;

	return 0;
}

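/*
 * vmemmap virtual address space management does not have a traditional
 * page table to track which virtual struct pages are backed by a physical
 * mapping.  The virtual-to-physical mappings are instead tracked in a
 * simple linked list: 'vmemmap_list' maintains the entire vmemmap
 * physical mapping at all times, while 'next' holds the vmemmap_backing
 * structures that have been deleted from the list during memory hotplug
 * remove operations, so they can be reused for later requests without
 * allocating fresh memory.
 */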
struct vmemmap_backing *vmemmap_list;
static struct vmemmap_backing *next;

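/*
 * The same 'next' pointer also tracks the individual chunks inside a
 * freshly allocated page ('num_left' chunks remaining) at boot time, and
 * the freed entries ('num_freed') at runtime.  This dual use is safe only
 * because boot-time allocation and hotplug-remove freeing never overlap.
 */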
static int num_left;
static int num_freed;

static __meminit struct vmemmap_backing *vmemmap_list_alloc(int node)
{
	struct vmemmap_backing *vmem_back;

	/* Reuse a previously freed entry if one is available. */
	if (num_freed) {
		num_freed--;
		vmem_back = next;
		next = next->list;

		return vmem_back;
	}

	/* Allocate a full page of entries when none are left. */
	if (!num_left) {
		next = vmemmap_alloc_block(PAGE_SIZE, node);
		if (unlikely(!next)) {
			WARN_ON(1);
			return NULL;
		}
		num_left = PAGE_SIZE / sizeof(struct vmemmap_backing);
	}

	num_left--;

	return next++;
}

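/* Record a new vmemmap virtual-to-physical backing in the global list. */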
static __meminit void vmemmap_list_populate(unsigned long phys,
					    unsigned long start,
					    int node)
{
	struct vmemmap_backing *vmem_back;

	vmem_back = vmemmap_list_alloc(node);
	if (unlikely(!vmem_back)) {
		WARN_ON(1);
		return;
	}

	vmem_back->phys = phys;
	vmem_back->virt_addr = start;
	vmem_back->list = vmemmap_list;

	vmemmap_list = vmem_back;
}

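/*
 * Populate the vmemmap for the range [start, end) with pages of the
 * configured vmemmap page size, mapping each newly allocated block and
 * recording it in vmemmap_list.
 */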
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
{
	unsigned long page_size = 1 << mmu_psize_defs[mmu_vmemmap_psize].shift;

	/* Align to the page size of the linear mapping. */
	start = _ALIGN_DOWN(start, page_size);

	pr_debug("vmemmap_populate %lx..%lx, node %d\n", start, end, node);

	for (; start < end; start += page_size) {
		struct vmem_altmap *altmap;
		void *p;
		int rc;

		if (vmemmap_populated(start, page_size))
			continue;

		/* altmap lookups only work at section boundaries */
		altmap = to_vmem_altmap(SECTION_ALIGN_DOWN(start));

		p = __vmemmap_alloc_block_buf(page_size, node, altmap);
		if (!p)
			return -ENOMEM;

		vmemmap_list_populate(__pa(p), start, node);

		pr_debug("      * %016lx..%016lx allocated at %p\n",
			 start, start + page_size, p);

		rc = vmemmap_create_mapping(start, page_size, __pa(p));
		if (rc < 0) {
			pr_warn("%s: Unable to create vmemmap mapping: %d\n",
				__func__, rc);
			return -EFAULT;
		}
	}

	return 0;
}

#ifdef CONFIG_MEMORY_HOTPLUG
/*
 * Remove the vmemmap_list entry for a virtual address and return the
 * physical address it was backed by, or 0 if no entry is found.
 */
static unsigned long vmemmap_list_free(unsigned long start)
{
	struct vmemmap_backing *vmem_back, *vmem_back_prev;

	vmem_back_prev = vmem_back = vmemmap_list;

	/* Look for the entry, keeping track of its predecessor. */
	for (; vmem_back; vmem_back = vmem_back->list) {
		if (vmem_back->virt_addr == start)
			break;
		vmem_back_prev = vmem_back;
	}

	if (unlikely(!vmem_back)) {
		WARN_ON(1);
		return 0;
	}

	/* Unlink the entry from vmemmap_list. */
	if (vmem_back == vmemmap_list)
		vmemmap_list = vmem_back->list;
	else
		vmem_back_prev->list = vmem_back->list;

	/* Push the freed entry onto the reuse list. */
	vmem_back->list = next;
	next = vmem_back;
	num_freed++;

	return vmem_back->phys;
}

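/*
 * Tear down the vmemmap for the range [start, end): for each vmemmap page
 * that no longer backs any valid section, unlink its list entry, free the
 * backing memory appropriately, and remove the mapping.
 */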
void __ref vmemmap_free(unsigned long start, unsigned long end)
{
	unsigned long page_size = 1 << mmu_psize_defs[mmu_vmemmap_psize].shift;
	unsigned long page_order = get_order(page_size);

	start = _ALIGN_DOWN(start, page_size);

	pr_debug("vmemmap_free %lx...%lx\n", start, end);

	for (; start < end; start += page_size) {
		unsigned long nr_pages, addr;
		struct vmem_altmap *altmap;
		struct page *section_base;
		struct page *page;

		/*
		 * The section has already been marked invalid, so if
		 * vmemmap_populated() returns true some other section
		 * still maps into this vmemmap page; skip it.
		 */
		if (vmemmap_populated(start, page_size))
			continue;

		addr = vmemmap_list_free(start);
		if (!addr)
			continue;

		page = pfn_to_page(addr >> PAGE_SHIFT);
		section_base = pfn_to_page(vmemmap_section_start(start));
		nr_pages = 1 << page_order;

		altmap = to_vmem_altmap((unsigned long) section_base);
		if (altmap) {
			vmem_altmap_free(altmap, nr_pages);
		} else if (PageReserved(page)) {
			/* Allocated from bootmem. */
			if (page_size < PAGE_SIZE) {
				/*
				 * This shouldn't happen, but if it does,
				 * leave the memory in place.
				 */
				WARN_ON_ONCE(1);
			} else {
				while (nr_pages--)
					free_reserved_page(page++);
			}
		} else {
			free_pages((unsigned long)(__va(addr)), page_order);
		}

		vmemmap_remove_mapping(start, page_size);
	}
}
#endif /* CONFIG_MEMORY_HOTPLUG */
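/* Empty stub: not needed on powerpc, but called by generic hotplug code. */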
void register_page_bootmem_memmap(unsigned long section_nr,
				  struct page *start_page, unsigned long size)
{
}

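/*
 * We do not have access to the sparsemem vmemmap, so we fall back to
 * walking the list of sparsemem blocks which we already maintain for
 * the sake of crashdump.  In the long run, we might want to maintain
 * a tree if performance of that linear walk becomes a problem.
 *
 * realmode_pfn_to_page() can fail because:
 * 1) real sparsemem blocks do not lie in RAM contiguously (they live in
 * virtual address space, which is not available in real mode), so the
 * requested page struct can be split between blocks and get_page/put_page
 * may fail;
 * 2) when huge pages are used, get_page/put_page will fail in real mode
 * as the linked addresses in the page struct are virtual too.
 */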
struct page *realmode_pfn_to_page(unsigned long pfn)
{
	struct vmemmap_backing *vmem_back;
	struct page *page;
	unsigned long page_size = 1 << mmu_psize_defs[mmu_vmemmap_psize].shift;
	unsigned long pg_va = (unsigned long) pfn_to_page(pfn);

	for (vmem_back = vmemmap_list; vmem_back; vmem_back = vmem_back->list) {
		if (pg_va < vmem_back->virt_addr)
			continue;

		/* Entries can be freed from vmemmap_list, so check them all. */
		if ((pg_va + sizeof(struct page)) <=
		    (vmem_back->virt_addr + page_size)) {
			page = (struct page *) (vmem_back->phys + pg_va -
						vmem_back->virt_addr);
			return page;
		}
	}

	/* The page struct is probably split between real pages. */
	return NULL;
}
EXPORT_SYMBOL_GPL(realmode_pfn_to_page);

#elif defined(CONFIG_FLATMEM)

struct page *realmode_pfn_to_page(unsigned long pfn)
{
	struct page *page = pfn_to_page(pfn);
	return page;
}
EXPORT_SYMBOL_GPL(realmode_pfn_to_page);

#endif /* CONFIG_SPARSEMEM_VMEMMAP/CONFIG_FLATMEM */

#ifdef CONFIG_PPC_STD_MMU_64
static bool disable_radix;
static int __init parse_disable_radix(char *p)
{
	disable_radix = true;
	return 0;
}
early_param("disable_radix", parse_disable_radix);
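/*
 * If we're running under a hypervisor, we need to check the contents of
 * /chosen/ibm,architecture-vec-5 to verify the guest environment supports
 * radix.  If not, we clear the radix feature bit so we fall back to hash.
 */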
static void early_check_vec5(void)
{
	unsigned long root, chosen;
	int size;
	const u8 *vec5;
	u8 mmu_supported;

	root = of_get_flat_dt_root();
	chosen = of_get_flat_dt_subnode_by_name(root, "chosen");
	if (chosen == -FDT_ERR_NOTFOUND) {
		cur_cpu_spec->mmu_features &= ~MMU_FTR_TYPE_RADIX;
		return;
	}
	vec5 = of_get_flat_dt_prop(chosen, "ibm,architecture-vec-5", &size);
	if (!vec5) {
		cur_cpu_spec->mmu_features &= ~MMU_FTR_TYPE_RADIX;
		return;
	}
	if (size <= OV5_INDX(OV5_MMU_SUPPORT)) {
		cur_cpu_spec->mmu_features &= ~MMU_FTR_TYPE_RADIX;
		return;
	}

	/* Check for a supported MMU configuration. */
	mmu_supported = vec5[OV5_INDX(OV5_MMU_SUPPORT)] &
			OV5_FEAT(OV5_MMU_SUPPORT);
	if (mmu_supported == OV5_FEAT(OV5_MMU_RADIX)) {
		/* Hypervisor only supports radix: warn about disable_radix and GTSE. */
		if (!early_radix_enabled()) {
			pr_warn("WARNING: Ignoring cmdline option disable_radix\n");
		}
		if (!(vec5[OV5_INDX(OV5_RADIX_GTSE)] &
		      OV5_FEAT(OV5_RADIX_GTSE))) {
			pr_warn("WARNING: Hypervisor doesn't support RADIX with GTSE\n");
		}
		/* Use radix anyway: the hypervisor said we have to. */
		cur_cpu_spec->mmu_features |= MMU_FTR_TYPE_RADIX;
	} else if (mmu_supported == OV5_FEAT(OV5_MMU_HASH)) {
		/* Hypervisor only supports hash: disable radix. */
		cur_cpu_spec->mmu_features &= ~MMU_FTR_TYPE_RADIX;
	}
}

void __init mmu_early_init_devtree(void)
{
	/* Disable radix mode based on the kernel command line. */
	if (disable_radix)
		cur_cpu_spec->mmu_features &= ~MMU_FTR_TYPE_RADIX;

	/*
	 * Check /chosen/ibm,architecture-vec-5 only if we are running as a
	 * guest (MSR_HV clear).  When running bare-metal there is no
	 * hypervisor to negotiate MMU support with, so radix can be used
	 * whenever the hardware supports it.
	 */
	if (!(mfmsr() & MSR_HV))
		early_check_vec5();

	if (early_radix_enabled())
		radix__early_init_devtree();
	else
		hash__early_init_devtree();
}
#endif /* CONFIG_PPC_STD_MMU_64 */