/*
 *  PowerPC version:
 *  Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *    Copyright (C) 1996 Paul Mackerras
 *
 *  Derived from "arch/i386/mm/init.c":
 *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  Dave Engebretsen <engebret@us.ibm.com>
 *      Rework for PPC64 port.
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 *
 */

#undef DEBUG

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/stddef.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/highmem.h>
#include <linux/idr.h>
#include <linux/nodemask.h>
#include <linux/module.h>
#include <linux/poison.h>
#include <linux/memblock.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>

#include <asm/pgalloc.h>
#include <asm/page.h>
#include <asm/prom.h>
#include <asm/rtas.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/uaccess.h>
#include <asm/smp.h>
#include <asm/machdep.h>
#include <asm/tlb.h>
#include <asm/eeh.h>
#include <asm/processor.h>
#include <asm/mmzone.h>
#include <asm/cputable.h>
#include <asm/sections.h>
#include <asm/iommu.h>
#include <asm/vdso.h>

#include "mmu_decl.h"

#ifdef CONFIG_PPC_STD_MMU_64
#if PGTABLE_RANGE > USER_VSID_RANGE
#warning Limited user VSID range means pagetable space is wasted
#endif

#if (TASK_SIZE_USER64 < PGTABLE_RANGE) && (TASK_SIZE_USER64 < USER_VSID_RANGE)
#warning TASK_SIZE is smaller than it needs to be.
#endif
#endif /* CONFIG_PPC_STD_MMU_64 */

phys_addr_t memstart_addr = ~0;
EXPORT_SYMBOL_GPL(memstart_addr);
phys_addr_t kernstart_addr;
EXPORT_SYMBOL_GPL(kernstart_addr);

static void pgd_ctor(void *addr)
{
	memset(addr, 0, PGD_TABLE_SIZE);
}

static void pud_ctor(void *addr)
{
	memset(addr, 0, PUD_TABLE_SIZE);
}

static void pmd_ctor(void *addr)
{
	memset(addr, 0, PMD_TABLE_SIZE);
}

struct kmem_cache *pgtable_cache[MAX_PGTABLE_INDEX_SIZE];

/*
 * Create a kmem_cache() for pagetables.  This is not used for PTE
 * pages - they're linked to struct page, come from the normal free
 * pages pool and have a different entry size (see real_pte_t) to
 * everything else.  Caches created by this function are used for all
 * the higher level pagetables, and for hugepage pagetables.
 */
void pgtable_cache_add(unsigned shift, void (*ctor)(void *))
{
	char *name;
	unsigned long table_size = sizeof(void *) << shift;
	unsigned long align = table_size;

	/*
	 * When batching pgtable pointers for RCU freeing, we store
	 * the index size in the low bits.  Table alignment must be
	 * big enough to fit it.
	 *
	 * Likewise, hugepage pagetable pointers contain a (different)
	 * shift value in the low bits, so the alignment must leave
	 * enough clear low bits for that as well.
	 */
	unsigned long minalign = max(MAX_PGTABLE_INDEX_SIZE + 1,
				     HUGEPD_SHIFT_MASK + 1);
	struct kmem_cache *new;

	/*
	 * It would be nice if this was a BUILD_BUG_ON(), but at the
	 * moment, gcc doesn't seem to recognize is_power_of_2 as a
	 * constant expression.
	 */
	BUG_ON(!is_power_of_2(minalign));
	BUG_ON((shift < 1) || (shift > MAX_PGTABLE_INDEX_SIZE));

	if (PGT_CACHE(shift))
		return; /* Already have a cache of this size */

	align = max_t(unsigned long, align, minalign);
	name = kasprintf(GFP_KERNEL, "pgtable-2^%d", shift);
	new = kmem_cache_create(name, table_size, align, 0, ctor);
	kfree(name);
	pgtable_cache[shift - 1] = new;
	pr_debug("Allocated pgtable cache for order %d\n", shift);
}
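
/*
 * Illustrative usage sketch (not from the original file; "pud" is a
 * hypothetical local):
 *
 *	pgtable_cache_add(PUD_INDEX_SIZE, pud_ctor);
 *	pud = kmem_cache_alloc(PGT_CACHE(PUD_INDEX_SIZE), GFP_KERNEL);
 *	...
 *	kmem_cache_free(PGT_CACHE(PUD_INDEX_SIZE), pud);
 *
 * With shift == 9, objects are sizeof(void *) << 9 == 4096 bytes and
 * aligned to at least that size (or to minalign, whichever is larger),
 * so the low bits of every table pointer stay clear for the index
 * size / hugepd shift described above.
 */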

void pgtable_cache_init(void)
{
	pgtable_cache_add(PGD_INDEX_SIZE, pgd_ctor);
	pgtable_cache_add(PMD_CACHE_INDEX, pmd_ctor);

	/*
	 * In all current configs, when the PUD index exists it's the
	 * same size as either the pgd or pmd index, so an extra cache
	 * is only needed when it differs from both.
	 */
	if (PUD_INDEX_SIZE && !PGT_CACHE(PUD_INDEX_SIZE))
		pgtable_cache_add(PUD_INDEX_SIZE, pud_ctor);

	if (!PGT_CACHE(PGD_INDEX_SIZE) || !PGT_CACHE(PMD_CACHE_INDEX))
		panic("Couldn't allocate pgtable caches");
	if (PUD_INDEX_SIZE && !PGT_CACHE(PUD_INDEX_SIZE))
		panic("Couldn't allocate pud pgtable caches");
}

#ifdef CONFIG_SPARSEMEM_VMEMMAP

/*
 * Given an address within the vmemmap, determine the pfn of the page that
 * represents the start of the section it is within.  Note that we have to
 * do this by hand as the proffered address may not be correctly aligned.
 * Subtraction of non-aligned pointers produces undefined results.
 */
static unsigned long __meminit vmemmap_section_start(unsigned long page)
{
	unsigned long offset = page - ((unsigned long)(vmemmap));

	/* Return the pfn of the start of the section. */
	return (offset / sizeof(struct page)) & PAGE_SECTION_MASK;
}
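
/*
 * Worked example with assumed values (not from the original file): if
 * sizeof(struct page) == 64 and PAGES_PER_SECTION == 0x10000, an
 * address 0x12345 struct pages past vmemmap gives
 * offset / sizeof(struct page) == 0x12345; masking with
 * PAGE_SECTION_MASK (~(PAGES_PER_SECTION - 1)) yields pfn 0x10000,
 * the first page of the enclosing section.
 */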

/*
 * Check if this vmemmap page is already initialised.  If any section
 * which overlaps this vmemmap page is initialised then this page is
 * initialised already.
 */
static int __meminit vmemmap_populated(unsigned long start, int page_size)
{
	unsigned long end = start + page_size;
	start = (unsigned long)(pfn_to_page(vmemmap_section_start(start)));

	for (; start < end; start += (PAGES_PER_SECTION * sizeof(struct page)))
		if (pfn_valid(page_to_pfn((struct page *)start)))
			return 1;

	return 0;
}

/*
 * On hash-based CPUs, the vmemmap is bolted in the hash table.
 *
 * On Book3E CPUs, the vmemmap is currently mapped in the top half of
 * the vmalloc space using normal page tables, though the size of
 * pages encoded in the PTEs can be different.
 */
#ifdef CONFIG_PPC_BOOK3E
static int __meminit vmemmap_create_mapping(unsigned long start,
					    unsigned long page_size,
					    unsigned long phys)
{
	/* Create a PTE encoding without page size */
	unsigned long i, flags = _PAGE_PRESENT | _PAGE_ACCESSED |
		_PAGE_KERNEL_RW;

	/* PTEs only contain page size encodings up to 32M */
	BUG_ON(mmu_psize_defs[mmu_vmemmap_psize].enc > 0xf);

	/* Encode the size in the PTE */
	flags |= mmu_psize_defs[mmu_vmemmap_psize].enc << 8;

	/*
	 * For each PTE for that area, map things.  Note that we don't
	 * increment phys because all PTEs are of the large size and
	 * thus must have the low bits clear.
	 */
	for (i = 0; i < page_size; i += PAGE_SIZE)
		BUG_ON(map_kernel_page(start + i, phys, flags));

	return 0;
}
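
/*
 * Illustrative encoding example (enc value assumed, not from the
 * original file): the BUG_ON above guarantees enc fits in four bits,
 * so a hypothetical enc of 0x7 makes "enc << 8" contribute 0x700,
 * i.e. the page-size code occupies PTE bits 8-11 next to the _PAGE_*
 * permission bits set earlier.
 */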

#ifdef CONFIG_MEMORY_HOTPLUG
static void vmemmap_remove_mapping(unsigned long start,
				   unsigned long page_size)
{
}
#endif
#else /* CONFIG_PPC_BOOK3E */
static int __meminit vmemmap_create_mapping(unsigned long start,
					    unsigned long page_size,
					    unsigned long phys)
{
	int rc = htab_bolt_mapping(start, start + page_size, phys,
				   pgprot_val(PAGE_KERNEL),
				   mmu_vmemmap_psize, mmu_kernel_ssize);
	if (rc < 0) {
		int rc2 = htab_remove_mapping(start, start + page_size,
					      mmu_vmemmap_psize,
					      mmu_kernel_ssize);
		BUG_ON(rc2 && (rc2 != -ENOENT));
	}
	return rc;
}

#ifdef CONFIG_MEMORY_HOTPLUG
static void vmemmap_remove_mapping(unsigned long start,
				   unsigned long page_size)
{
	int rc = htab_remove_mapping(start, start + page_size,
				     mmu_vmemmap_psize,
				     mmu_kernel_ssize);
	BUG_ON((rc < 0) && (rc != -ENOENT));
	WARN_ON(rc == -ENOENT);
}
#endif

#endif /* CONFIG_PPC_BOOK3E */

struct vmemmap_backing *vmemmap_list;
static struct vmemmap_backing *next;
static int num_left;
static int num_freed;

static __meminit struct vmemmap_backing *vmemmap_list_alloc(int node)
{
	struct vmemmap_backing *vmem_back;

	/* get from freed entries first */
	if (num_freed) {
		num_freed--;
		vmem_back = next;
		next = next->list;

		return vmem_back;
	}

	/* allocate a page when required and hand out chunks */
	if (!num_left) {
		next = vmemmap_alloc_block(PAGE_SIZE, node);
		if (unlikely(!next)) {
			WARN_ON(1);
			return NULL;
		}
		num_left = PAGE_SIZE / sizeof(struct vmemmap_backing);
	}

	num_left--;

	return next++;
}

static __meminit void vmemmap_list_populate(unsigned long phys,
					    unsigned long start,
					    int node)
{
	struct vmemmap_backing *vmem_back;

	vmem_back = vmemmap_list_alloc(node);
	if (unlikely(!vmem_back)) {
		WARN_ON(1);
		return;
	}

	vmem_back->phys = phys;
	vmem_back->virt_addr = start;
	vmem_back->list = vmemmap_list;

	vmemmap_list = vmem_back;
}
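
/*
 * Illustrative list shape after two calls to vmemmap_list_populate()
 * (not from the original file):
 *
 *	vmemmap_list -> { phys1, virt1 } -> { phys0, virt0 } -> NULL
 *
 * Entries are pushed at the head, so the chain runs newest-first and
 * is searched by a linear walk (see vmemmap_list_free() and
 * realmode_pfn_to_page() below).
 */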

int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
{
	unsigned long page_size = 1 << mmu_psize_defs[mmu_vmemmap_psize].shift;

	/* Align to the page size of the linear mapping. */
	start = _ALIGN_DOWN(start, page_size);

	pr_debug("vmemmap_populate %lx..%lx, node %d\n", start, end, node);

	for (; start < end; start += page_size) {
		void *p;
		int rc;

		if (vmemmap_populated(start, page_size))
			continue;

		p = vmemmap_alloc_block(page_size, node);
		if (!p)
			return -ENOMEM;

		vmemmap_list_populate(__pa(p), start, node);

		pr_debug("  * %016lx..%016lx allocated at %p\n",
			 start, start + page_size, p);

		rc = vmemmap_create_mapping(start, page_size, __pa(p));
		if (rc < 0) {
			pr_warn("vmemmap_populate: Unable to create vmemmap mapping: %d\n",
				rc);
			return -EFAULT;
		}
	}

	return 0;
}
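
/*
 * Illustrative trace with assumed addresses (not from the original
 * file): with a 16M mapping page size, populating
 * [0xf000000001000000, 0xf000000003000000) takes two loop iterations,
 * each allocating one 16M block, recording it via
 * vmemmap_list_populate() and bolting it with vmemmap_create_mapping().
 */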

#ifdef CONFIG_MEMORY_HOTPLUG
static unsigned long vmemmap_list_free(unsigned long start)
{
	struct vmemmap_backing *vmem_back, *vmem_back_prev;

	vmem_back_prev = vmem_back = vmemmap_list;

	/* look for it with prev pointer recorded */
	for (; vmem_back; vmem_back = vmem_back->list) {
		if (vmem_back->virt_addr == start)
			break;
		vmem_back_prev = vmem_back;
	}

	if (unlikely(!vmem_back)) {
		WARN_ON(1);
		return 0;
	}

	/* remove it from vmemmap_list */
	if (vmem_back == vmemmap_list) /* remove head */
		vmemmap_list = vmem_back->list;
	else
		vmem_back_prev->list = vmem_back->list;

	/* next points to this freed entry */
	vmem_back->list = next;
	next = vmem_back;
	num_freed++;

	return vmem_back->phys;
}

void __ref vmemmap_free(unsigned long start, unsigned long end)
{
	unsigned long page_size = 1 << mmu_psize_defs[mmu_vmemmap_psize].shift;

	start = _ALIGN_DOWN(start, page_size);

	pr_debug("vmemmap_free %lx...%lx\n", start, end);

	for (; start < end; start += page_size) {
		unsigned long addr;

		/*
		 * The section has already been marked as invalid, so
		 * vmemmap_populated() returning true means some other
		 * sections still use this page; skip it.
		 */
		if (vmemmap_populated(start, page_size))
			continue;

		addr = vmemmap_list_free(start);
		if (addr) {
			struct page *page = pfn_to_page(addr >> PAGE_SHIFT);

			if (PageReserved(page)) {
				/* allocated from bootmem */
				if (page_size < PAGE_SIZE) {
					/*
					 * this shouldn't happen, but if it is
					 * the case, leave the memory there
					 */
					WARN_ON_ONCE(1);
				} else {
					unsigned int nr_pages =
						1 << get_order(page_size);
					while (nr_pages--)
						free_reserved_page(page++);
				}
			} else
				free_pages((unsigned long)(__va(addr)),
					   get_order(page_size));

			vmemmap_remove_mapping(start, page_size);
		}
	}
}
#endif /* CONFIG_MEMORY_HOTPLUG */

void register_page_bootmem_memmap(unsigned long section_nr,
				  struct page *start_page, unsigned long size)
{
}

/*
 * We do not have access to the sparsemem vmemmap, so we fall back to
 * walking the list of sparsemem blocks which we already maintain for
 * the sake of crashdump.  In the long run, we might want to maintain
 * a tree if performance of that linear walk becomes a problem.
 *
 * realmode_pfn_to_page functions can fail due to:
 * 1) As real sparsemem blocks do not lie in RAM contiguously (they
 * are in virtual address space which is not available in real mode),
 * the requested page struct can be split between blocks so
 * get_page/put_page may fail.
 * 2) When huge pages are used, the get_page/put_page API will fail
 * in real mode as the linked addresses in the page struct are virtual
 * too.
 */
struct page *realmode_pfn_to_page(unsigned long pfn)
{
	struct vmemmap_backing *vmem_back;
	struct page *page;
	unsigned long page_size = 1 << mmu_psize_defs[mmu_vmemmap_psize].shift;
	unsigned long pg_va = (unsigned long) pfn_to_page(pfn);

	for (vmem_back = vmemmap_list; vmem_back; vmem_back = vmem_back->list) {
		if (pg_va < vmem_back->virt_addr)
			continue;

		/* After vmemmap_list entry free is possible, need check all */
		if ((pg_va + sizeof(struct page)) <=
				(vmem_back->virt_addr + page_size)) {
			page = (struct page *) (vmem_back->phys + pg_va -
					vmem_back->virt_addr);
			return page;
		}
	}

	/* Probably that page struct is split between real pages */
	return NULL;
}
EXPORT_SYMBOL_GPL(realmode_pfn_to_page);
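
/*
 * Worked example with assumed values (not from the original file): if
 * a list entry maps virt_addr 0xf000000000000000 to phys 0x20000000
 * and pg_va == 0xf000000000000040, the function returns the physical
 * address 0x20000040 cast to a struct page pointer, usable from real
 * mode where addresses are not translated.
 */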

#elif defined(CONFIG_FLATMEM)

struct page *realmode_pfn_to_page(unsigned long pfn)
{
	struct page *page = pfn_to_page(pfn);
	return page;
}
EXPORT_SYMBOL_GPL(realmode_pfn_to_page);

#endif /* CONFIG_SPARSEMEM_VMEMMAP/CONFIG_FLATMEM */