#undef DEBUG

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/stddef.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/idr.h>
#include <linux/nodemask.h>
#include <linux/module.h>
#include <linux/poison.h>
#include <linux/memblock.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>

#include <asm/pgalloc.h>
#include <asm/page.h>
#include <asm/prom.h>
#include <asm/rtas.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/uaccess.h>
#include <asm/smp.h>
#include <asm/machdep.h>
#include <asm/tlb.h>
#include <asm/eeh.h>
#include <asm/processor.h>
#include <asm/mmzone.h>
#include <asm/cputable.h>
#include <asm/sections.h>
#include <asm/iommu.h>
#include <asm/vdso.h>

#include "mmu_decl.h"

#ifdef CONFIG_PPC_STD_MMU_64
#if PGTABLE_RANGE > USER_VSID_RANGE
#warning Limited user VSID range means pagetable space is wasted
#endif

#if (TASK_SIZE_USER64 < PGTABLE_RANGE) && (TASK_SIZE_USER64 < USER_VSID_RANGE)
#warning TASK_SIZE is smaller than it needs to be.
#endif
#endif /* CONFIG_PPC_STD_MMU_64 */

phys_addr_t memstart_addr = ~0;
EXPORT_SYMBOL_GPL(memstart_addr);
phys_addr_t kernstart_addr;
EXPORT_SYMBOL_GPL(kernstart_addr);

static void pgd_ctor(void *addr)
{
        memset(addr, 0, PGD_TABLE_SIZE);
}

static void pmd_ctor(void *addr)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
        memset(addr, 0, PMD_TABLE_SIZE * 2);
#else
        memset(addr, 0, PMD_TABLE_SIZE);
#endif
}

struct kmem_cache *pgtable_cache[MAX_PGTABLE_INDEX_SIZE];

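/*
 * Set up a kmem_cache for page-table pages with the given index size
 * (2^shift pointers per table).  PTE pages are not handled here; they
 * come straight from the page allocator.  Levels that use the same
 * index size share a cache, and callers look the caches up later via
 * PGT_CACHE(shift).
 */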
void pgtable_cache_add(unsigned shift, void (*ctor)(void *))
{
        char *name;
        unsigned long table_size = sizeof(void *) << shift;
        unsigned long align = table_size;

        /*
         * When page-table pointers are batched for freeing, the table's
         * index size is stored in the low bits of the pointer, and huge
         * page directory pointers likewise carry a shift value in their
         * low bits.  Tables must therefore be aligned far enough to
         * leave those bits clear.
         */
        unsigned long minalign = max(MAX_PGTABLE_INDEX_SIZE + 1,
                                     HUGEPD_SHIFT_MASK + 1);
        struct kmem_cache *new;

        /*
         * This would be nicer as a BUILD_BUG_ON(), but is_power_of_2()
         * is not always recognised as a compile-time constant.
         */
        BUG_ON(!is_power_of_2(minalign));
        BUG_ON((shift < 1) || (shift > MAX_PGTABLE_INDEX_SIZE));

        /* A cache for this index size already exists, reuse it. */
        if (PGT_CACHE(shift))
                return;

        align = max_t(unsigned long, align, minalign);
        name = kasprintf(GFP_KERNEL, "pgtable-2^%d", shift);
        new = kmem_cache_create(name, table_size, align, 0, ctor);
        kfree(name);
        pgtable_cache[shift - 1] = new;
        pr_debug("Allocated pgtable cache for order %d\n", shift);
}

void pgtable_cache_init(void)
{
        pgtable_cache_add(PGD_INDEX_SIZE, pgd_ctor);
        pgtable_cache_add(PMD_CACHE_INDEX, pmd_ctor);
        if (!PGT_CACHE(PGD_INDEX_SIZE) || !PGT_CACHE(PMD_CACHE_INDEX))
                panic("Couldn't allocate pgtable caches");
        /*
         * In all current configs, when the PUD index exists it is the
         * same size as either the pgd or pmd index, so the caches set
         * up above already cover it.
         */
        BUG_ON(PUD_INDEX_SIZE && !PGT_CACHE(PUD_INDEX_SIZE));
}

#ifdef CONFIG_SPARSEMEM_VMEMMAP

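/*
 * Given an address within the vmemmap (i.e. inside the struct page
 * array), return the pfn of the first page of the sparsemem section it
 * falls in.  Done by hand because the address may not be aligned to a
 * struct page boundary.
 */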
static unsigned long __meminit vmemmap_section_start(unsigned long page)
{
        unsigned long offset = page - ((unsigned long)(vmemmap));

        /* Return the pfn of the start of the section. */
        return (offset / sizeof(struct page)) & PAGE_SECTION_MASK;
}

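/*
 * Check whether this vmemmap chunk is already backed: if any sparsemem
 * section overlapping the chunk has valid pages, the chunk has been
 * populated before.
 */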
static int __meminit vmemmap_populated(unsigned long start, int page_size)
{
        unsigned long end = start + page_size;
        start = (unsigned long)(pfn_to_page(vmemmap_section_start(start)));

        for (; start < end; start += (PAGES_PER_SECTION * sizeof(struct page)))
                if (pfn_valid(page_to_pfn((struct page *)start)))
                        return 1;

        return 0;
}

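/*
 * The vmemmap virtual range has to be mapped to real memory somehow.
 * On Book3E this is done with ordinary kernel page-table entries using
 * the vmemmap page size; on hash-MMU systems the range is bolted
 * directly into the hash page table.
 */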
#ifdef CONFIG_PPC_BOOK3E
static void __meminit vmemmap_create_mapping(unsigned long start,
                                             unsigned long page_size,
                                             unsigned long phys)
{
        /* Build a kernel-RW PTE value, without a page size yet. */
        unsigned long i, flags = _PAGE_PRESENT | _PAGE_ACCESSED |
                _PAGE_KERNEL_RW;

        /* The PTE size-encoding field is only 4 bits wide. */
        BUG_ON(mmu_psize_defs[mmu_vmemmap_psize].enc > 0xf);

        /* Encode the vmemmap page size in the PTE. */
        flags |= mmu_psize_defs[mmu_vmemmap_psize].enc << 8;

        /*
         * Insert a PTE for every base page in the area.  phys is not
         * advanced: every PTE covers the same large page, so its low
         * bits must stay clear.
         */
        for (i = 0; i < page_size; i += PAGE_SIZE)
                BUG_ON(map_kernel_page(start + i, phys, flags));
}

#ifdef CONFIG_MEMORY_HOTPLUG
static void vmemmap_remove_mapping(unsigned long start,
                                   unsigned long page_size)
{
        /* Tearing down Book3E vmemmap mappings is not implemented. */
}
#endif
#else /* CONFIG_PPC_BOOK3E */
static void __meminit vmemmap_create_mapping(unsigned long start,
                                             unsigned long page_size,
                                             unsigned long phys)
{
        /* Bolt the range straight into the hash page table. */
        int mapped = htab_bolt_mapping(start, start + page_size, phys,
                                       pgprot_val(PAGE_KERNEL),
                                       mmu_vmemmap_psize,
                                       mmu_kernel_ssize);
        BUG_ON(mapped < 0);
}

#ifdef CONFIG_MEMORY_HOTPLUG
extern int htab_remove_mapping(unsigned long vstart, unsigned long vend,
                               int psize, int ssize);

static void vmemmap_remove_mapping(unsigned long start,
                                   unsigned long page_size)
{
        int mapped = htab_remove_mapping(start, start + page_size,
                                         mmu_vmemmap_psize,
                                         mmu_kernel_ssize);
        BUG_ON(mapped < 0);
}
#endif /* CONFIG_MEMORY_HOTPLUG */

#endif /* CONFIG_PPC_BOOK3E */
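
/*
 * There is no page table recording which physical block backs each
 * vmemmap chunk, so the virtual-to-physical association is kept in a
 * simple singly linked list of vmemmap_backing entries.  Entries are
 * carved out of whole pages, PAGE_SIZE / sizeof(struct vmemmap_backing)
 * at a time; "next" and "num_left" track the current page of entries,
 * and freed entries are chained back onto "next" for reuse.
 */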
struct vmemmap_backing *vmemmap_list;
static struct vmemmap_backing *next;
static int num_left;
static int num_freed;

static __meminit struct vmemmap_backing *vmemmap_list_alloc(int node)
{
        struct vmemmap_backing *vmem_back;

        /* Reuse a previously freed entry if one is available. */
        if (num_freed) {
                num_freed--;
                vmem_back = next;
                next = next->list;

                return vmem_back;
        }

        /* Allocate a new page of entries when the current one runs out. */
        if (!num_left) {
                next = vmemmap_alloc_block(PAGE_SIZE, node);
                if (unlikely(!next)) {
                        WARN_ON(1);
                        return NULL;
                }
                num_left = PAGE_SIZE / sizeof(struct vmemmap_backing);
        }

        num_left--;

        return next++;
}
283
284static __meminit void vmemmap_list_populate(unsigned long phys,
285 unsigned long start,
286 int node)
287{
288 struct vmemmap_backing *vmem_back;
289
290 vmem_back = vmemmap_list_alloc(node);
291 if (unlikely(!vmem_back)) {
292 WARN_ON(1);
293 return;
294 }
295
296 vmem_back->phys = phys;
297 vmem_back->virt_addr = start;
298 vmem_back->list = vmemmap_list;
299
300 vmemmap_list = vmem_back;
301}
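
/*
 * Populate the vmemmap for [start, end): for each vmemmap page-size
 * chunk that is not already backed, allocate a block of node-local
 * memory, record it on vmemmap_list and map it.
 */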
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
                               struct vmem_altmap *altmap)
{
        unsigned long page_size = 1 << mmu_psize_defs[mmu_vmemmap_psize].shift;

        /* Align down so whole vmemmap chunks are mapped. */
        start = _ALIGN_DOWN(start, page_size);

        pr_debug("vmemmap_populate %lx..%lx, node %d\n", start, end, node);

        for (; start < end; start += page_size) {
                void *p;

                if (vmemmap_populated(start, page_size))
                        continue;

                p = vmemmap_alloc_block(page_size, node);
                if (!p)
                        return -ENOMEM;

                vmemmap_list_populate(__pa(p), start, node);

                pr_debug(" * %016lx..%016lx allocated at %p\n",
                         start, start + page_size, p);

                vmemmap_create_mapping(start, page_size, __pa(p));
        }

        return 0;
}

#ifdef CONFIG_MEMORY_HOTPLUG
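/*
 * Unlink the vmemmap_list entry for the chunk starting at "start",
 * push the entry onto the free list for reuse and return the physical
 * address it recorded (0 if no entry was found).
 */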
static unsigned long vmemmap_list_free(unsigned long start)
{
        struct vmemmap_backing *vmem_back, *vmem_back_prev;

        vmem_back_prev = vmem_back = vmemmap_list;

        /* Look for the entry describing this chunk. */
        for (; vmem_back; vmem_back = vmem_back->list) {
                if (vmem_back->virt_addr == start)
                        break;
                vmem_back_prev = vmem_back;
        }

        if (unlikely(!vmem_back)) {
                WARN_ON(1);
                return 0;
        }

        /* Unlink the entry from vmemmap_list. */
        if (vmem_back == vmemmap_list)
                vmemmap_list = vmem_back->list;
        else
                vmem_back_prev->list = vmem_back->list;

        /* Chain it onto the free list so it can be reused. */
        vmem_back->list = next;
        next = vmem_back;
        num_freed++;

        return vmem_back->phys;
}

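/*
 * Free the backing memory for the vmemmap range [start, end) and tear
 * down its mapping, skipping chunks that are still needed by another
 * live section.
 */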
void __ref vmemmap_free(unsigned long start, unsigned long end,
                        struct vmem_altmap *altmap)
{
        unsigned long page_size = 1 << mmu_psize_defs[mmu_vmemmap_psize].shift;

        start = _ALIGN_DOWN(start, page_size);

        pr_debug("vmemmap_free %lx...%lx\n", start, end);

        for (; start < end; start += page_size) {
                unsigned long addr;

                /*
                 * A vmemmap chunk can cover several sections; if any of
                 * them is still populated, the chunk must stay mapped.
                 */
                if (vmemmap_populated(start, page_size))
                        continue;

                addr = vmemmap_list_free(start);
                if (addr) {
                        struct page *page = pfn_to_page(addr >> PAGE_SHIFT);

                        if (PageReserved(page)) {
                                /* The chunk was allocated from bootmem. */
                                if (page_size < PAGE_SIZE) {
                                        /*
                                         * This should never happen, but
                                         * if it does, leave the memory
                                         * in place.
                                         */
                                        WARN_ON_ONCE(1);
                                } else {
                                        unsigned int nr_pages =
                                                1 << get_order(page_size);
                                        while (nr_pages--)
                                                free_reserved_page(page++);
                                }
                        } else
                                free_pages((unsigned long)(__va(addr)),
                                           get_order(page_size));

                        vmemmap_remove_mapping(start, page_size);
                }
        }
}
#endif /* CONFIG_MEMORY_HOTPLUG */

void register_page_bootmem_memmap(unsigned long section_nr,
                                  struct page *start_page, unsigned long size)
{
}
#endif /* CONFIG_SPARSEMEM_VMEMMAP */