#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/stddef.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/idr.h>
#include <linux/nodemask.h>
#include <linux/module.h>
#include <linux/poison.h>
#include <linux/slab.h>

#include <asm/pgalloc.h>
#include <asm/page.h>
#include <asm/prom.h>
#include <asm/lmb.h>
#include <asm/rtas.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/uaccess.h>
#include <asm/smp.h>
#include <asm/machdep.h>
#include <asm/tlb.h>
#include <asm/eeh.h>
#include <asm/processor.h>
#include <asm/mmzone.h>
#include <asm/cputable.h>
#include <asm/sections.h>
#include <asm/system.h>
#include <asm/iommu.h>
#include <asm/abs_addr.h>
#include <asm/vdso.h>

#include "mmu_decl.h"

#if PGTABLE_RANGE > USER_VSID_RANGE
#warning Limited user VSID range means pagetable space is wasted
#endif

#if (TASK_SIZE_USER64 < PGTABLE_RANGE) && (TASK_SIZE_USER64 < USER_VSID_RANGE)
#warning TASK_SIZE is smaller than it needs to be.
#endif

/* max amount of RAM to use */
unsigned long __max_memory;

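/*
 * Free the memory occupied by the kernel's __init sections: poison
 * each page, clear its reserved bit, reset its reference count and
 * hand it back to the page allocator.
 */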
void free_initmem(void)
{
	unsigned long addr;

	addr = (unsigned long)__init_begin;
	for (; addr < (unsigned long)__init_end; addr += PAGE_SIZE) {
		memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE);
		ClearPageReserved(virt_to_page(addr));
		init_page_count(virt_to_page(addr));
		free_page(addr);
		totalram_pages++;
	}
	printk(KERN_INFO "Freeing unused kernel memory: %luk freed\n",
	       ((unsigned long)__init_end - (unsigned long)__init_begin) >> 10);
}

#ifdef CONFIG_BLK_DEV_INITRD
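/* Return the initrd's pages to the page allocator once it is no longer needed. */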
void free_initrd_mem(unsigned long start, unsigned long end)
{
	if (start < end)
		printk(KERN_INFO "Freeing initrd memory: %luk freed\n",
		       (end - start) >> 10);
	for (; start < end; start += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(start));
		init_page_count(virt_to_page(start));
		free_page(start);
		totalram_pages++;
	}
}
#endif

#ifdef CONFIG_PROC_KCORE
static struct kcore_list kcore_vmem;

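/*
 * Register each LMB memory region, plus the vmalloc space, with
 * /proc/kcore so that crash analysis tools can see all of RAM.
 */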
static int __init setup_kcore(void)
{
	int i;

	for (i = 0; i < lmb.memory.cnt; i++) {
		unsigned long base, size;
		struct kcore_list *kcore_mem;

		base = lmb.memory.region[i].base;
		size = lmb.memory.region[i].size;

		/* GFP_ATOMIC to avoid might_sleep warnings during boot */
		kcore_mem = kmalloc(sizeof(struct kcore_list), GFP_ATOMIC);
		if (!kcore_mem)
			panic("%s: kmalloc failed\n", __func__);

		kclist_add(kcore_mem, __va(base), size);
	}

	kclist_add(&kcore_vmem, (void *)VMALLOC_START, VMALLOC_END - VMALLOC_START);

	return 0;
}
module_init(setup_kcore);
#endif

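/* Constructor for the page table caches: start every table zeroed. */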
static void zero_ctor(struct kmem_cache *cache, void *addr)
{
	memset(addr, 0, kmem_cache_size(cache));
}

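/*
 * Sizes and names of the page table caches created below; entry 0
 * backs PGD allocations and entry 1 backs PMD (or combined PUD/PMD)
 * allocations, depending on the page size configuration.
 */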
static const unsigned int pgtable_cache_size[2] = {
	PGD_TABLE_SIZE, PMD_TABLE_SIZE
};
static const char *pgtable_cache_name[ARRAY_SIZE(pgtable_cache_size)] = {
#ifdef CONFIG_PPC_64K_PAGES
	"pgd_cache", "pmd_cache",
#else
	"pgd_cache", "pud_pmd_cache",
#endif
};

#ifdef CONFIG_HUGETLB_PAGE
/*
 * Hugepages need one extra cache, initialized in hugetlbpage.c.  It
 * can't go in the tables above because HPAGE_SHIFT is not a
 * compile-time constant.
 */
struct kmem_cache *pgtable_cache[ARRAY_SIZE(pgtable_cache_size)+1];
#else
struct kmem_cache *pgtable_cache[ARRAY_SIZE(pgtable_cache_size)];
#endif

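/*
 * Create the kmem caches that back page table allocations.  zero_ctor
 * guarantees freshly allocated tables are cleared, and SLAB_PANIC
 * makes a failure to create a cache fatal at boot.
 */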
void pgtable_cache_init(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(pgtable_cache_size); i++) {
		int size = pgtable_cache_size[i];
		const char *name = pgtable_cache_name[i];

		pr_debug("Allocating page table cache %s (#%d) "
			 "for size: %08x...\n", name, i, size);
		pgtable_cache[i] = kmem_cache_create(name,
						     size, size,
						     SLAB_PANIC,
						     zero_ctor);
	}
}

#ifdef CONFIG_SPARSEMEM_VMEMMAP

/*
 * Given an address within the vmemmap, determine the offset of the
 * page that represents the start of the section it falls in.  This is
 * done by hand because the address may not be aligned to the start of
 * a struct page, and subtracting unaligned pointers gives undefined
 * results.
 */
unsigned long __meminit vmemmap_section_start(unsigned long page)
{
	unsigned long offset = page - ((unsigned long)(vmemmap));

	/* Return the pfn of the start of the section. */
	return (offset / sizeof(struct page)) & PAGE_SECTION_MASK;
}

/*
 * Check whether this vmemmap page has already been initialised.  If
 * any section that overlaps the page is initialised, the page counts
 * as populated.
 */
int __meminit vmemmap_populated(unsigned long start, int page_size)
{
	unsigned long end = start + page_size;

	for (; start < end; start += (PAGES_PER_SECTION * sizeof(struct page)))
		if (pfn_valid(vmemmap_section_start(start)))
			return 1;

	return 0;
}

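/*
 * Populate the virtual memmap for the given range of pages: allocate
 * backing blocks at the linear mapping's page size and install bolted
 * hash page table entries for each block not already covered.
 */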
int __meminit vmemmap_populate(struct page *start_page,
			       unsigned long nr_pages, int node)
{
	unsigned long mode_rw;
	unsigned long start = (unsigned long)start_page;
	unsigned long end = (unsigned long)(start_page + nr_pages);
	unsigned long page_size = 1 << mmu_psize_defs[mmu_linear_psize].shift;

	mode_rw = _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_COHERENT | PP_RWXX;

	/* Align to the page size of the linear mapping. */
	start = _ALIGN_DOWN(start, page_size);

	for (; start < end; start += page_size) {
		int mapped;
		void *p;

		if (vmemmap_populated(start, page_size))
			continue;

		p = vmemmap_alloc_block(page_size, node);
		if (!p)
			return -ENOMEM;

		pr_debug("vmemmap %08lx allocated at %p, physical %08lx.\n",
			 start, p, __pa(p));

		mapped = htab_bolt_mapping(start, start + page_size,
					   __pa(p), mode_rw, mmu_linear_psize,
					   mmu_kernel_ssize);
		BUG_ON(mapped < 0);
	}

	return 0;
}
#endif