#include <linux/memblock.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/initrd.h>
#include <linux/pagemap.h>
#include <linux/pfn.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/export.h>

#include <asm/page.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>
#include <asm/sections.h>
#include <asm/tlb.h>
#include <asm/fixmap.h>

/* Set once mem_init() has completed; used by both MMU and noMMU code */
int mem_init_done;

#ifndef CONFIG_MMU
unsigned int __page_offset;
EXPORT_SYMBOL(__page_offset);
#endif

char *klimit = _end;

/*
 * Memory layout discovered at boot; exported for drivers that need the
 * physical RAM base and size.
 */
unsigned long memory_start;
EXPORT_SYMBOL(memory_start);
unsigned long memory_size;
EXPORT_SYMBOL(memory_size);
unsigned long lowmem_size;

#ifdef CONFIG_HIGHMEM
pte_t *kmap_pte;
EXPORT_SYMBOL(kmap_pte);
pgprot_t kmap_prot;
EXPORT_SYMBOL(kmap_prot);

static inline pte_t *virt_to_kpte(unsigned long vaddr)
{
	return pte_offset_kernel(pmd_offset(pgd_offset_k(vaddr),
			vaddr), vaddr);
}

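/* Map the pkmap area and cache the kernel PTEs used by kmap()/kmap_atomic() */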
static void __init highmem_init(void)
{
	pr_debug("%x\n", (u32)PKMAP_BASE);
	map_page(PKMAP_BASE, 0, 0);
	pkmap_page_table = virt_to_kpte(PKMAP_BASE);

	kmap_pte = virt_to_kpte(__fix_to_virt(FIX_KMAP_BEGIN));
	kmap_prot = PAGE_KERNEL;
}

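/* Hand every non-reserved highmem page (above max_low_pfn) to the page allocator */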
static void highmem_setup(void)
{
	unsigned long pfn;

	for (pfn = max_low_pfn; pfn < max_pfn; ++pfn) {
		struct page *page = pfn_to_page(pfn);

		/* Free only pages that memblock has not reserved */
		if (!memblock_is_reserved(pfn << PAGE_SHIFT))
			free_highmem_page(page);
	}
}
#endif /* CONFIG_HIGHMEM */

/*
 * paging_init() computes the zone sizes and hands them to the core mm.
 */
static void __init paging_init(void)
{
	unsigned long zones_size[MAX_NR_ZONES];
#ifdef CONFIG_MMU
	int idx;

	/* Set up the fixmap entries */
	for (idx = 0; idx < __end_of_fixed_addresses; idx++)
		clear_fixmap(idx);
#endif

	/* Start with empty zone sizes */
	memset(zones_size, 0, sizeof(zones_size));

#ifdef CONFIG_HIGHMEM
	highmem_init();

	zones_size[ZONE_DMA] = max_low_pfn;
	zones_size[ZONE_HIGHMEM] = max_pfn;
#else
	zones_size[ZONE_DMA] = max_pfn;
#endif

	/* We do not have holes in the memory map */
	free_area_init_nodes(zones_size);
}

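/*
 * setup_memory() finds the RAM the kernel runs from (noMMU), registers the
 * memory regions with the core mm and computes the pfn limits before
 * calling paging_init().
 */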
void __init setup_memory(void)
{
	struct memblock_region *reg;

#ifndef CONFIG_MMU
	u32 kernel_align_start, kernel_align_size;

	/* Find the main memory region that contains the kernel */
	for_each_memblock(memory, reg) {
		memory_start = (u32)reg->base;
		lowmem_size = reg->size;
		if ((memory_start <= (u32)_text) &&
			((u32)_text <= (memory_start + lowmem_size - 1))) {
			memory_size = lowmem_size;
			PAGE_OFFSET = memory_start;
			pr_info("%s: Main mem: 0x%x, size 0x%08x\n",
				__func__, (u32) memory_start,
				(u32) memory_size);
			break;
		}
	}

	if (!memory_start || !memory_size) {
		panic("%s: Missing memory setting 0x%08x, size=0x%08x\n",
			__func__, (u32) memory_start, (u32) memory_size);
	}

	/* Reserve the region occupied by the kernel image */
	kernel_align_start = PAGE_DOWN((u32)_text);
	/* klimit marks the end of the kernel image; round up to whole pages */
	kernel_align_size = PAGE_UP((u32)klimit) - kernel_align_start;
	pr_info("%s: kernel addr:0x%08x-0x%08x size=0x%08x\n",
		__func__, kernel_align_start, kernel_align_start
			+ kernel_align_size, kernel_align_size);
	memblock_reserve(kernel_align_start, kernel_align_size);
#endif

	/*
	 * min_low_pfn - first page frame of the kernel's RAM
	 * max_low_pfn - end of the directly mapped (lowmem) RAM
	 * max_pfn     - end of all RAM, including highmem
	 * max_mapnr   - number of page frames managed by the kernel
	 */
	min_low_pfn = memory_start >> PAGE_SHIFT;
	/* RAM is assumed to be contiguous */
	max_mapnr = memory_size >> PAGE_SHIFT;
	max_low_pfn = ((u64)memory_start + (u64)lowmem_size) >> PAGE_SHIFT;
	max_pfn = ((u64)memory_start + (u64)memory_size) >> PAGE_SHIFT;

	pr_info("%s: max_mapnr: %#lx\n", __func__, max_mapnr);
	pr_info("%s: min_low_pfn: %#lx\n", __func__, min_low_pfn);
	pr_info("%s: max_low_pfn: %#lx\n", __func__, max_low_pfn);
	pr_info("%s: max_pfn: %#lx\n", __func__, max_pfn);

	/* Assign every memory region to node 0 */
	for_each_memblock(memory, reg) {
		unsigned long start_pfn, end_pfn;

		start_pfn = memblock_region_memory_base_pfn(reg);
		end_pfn = memblock_region_memory_end_pfn(reg);
		memblock_set_node(start_pfn << PAGE_SHIFT,
				  (end_pfn - start_pfn) << PAGE_SHIFT,
				  &memblock.memory, 0);
	}

	/* Mark the active regions as present for the sparse memory model */
	sparse_memory_present_with_active_regions(0);

	paging_init();
}

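/* Called by the initrd core to free the pages holding the initial RAM disk */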
#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
	free_reserved_area((void *)start, (void *)end, -1, "initrd");
}
#endif

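/* Free the memory used by __init code and data once boot is complete */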
void free_initmem(void)
{
	free_initmem_default(-1);
}

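/*
 * mem_init() sets high_memory, releases all free memory to the page
 * allocator and prints the kernel's virtual memory layout.
 */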
void __init mem_init(void)
{
	high_memory = (void *)__va(memory_start + lowmem_size - 1);

	/* This will put all memory onto the freelists */
	memblock_free_all();
#ifdef CONFIG_HIGHMEM
	highmem_setup();
#endif

	mem_init_print_info(NULL);
#ifdef CONFIG_MMU
	pr_info("Kernel virtual memory layout:\n");
	pr_info(" * 0x%08lx..0x%08lx : fixmap\n", FIXADDR_START, FIXADDR_TOP);
#ifdef CONFIG_HIGHMEM
	pr_info(" * 0x%08lx..0x%08lx : highmem PTEs\n",
		PKMAP_BASE, PKMAP_ADDR(LAST_PKMAP));
#endif
	pr_info(" * 0x%08lx..0x%08lx : early ioremap\n",
		ioremap_bot, ioremap_base);
	pr_info(" * 0x%08lx..0x%08lx : vmalloc & ioremap\n",
		(unsigned long)VMALLOC_START, VMALLOC_END);
#endif
	mem_init_done = 1;
}

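/* page_is_ram() - report whether a pfn belongs to system RAM */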
#ifndef CONFIG_MMU
int page_is_ram(unsigned long pfn)
{
	return __range_ok(pfn, 0);
}
#else
int page_is_ram(unsigned long pfn)
{
	return pfn < max_low_pfn;
}

/*
 * Check for command-line options that affect what the MMU setup will do,
 * currently just "mem=" to limit the amount of usable RAM.
 */
static void mm_cmdline_setup(void)
{
	unsigned long maxmem = 0;
	char *p = cmd_line;

	/* Look for a mem= option on the command line */
	p = strstr(cmd_line, "mem=");
	if (p) {
		p += 4;
		maxmem = memparse(p, &p);
		if (maxmem && memory_size > maxmem) {
			memory_size = maxmem;
			memblock.memory.regions[0].size = memory_size;
		}
	}
}

/*
 * mmu_init_hw() does the chip-specific initialization of the MMU
 * hardware.
 */
static void __init mmu_init_hw(void)
{
	/*
	 * Program the Zone Protection Register (rzpr), which overrides the
	 * access protection encoded in the TLB entries, with the setting
	 * used for kernel mappings.
	 */
	__asm__ __volatile__ ("ori r11, r0, 0x10000000;" \
			"mts rzpr, r11;"
			: : : "r11");
}

/*
 * mmu_init() sets up the basic memory mappings for the kernel, including
 * both RAM and possibly some I/O regions, and gets the page tables and
 * the MMU hardware ready to go.  It is called early from head.S.
 */
asmlinkage void __init mmu_init(void)
{
	unsigned int kstart, ksize;

	if (!memblock.reserved.cnt) {
		pr_emerg("Error memory count\n");
		machine_restart(NULL);
	}

	if ((u32) memblock.memory.regions[0].size < 0x400000) {
		pr_emerg("Memory must be greater than 4MB\n");
		machine_restart(NULL);
	}

	if ((u32) memblock.memory.regions[0].size < kernel_tlb) {
		pr_emerg("Kernel size is greater than memory node\n");
		machine_restart(NULL);
	}

	/* Find the main memory region the kernel lives in */
	memory_start = (u32) memblock.memory.regions[0].base;
	lowmem_size = memory_size = (u32) memblock.memory.regions[0].size;

	if (lowmem_size > CONFIG_LOWMEM_SIZE) {
		lowmem_size = CONFIG_LOWMEM_SIZE;
#ifndef CONFIG_HIGHMEM
		memory_size = lowmem_size;
#endif
	}

	mm_cmdline_setup();	/* parse the mem= option, if any */

	/*
	 * Map out the kernel text/data/bss from the available physical
	 * memory.
	 */
	kstart = __pa(CONFIG_KERNEL_START);
	/* kernel size, rounded up to whole pages */
	ksize = PAGE_ALIGN(((u32)_end - (u32)CONFIG_KERNEL_START));
	memblock_reserve(kstart, ksize);

#if defined(CONFIG_BLK_DEV_INITRD)
	/* Remove the init RAM disk from the available memory */
	if (initrd_start) {
		unsigned long size;
		size = initrd_end - initrd_start;
		memblock_reserve(__virt_to_phys(initrd_start), size);
	}
#endif

	/* Initialize the MMU hardware */
	mmu_init_hw();

	/* Map in all of RAM starting at CONFIG_KERNEL_START */
	mapin_ram();

	/* Extend the vmalloc and ioremap area as far as possible */
#ifdef CONFIG_HIGHMEM
	ioremap_base = ioremap_bot = PKMAP_BASE;
#else
	ioremap_base = ioremap_bot = FIXADDR_START;
#endif

	/* Initialize the MMU context management code */
	mmu_context_init();

	/*
	 * Limit early memblock allocations to lowmem so that the flattened
	 * device tree can be unflattened from the linear mapping.
	 */
	memblock_set_current_limit(memory_start + lowmem_size - 1);
}

/* This is only called until mem_init is done */
void __init *early_get_page(void)
{
	/*
	 * Early page-table pages must come from below memory_start +
	 * kernel_tlb, the region already mapped by the TLB entries set up
	 * in head.S.
	 */
	return __va(memblock_alloc_base(PAGE_SIZE, PAGE_SIZE,
				memory_start + kernel_tlb));
}

#endif /* CONFIG_MMU */

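/*
 * Allocate zeroed memory from the slab allocator once mem_init() has run,
 * or from memblock during early boot.
 */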
void * __ref zalloc_maybe_bootmem(size_t size, gfp_t mask)
{
	void *p;

	if (mem_init_done)
		p = kzalloc(size, mask);
	else {
		p = memblock_alloc(size, SMP_CACHE_BYTES);
		if (p)
			memset(p, 0, size);
	}

	return p;
}