#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/memblock.h>
#include <linux/mm.h>
#include <linux/initrd.h>
#include <linux/pagemap.h>
#include <linux/pfn.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/export.h>

#include <asm/page.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>
#include <asm/sections.h>
#include <asm/tlb.h>
#include <asm/fixmap.h>

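/* Set once mem_init() has completed; tested by the *_maybe_bootmem() helpers. */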
int mem_init_done;

#ifndef CONFIG_MMU
unsigned int __page_offset;
EXPORT_SYMBOL(__page_offset);

#else
static int init_bootmem_done;
#endif /* CONFIG_MMU */

char *klimit = _end;

/*
 * Main memory parameters: physical start and total size of RAM, and
 * the size of the directly mapped low memory region. These are filled
 * in by setup_memory()/mmu_init().
 */
unsigned long memory_start;
EXPORT_SYMBOL(memory_start);
unsigned long memory_size;
EXPORT_SYMBOL(memory_size);
unsigned long lowmem_size;

#ifdef CONFIG_HIGHMEM
pte_t *kmap_pte;
EXPORT_SYMBOL(kmap_pte);
pgprot_t kmap_prot;
EXPORT_SYMBOL(kmap_prot);

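/* Return the kernel PTE that maps vaddr, walking the kernel page tables. */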
static inline pte_t *virt_to_kpte(unsigned long vaddr)
{
	return pte_offset_kernel(pmd_offset(pgd_offset_k(vaddr),
			vaddr), vaddr);
}

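/*
 * Establish the pkmap page table and cache the PTE and page protection
 * used for the fixmap-based kmap area.
 */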
static void __init highmem_init(void)
{
	pr_debug("%x\n", (u32)PKMAP_BASE);
	map_page(PKMAP_BASE, 0, 0);
	pkmap_page_table = virt_to_kpte(PKMAP_BASE);

	kmap_pte = virt_to_kpte(__fix_to_virt(FIX_KMAP_BEGIN));
	kmap_prot = PAGE_KERNEL;
}

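/*
 * Release every non-reserved highmem page to the page allocator. The
 * return value is the number of pages freed; mem_init() subtracts it
 * from its count of reserved pages.
 */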
static unsigned long highmem_setup(void)
{
	unsigned long pfn;
	unsigned long reservedpages = 0;

	for (pfn = max_low_pfn; pfn < max_pfn; ++pfn) {
		struct page *page = pfn_to_page(pfn);

		/* Skip pages that memblock has marked reserved. */
		if (memblock_is_reserved(pfn << PAGE_SHIFT))
			continue;
		free_highmem_page(page);
		reservedpages++;
	}
	pr_info("High memory: %luk\n",
			totalhigh_pages << (PAGE_SHIFT-10));

	return reservedpages;
}
#endif /* CONFIG_HIGHMEM */

/*
 * paging_init() computes the zone sizes and initializes the free lists;
 * the page tables themselves have already been set up by mmu_init().
 */
static void __init paging_init(void)
{
	unsigned long zones_size[MAX_NR_ZONES];
#ifdef CONFIG_MMU
	int idx;

	/* Clear all the fixmap entries. */
	for (idx = 0; idx < __end_of_fixed_addresses; idx++)
		clear_fixmap(idx);
#endif

	/* Clean every zone. */
	memset(zones_size, 0, sizeof(zones_size));

#ifdef CONFIG_HIGHMEM
	highmem_init();

	zones_size[ZONE_DMA] = max_low_pfn;
	zones_size[ZONE_HIGHMEM] = max_pfn;
#else
	zones_size[ZONE_DMA] = max_pfn;
#endif

	/* There are no holes in the memory map. */
	free_area_init_nodes(zones_size);
}

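/*
 * setup_memory() discovers main memory (on noMMU), computes the PFN
 * limits, sets up the bootmem allocator and finally calls paging_init().
 */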
void __init setup_memory(void)
{
	unsigned long map_size;
	struct memblock_region *reg;

#ifndef CONFIG_MMU
	u32 kernel_align_start, kernel_align_size;

	/* Find the main memory bank, i.e. the one the kernel lives in. */
	for_each_memblock(memory, reg) {
		memory_start = (u32)reg->base;
		lowmem_size = reg->size;
		if ((memory_start <= (u32)_text) &&
			((u32)_text <= (memory_start + lowmem_size - 1))) {
			memory_size = lowmem_size;
			PAGE_OFFSET = memory_start;
			pr_info("%s: Main mem: 0x%x, size 0x%08x\n",
				__func__, (u32) memory_start,
				(u32) memory_size);
			break;
		}
	}

	if (!memory_start || !memory_size) {
		panic("%s: Missing memory setting 0x%08x, size=0x%08x\n",
			__func__, (u32) memory_start, (u32) memory_size);
	}

	/* Reserve the region occupied by the kernel image. */
	kernel_align_start = PAGE_DOWN((u32)_text);
	/* PAGE_UP is safe here: _end - _text is a multiple of PAGE_SIZE. */
	kernel_align_size = PAGE_UP((u32)klimit) - kernel_align_start;
	pr_info("%s: kernel addr:0x%08x-0x%08x size=0x%08x\n",
		__func__, kernel_align_start, kernel_align_start
			+ kernel_align_size, kernel_align_size);
	memblock_reserve(kernel_align_start, kernel_align_size);
#endif

	/*
	 * Set up the page frame number (PFN) limits:
	 *   min_low_pfn - first usable page of RAM
	 *   max_low_pfn - first page above the directly mapped lowmem
	 *   max_pfn     - first page above all of RAM
	 *   max_mapnr/num_physpages - total number of RAM pages
	 */
	min_low_pfn = memory_start >> PAGE_SHIFT;
	/* RAM is assumed to be contiguous. */
	num_physpages = max_mapnr = memory_size >> PAGE_SHIFT;
	max_low_pfn = ((u64)memory_start + (u64)lowmem_size) >> PAGE_SHIFT;
	max_pfn = ((u64)memory_start + (u64)memory_size) >> PAGE_SHIFT;

	pr_info("%s: max_mapnr: %#lx\n", __func__, max_mapnr);
	pr_info("%s: min_low_pfn: %#lx\n", __func__, min_low_pfn);
	pr_info("%s: max_low_pfn: %#lx\n", __func__, max_low_pfn);
	pr_info("%s: max_pfn: %#lx\n", __func__, max_pfn);

	/*
	 * Initialize the bootmem allocator. Its bitmap is placed at the
	 * first whole page above the kernel image (klimit); reserve that
	 * area in memblock as well so nothing else lands on it.
	 */
	map_size = init_bootmem_node(NODE_DATA(0),
			PFN_UP(TOPHYS((u32)klimit)), min_low_pfn, max_low_pfn);
	memblock_reserve(PFN_UP(TOPHYS((u32)klimit)) << PAGE_SHIFT, map_size);

	/* Add active regions with valid PFNs. */
	for_each_memblock(memory, reg) {
		unsigned long start_pfn, end_pfn;

		start_pfn = memblock_region_memory_base_pfn(reg);
		end_pfn = memblock_region_memory_end_pfn(reg);
		memblock_set_node(start_pfn << PAGE_SHIFT,
				  (end_pfn - start_pfn) << PAGE_SHIFT,
				  &memblock.memory, 0);
	}

	/* The free bootmem pool is the whole of main memory. */
	free_bootmem_with_active_regions(0, max_low_pfn);

	/* Re-reserve the blocks memblock has already allocated. */
	for_each_memblock(reserved, reg) {
		unsigned long top = reg->base + reg->size - 1;

		pr_debug("reserved - 0x%08x-0x%08x, %lx, %lx\n",
			 (u32) reg->base, (u32) reg->size, top,
			 memory_start + lowmem_size - 1);

		if (top <= (memory_start + lowmem_size - 1)) {
			reserve_bootmem(reg->base, reg->size, BOOTMEM_DEFAULT);
		} else if (reg->base < (memory_start + lowmem_size - 1)) {
			/* The region straddles the lowmem limit: clip it. */
			unsigned long trunc_size = memory_start + lowmem_size -
						reg->base;
			reserve_bootmem(reg->base, trunc_size, BOOTMEM_DEFAULT);
		}
	}

	sparse_memory_present_with_active_regions(0);

#ifdef CONFIG_MMU
	init_bootmem_done = 1;
#endif
	paging_init();
}

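/* Release the memory holding the initial ramdisk back to the system. */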
#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
	free_reserved_area(start, end, 0, "initrd");
}
#endif

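/* Free the kernel's __init sections once boot has finished. */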
void free_initmem(void)
{
	free_initmem_default(0);
}

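/*
 * mem_init() hands all bootmem over to the buddy allocator, counts the
 * reserved pages, and prints the memory statistics and (with an MMU)
 * the kernel virtual memory layout.
 */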
void __init mem_init(void)
{
	pg_data_t *pgdat;
	unsigned long reservedpages = 0, codesize, initsize, datasize, bsssize;

	high_memory = (void *)__va(memory_start + lowmem_size - 1);

	/* This will put all of memory onto the freelists. */
	totalram_pages += free_all_bootmem();

	for_each_online_pgdat(pgdat) {
		unsigned long i;
		struct page *page;

		for (i = 0; i < pgdat->node_spanned_pages; i++) {
			if (!pfn_valid(pgdat->node_start_pfn + i))
				continue;
			page = pgdat_page_nr(pgdat, i);
			if (PageReserved(page))
				reservedpages++;
		}
	}

#ifdef CONFIG_HIGHMEM
	reservedpages -= highmem_setup();
#endif

	codesize = (unsigned long)&_sdata - (unsigned long)&_stext;
	datasize = (unsigned long)&_edata - (unsigned long)&_sdata;
	initsize = (unsigned long)&__init_end - (unsigned long)&__init_begin;
	bsssize = (unsigned long)&__bss_stop - (unsigned long)&__bss_start;

	pr_info("Memory: %luk/%luk available (%luk kernel code, ",
		nr_free_pages() << (PAGE_SHIFT-10),
		num_physpages << (PAGE_SHIFT-10),
		codesize >> 10);
	pr_cont("%luk reserved, %luk data, %luk bss, %luk init)\n",
		reservedpages << (PAGE_SHIFT-10),
		datasize >> 10,
		bsssize >> 10,
		initsize >> 10);

#ifdef CONFIG_MMU
	pr_info("Kernel virtual memory layout:\n");
	pr_info(" * 0x%08lx..0x%08lx : fixmap\n", FIXADDR_START, FIXADDR_TOP);
#ifdef CONFIG_HIGHMEM
	pr_info(" * 0x%08lx..0x%08lx : highmem PTEs\n",
		PKMAP_BASE, PKMAP_ADDR(LAST_PKMAP));
#endif
	pr_info(" * 0x%08lx..0x%08lx : early ioremap\n",
		ioremap_bot, ioremap_base);
	pr_info(" * 0x%08lx..0x%08lx : vmalloc & ioremap\n",
		(unsigned long)VMALLOC_START, VMALLOC_END);
#endif
	mem_init_done = 1;
}

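/* Report whether a PFN is backed by RAM. */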
#ifndef CONFIG_MMU
int page_is_ram(unsigned long pfn)
{
	return __range_ok(pfn, 0);
}
#else
int page_is_ram(unsigned long pfn)
{
	return pfn < max_low_pfn;
}

/*
 * Check for command-line options that affect what mmu_init() will do.
 */
static void mm_cmdline_setup(void)
{
	unsigned long maxmem = 0;
	char *p = cmd_line;

	/* Look for a mem= option on the command line. */
	p = strstr(cmd_line, "mem=");
	if (p) {
		p += 4;
		maxmem = memparse(p, &p);
		if (maxmem && memory_size > maxmem) {
			memory_size = maxmem;
			memblock.memory.regions[0].size = memory_size;
		}
	}
}

/*
 * mmu_init_hw() does the chip-specific initialization of the MMU
 * hardware.
 */
static void __init mmu_init_hw(void)
{
	/*
	 * Program the Zone Protection Register (rzpr): write the constant
	 * 0x10000000 through the scratch register r11 (noted in the
	 * clobber list). This initializes the two-bit zone protection
	 * fields that govern kernel versus user access rights.
	 */
	__asm__ __volatile__ ("ori	r11, r0, 0x10000000;" \
			"mts	rzpr, r11;"
			: : : "r11");
}

/*
 * mmu_init() sets up the basic memory mappings for the kernel: it
 * reserves the kernel image and the initrd, initializes the MMU
 * hardware and maps in all of RAM. It is called early from head.S.
 */
asmlinkage void __init mmu_init(void)
{
	unsigned int kstart, ksize;

	if (!memblock.reserved.cnt) {
		pr_emerg("Error memory count\n");
		machine_restart(NULL);
	}

	if ((u32) memblock.memory.regions[0].size < 0x400000) {
		pr_emerg("Memory must be greater than 4MB\n");
		machine_restart(NULL);
	}

	if ((u32) memblock.memory.regions[0].size < kernel_tlb) {
		pr_emerg("Kernel size is greater than memory node\n");
		machine_restart(NULL);
	}

	/* Find main memory where the kernel is. */
	memory_start = (u32) memblock.memory.regions[0].base;
	lowmem_size = memory_size = (u32) memblock.memory.regions[0].size;

	if (lowmem_size > CONFIG_LOWMEM_SIZE) {
		lowmem_size = CONFIG_LOWMEM_SIZE;
#ifndef CONFIG_HIGHMEM
		memory_size = lowmem_size;
#endif
	}

	mm_cmdline_setup();	/* Parse "mem=" from the command line. */

	/*
	 * Reserve the physical memory holding the kernel text/data/bss so
	 * that it is never handed out to allocators.
	 */
	kstart = __pa(CONFIG_KERNEL_START);	/* kernel start */
	/* Kernel size, page-aligned. */
	ksize = PAGE_ALIGN(((u32)_end - (u32)CONFIG_KERNEL_START));
	memblock_reserve(kstart, ksize);

#if defined(CONFIG_BLK_DEV_INITRD)
	/* Remove the init RAM disk from the available memory. */
	if (initrd_start) {
		unsigned long size;

		size = initrd_end - initrd_start;
		memblock_reserve(virt_to_phys((void *)initrd_start), size);
	}
#endif /* CONFIG_BLK_DEV_INITRD */

	/* Initialize the MMU hardware. */
	mmu_init_hw();

	/* Map in all of RAM, starting at CONFIG_KERNEL_START. */
	mapin_ram();

	/* Extend the vmalloc and ioremap areas as much as possible. */
#ifdef CONFIG_HIGHMEM
	ioremap_base = ioremap_bot = PKMAP_BASE;
#else
	ioremap_base = ioremap_bot = FIXADDR_START;
#endif

	/* Initialize MMU context management. */
	mmu_context_init();

	/*
	 * The entire low memory linear mapping is available from here on,
	 * so cap memblock allocations at the top of lowmem.
	 */
	memblock_set_current_limit(memory_start + lowmem_size - 1);
}

/* This is only called until mem_init() is done. */
void __init *early_get_page(void)
{
	void *p;

	if (init_bootmem_done) {
		p = alloc_bootmem_pages(PAGE_SIZE);
	} else {
		/*
		 * The limit is memory_start + kernel_tlb, because that is
		 * all that the memory mapping from head.S covers.
		 */
		p = __va(memblock_alloc_base(PAGE_SIZE, PAGE_SIZE,
				memory_start + kernel_tlb));
	}
	return p;
}

#endif /* CONFIG_MMU */

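/*
 * Allocators usable both before and after mem_init(): fall back to
 * bootmem while the slab allocator is not yet available.
 */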
void * __init_refok alloc_maybe_bootmem(size_t size, gfp_t mask)
{
	if (mem_init_done)
		return kmalloc(size, mask);
	else
		return alloc_bootmem(size);
}

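/* Like alloc_maybe_bootmem(), but the returned memory is zeroed. */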
void * __init_refok zalloc_maybe_bootmem(size_t size, gfp_t mask)
{
	void *p;

	if (mem_init_done)
		p = kzalloc(size, mask);
	else {
		p = alloc_bootmem(size);
		if (p)
			memset(p, 0, size);
	}
	return p;
}