#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/memblock.h>
#include <linux/mm.h>
#include <linux/initrd.h>
#include <linux/pagemap.h>
#include <linux/pfn.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/export.h>

#include <asm/page.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>
#include <asm/sections.h>
#include <asm/tlb.h>
#include <asm/fixmap.h>

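/* Set once mem_init() has finished and the normal page allocator is usable. */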
int mem_init_done;

#ifndef CONFIG_MMU
unsigned int __page_offset;
EXPORT_SYMBOL(__page_offset);

#else
static int init_bootmem_done;
#endif /* CONFIG_MMU */

char *klimit = _end;

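/*
 * Boot-time view of physical memory: base address and total size of
 * main RAM, plus the part of it that is permanently mapped (lowmem).
 */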
unsigned long memory_start;
EXPORT_SYMBOL(memory_start);
unsigned long memory_size;
EXPORT_SYMBOL(memory_size);
unsigned long lowmem_size;

#ifdef CONFIG_HIGHMEM
pte_t *kmap_pte;
EXPORT_SYMBOL(kmap_pte);
pgprot_t kmap_prot;
EXPORT_SYMBOL(kmap_prot);

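/* Walk the kernel page tables and return the PTE that maps @vaddr. */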
static inline pte_t *virt_to_kpte(unsigned long vaddr)
{
	return pte_offset_kernel(pmd_offset(pgd_offset_k(vaddr),
			vaddr), vaddr);
}

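/*
 * Set up the kmap machinery: create the PKMAP page table and cache the
 * PTE location and protection bits used for atomic kmaps in the fixmap.
 */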
static void __init highmem_init(void)
{
	pr_debug("%x\n", (u32)PKMAP_BASE);
	map_page(PKMAP_BASE, 0, 0);
	pkmap_page_table = virt_to_kpte(PKMAP_BASE);

	kmap_pte = virt_to_kpte(__fix_to_virt(FIX_KMAP_BEGIN));
	kmap_prot = PAGE_KERNEL;
}

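/*
 * Hand every non-reserved page above max_low_pfn over to the buddy
 * allocator as highmem.
 */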
static void highmem_setup(void)
{
	unsigned long pfn;

	for (pfn = max_low_pfn; pfn < max_pfn; ++pfn) {
		struct page *page = pfn_to_page(pfn);

		/* Skip pages that were reserved via memblock */
		if (!memblock_is_reserved(pfn << PAGE_SHIFT))
			free_highmem_page(page);
	}
}
#endif /* CONFIG_HIGHMEM */

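/*
 * paging_init() sets up the zone sizes; the kernel page tables
 * themselves were already created during early MMU setup.
 */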
static void __init paging_init(void)
{
	unsigned long zones_size[MAX_NR_ZONES];
#ifdef CONFIG_MMU
	int idx;

	/* Invalidate every fixmap entry to start from a clean slate */
	for (idx = 0; idx < __end_of_fixed_addresses; idx++)
		clear_fixmap(idx);
#endif

	/* Clean every zone */
	memset(zones_size, 0, sizeof(zones_size));

#ifdef CONFIG_HIGHMEM
	highmem_init();

	zones_size[ZONE_DMA] = max_low_pfn;
	zones_size[ZONE_HIGHMEM] = max_pfn;
#else
	zones_size[ZONE_DMA] = max_pfn;
#endif

	/* There are no holes in the memory map */
	free_area_init_nodes(zones_size);
}

void __init setup_memory(void)
{
	unsigned long map_size;
	struct memblock_region *reg;

#ifndef CONFIG_MMU
	u32 kernel_align_start, kernel_align_size;

	/* Find the memory bank that contains the kernel image */
	for_each_memblock(memory, reg) {
		memory_start = (u32)reg->base;
		lowmem_size = reg->size;
		if ((memory_start <= (u32)_text) &&
		    ((u32)_text <= (memory_start + lowmem_size - 1))) {
			memory_size = lowmem_size;
			PAGE_OFFSET = memory_start;
			pr_info("%s: Main mem: 0x%x, size 0x%08x\n",
				__func__, (u32) memory_start,
				(u32) memory_size);
			break;
		}
	}

	if (!memory_start || !memory_size) {
		panic("%s: Missing memory setting 0x%08x, size=0x%08x\n",
		      __func__, (u32) memory_start, (u32) memory_size);
	}

	/* Reserve the page-aligned region occupied by the kernel image */
	kernel_align_start = PAGE_DOWN((u32)_text);
	kernel_align_size = PAGE_UP((u32)klimit) - kernel_align_start;
	pr_info("%s: kernel addr:0x%08x-0x%08x size=0x%08x\n",
		__func__, kernel_align_start,
		kernel_align_start + kernel_align_size, kernel_align_size);
	memblock_reserve(kernel_align_start, kernel_align_size);
#endif

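	/*
	 * PFN bookkeeping; RAM is assumed to be one contiguous block:
	 * min_low_pfn/max_low_pfn bound the permanently mapped lowmem,
	 * while max_pfn is the first PFN past all of RAM (incl. highmem).
	 */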
	min_low_pfn = memory_start >> PAGE_SHIFT;
	max_mapnr = memory_size >> PAGE_SHIFT;
	max_low_pfn = ((u64)memory_start + (u64)lowmem_size) >> PAGE_SHIFT;
	max_pfn = ((u64)memory_start + (u64)memory_size) >> PAGE_SHIFT;

	pr_info("%s: max_mapnr: %#lx\n", __func__, max_mapnr);
	pr_info("%s: min_low_pfn: %#lx\n", __func__, min_low_pfn);
	pr_info("%s: max_low_pfn: %#lx\n", __func__, max_low_pfn);
	pr_info("%s: max_pfn: %#lx\n", __func__, max_pfn);

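	/*
	 * Put the bootmem bitmap just above the kernel image (klimit) and
	 * reserve that range so nothing else is allocated on top of it.
	 */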
	map_size = init_bootmem_node(NODE_DATA(0),
			PFN_UP(TOPHYS((u32)klimit)), min_low_pfn, max_low_pfn);
	memblock_reserve(PFN_UP(TOPHYS((u32)klimit)) << PAGE_SHIFT, map_size);

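	/* Tell the core VM which node each memory range belongs to */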
	for_each_memblock(memory, reg) {
		unsigned long start_pfn, end_pfn;

		start_pfn = memblock_region_memory_base_pfn(reg);
		end_pfn = memblock_region_memory_end_pfn(reg);
		memblock_set_node(start_pfn << PAGE_SHIFT,
				  (end_pfn - start_pfn) << PAGE_SHIFT,
				  &memblock.memory, 0);
	}

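	/* Release the whole of lowmem to the bootmem allocator */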
	free_bootmem_with_active_regions(0, max_low_pfn);

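	/*
	 * Propagate memblock reservations into bootmem, clipping each
	 * region to lowmem: bootmem only manages the mapped range.
	 */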
	for_each_memblock(reserved, reg) {
		unsigned long top = reg->base + reg->size - 1;

		pr_debug("reserved - 0x%08x-0x%08x, %lx, %lx\n",
			 (u32) reg->base, (u32) reg->size, top,
			 memory_start + lowmem_size - 1);

		if (top <= (memory_start + lowmem_size - 1)) {
			/* Region lies entirely within lowmem */
			reserve_bootmem(reg->base, reg->size, BOOTMEM_DEFAULT);
		} else if (reg->base < (memory_start + lowmem_size - 1)) {
			/* Region straddles the lowmem boundary - clip it */
			unsigned long trunc_size = memory_start + lowmem_size -
						   reg->base;
			reserve_bootmem(reg->base, trunc_size, BOOTMEM_DEFAULT);
		}
	}

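	/* Register the active regions with sparsemem */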
	sparse_memory_present_with_active_regions(0);

#ifdef CONFIG_MMU
	init_bootmem_done = 1;
#endif
	paging_init();
}

#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
	free_reserved_area((void *)start, (void *)end, -1, "initrd");
}
#endif

void free_initmem(void)
{
	free_initmem_default(-1);
}

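/*
 * Release all remaining boot memory to the buddy allocator and print
 * the kernel's virtual memory layout.
 */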
void __init mem_init(void)
{
	high_memory = (void *)__va(memory_start + lowmem_size - 1);

	/* This puts all lowmem onto the freelists */
	free_all_bootmem();
#ifdef CONFIG_HIGHMEM
	highmem_setup();
#endif

	mem_init_print_info(NULL);
#ifdef CONFIG_MMU
	pr_info("Kernel virtual memory layout:\n");
	pr_info(" * 0x%08lx..0x%08lx : fixmap\n", FIXADDR_START, FIXADDR_TOP);
#ifdef CONFIG_HIGHMEM
	pr_info(" * 0x%08lx..0x%08lx : highmem PTEs\n",
		PKMAP_BASE, PKMAP_ADDR(LAST_PKMAP));
#endif
	pr_info(" * 0x%08lx..0x%08lx : early ioremap\n",
		ioremap_bot, ioremap_base);
	pr_info(" * 0x%08lx..0x%08lx : vmalloc & ioremap\n",
		(unsigned long)VMALLOC_START, VMALLOC_END);
#endif
	mem_init_done = 1;
}

#ifndef CONFIG_MMU
int page_is_ram(unsigned long pfn)
{
	return __range_ok(pfn, 0);
}
#else
int page_is_ram(unsigned long pfn)
{
	return pfn < max_low_pfn;
}

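/*
 * Check for command-line options that affect what mmu_init() will do;
 * currently only "mem=", which caps the usable memory size.
 */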
static void mm_cmdline_setup(void)
{
	unsigned long maxmem = 0;
	char *p;

	/* Look for a mem= option on the command line */
	p = strstr(cmd_line, "mem=");
	if (p) {
		p += 4;
		maxmem = memparse(p, &p);
		if (maxmem && memory_size > maxmem) {
			memory_size = maxmem;
			memblock.memory.regions[0].size = memory_size;
		}
	}
}

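/*
 * mmu_init_hw() does the chip-specific initialization of the MMU
 * hardware.
 */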
static void __init mmu_init_hw(void)
{
	/*
	 * Program the Zone Protection Register (ZPR). Only two zones are
	 * used: zone 0 for kernel accesses and zone 1 for user accesses.
	 * The value 0x10000000 sets the zone 1 field to binary 01 and
	 * clears every other zone, so access rights are governed by the
	 * protection bits in each PTE.
	 */
	__asm__ __volatile__ ("ori r11, r0, 0x10000000;" \
			"mts rzpr, r11;"
			: : : "r11");
}

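/*
 * mmu_init() sets up the kernel's basic memory mappings, reserves the
 * regions that must never reach the allocators, and initializes the
 * kernel page tables.
 */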
asmlinkage void __init mmu_init(void)
{
	unsigned int kstart, ksize;

	if (!memblock.reserved.cnt) {
		pr_emerg("Error memory count\n");
		machine_restart(NULL);
	}

	if ((u32) memblock.memory.regions[0].size < 0x400000) {
		pr_emerg("Memory must be greater than 4MB\n");
		machine_restart(NULL);
	}

	if ((u32) memblock.memory.regions[0].size < kernel_tlb) {
		pr_emerg("Kernel size is greater than memory node\n");
		machine_restart(NULL);
	}

	/* Find the main memory bank that holds the kernel */
	memory_start = (u32) memblock.memory.regions[0].base;
	lowmem_size = memory_size = (u32) memblock.memory.regions[0].size;

	if (lowmem_size > CONFIG_LOWMEM_SIZE) {
		lowmem_size = CONFIG_LOWMEM_SIZE;
#ifndef CONFIG_HIGHMEM
		memory_size = lowmem_size;
#endif
	}

	mm_cmdline_setup();

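	/*
	 * Reserve the physical memory occupied by the kernel image so the
	 * boot allocator never hands it out.
	 */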
	kstart = __pa(CONFIG_KERNEL_START);
	ksize = PAGE_ALIGN(((u32)_end - (u32)CONFIG_KERNEL_START));
	memblock_reserve(kstart, ksize);

#if defined(CONFIG_BLK_DEV_INITRD)
	/* Remove the initial RAM disk from the available memory */
	if (initrd_start) {
		unsigned long size;

		size = initrd_end - initrd_start;
		memblock_reserve(__virt_to_phys(initrd_start), size);
	}
#endif

	/* Initialize the MMU hardware */
	mmu_init_hw();

	/* Map in all of RAM starting at CONFIG_KERNEL_START */
	mapin_ram();

	/* Make the vmalloc/ioremap area as large as possible */
#ifdef CONFIG_HIGHMEM
	ioremap_base = ioremap_bot = PKMAP_BASE;
#else
	ioremap_base = ioremap_bot = FIXADDR_START;
#endif

	/* Initialize MMU context management */
	mmu_context_init();

	/* Limit memblock allocations to the permanently mapped lowmem */
	memblock_set_current_limit(memory_start + lowmem_size - 1);
}
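/*
 * Allocate one page for early page tables; works both before and after
 * the bootmem allocator has been initialized.
 */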
void __init *early_get_page(void)
{
	void *p;

	if (init_bootmem_done) {
		p = alloc_bootmem_pages(PAGE_SIZE);
	} else {
		/*
		 * Before bootmem is ready, allocate from memblock but stay
		 * below memory_start + kernel_tlb, the range already mapped
		 * by the early TLB entries set up in head.S.
		 */
		p = __va(memblock_alloc_base(PAGE_SIZE, PAGE_SIZE,
					memory_start + kernel_tlb));
	}

	return p;
}

#endif /* CONFIG_MMU */

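/*
 * Allocation helpers that fall back to bootmem until mem_init_done
 * signals that the slab allocator is available.
 */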
void * __ref alloc_maybe_bootmem(size_t size, gfp_t mask)
{
	if (mem_init_done)
		return kmalloc(size, mask);
	else
		return alloc_bootmem(size);
}

void * __ref zalloc_maybe_bootmem(size_t size, gfp_t mask)
{
	void *p;

	if (mem_init_done)
		p = kzalloc(size, mask);
	else {
		p = alloc_bootmem(size);
		if (p)
			memset(p, 0, size);
	}
	return p;
}