// SPDX-License-Identifier: GPL-2.0-only
/*
 * MicroBlaze memory and MMU initialization.
 *
 * Copyright (C) 2007-2008 Michal Simek <monstr@monstr.eu>
 * Copyright (C) 2006 Atmark Techno, Inc.
 */

#include <linux/dma-map-ops.h>
#include <linux/memblock.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/initrd.h>
#include <linux/pagemap.h>
#include <linux/pfn.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/export.h>

#include <asm/page.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>
#include <asm/sections.h>
#include <asm/tlb.h>
#include <asm/fixmap.h>

/* Used by both MMU and noMMU builds because of the generic PCI code */
int mem_init_done;

char *klimit = _end;

/*
 * Boundaries of the memory node the kernel lives in: memory_start and
 * memory_size are exported for drivers, lowmem_size is the directly
 * mapped portion.
 */
unsigned long memory_start;
EXPORT_SYMBOL(memory_start);
unsigned long memory_size;
EXPORT_SYMBOL(memory_size);
unsigned long lowmem_size;

EXPORT_SYMBOL(min_low_pfn);
EXPORT_SYMBOL(max_low_pfn);

#ifdef CONFIG_HIGHMEM
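/*
 * Map one page at PKMAP_BASE so that virt_to_kpte() can return the
 * page table that backs the kmap (pkmap) area.
 */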
static void __init highmem_init(void)
{
	pr_debug("%x\n", (u32)PKMAP_BASE);
	map_page(PKMAP_BASE, 0, 0);
	pkmap_page_table = virt_to_kpte(PKMAP_BASE);
}

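/*
 * Hand every page frame above the lowmem limit to the page allocator
 * as highmem, skipping page frames that memblock has reserved.
 */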
static void __meminit highmem_setup(void)
{
	unsigned long pfn;

	for (pfn = max_low_pfn; pfn < max_pfn; ++pfn) {
		struct page *page = pfn_to_page(pfn);

		/* Skip page frames reserved by memblock (kernel, initrd, ...) */
		if (!memblock_is_reserved(pfn << PAGE_SHIFT))
			free_highmem_page(page);
	}
}
#endif /* CONFIG_HIGHMEM */

/*
 * paging_init() sets up the memory zones; the page tables themselves
 * were already set up by mmu_init() and mapin_ram().
 */
static void __init paging_init(void)
{
	unsigned long zones_size[MAX_NR_ZONES];
	int idx;

	/* Set up fixmaps */
	for (idx = 0; idx < __end_of_fixed_addresses; idx++)
		clear_fixmap(idx);

	/* Clean every zone */
	memset(zones_size, 0, sizeof(zones_size));

#ifdef CONFIG_HIGHMEM
	highmem_init();

	zones_size[ZONE_DMA] = max_low_pfn;
	zones_size[ZONE_HIGHMEM] = max_pfn;
#else
	zones_size[ZONE_DMA] = max_pfn;
#endif

	/* We do not have holes in the memory map */
	free_area_init(zones_size);
}

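/*
 * setup_memory() derives the global pfn limits from memory_start,
 * lowmem_size and memory_size, then hands them to paging_init().
 */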
void __init setup_memory(void)
{
	/*
	 * min_low_pfn - first usable page frame (RAM starts at the kernel)
	 * max_low_pfn - first page frame above the lowmem mapping
	 * max_pfn     - first page frame above all of RAM
	 * max_mapnr   - number of page frames in the contiguous memory node
	 */
	min_low_pfn = memory_start >> PAGE_SHIFT;
	max_mapnr = memory_size >> PAGE_SHIFT;
	max_low_pfn = ((u64)memory_start + (u64)lowmem_size) >> PAGE_SHIFT;
	max_pfn = ((u64)memory_start + (u64)memory_size) >> PAGE_SHIFT;

	pr_info("%s: max_mapnr: %#lx\n", __func__, max_mapnr);
	pr_info("%s: min_low_pfn: %#lx\n", __func__, min_low_pfn);
	pr_info("%s: max_low_pfn: %#lx\n", __func__, max_low_pfn);
	pr_info("%s: max_pfn: %#lx\n", __func__, max_pfn);

	paging_init();
}

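/*
 * mem_init() sets high_memory, releases boot memory to the buddy
 * allocator (plus highmem pages under CONFIG_HIGHMEM) and marks the
 * normal allocators as usable.
 */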
void __init mem_init(void)
{
	high_memory = (void *)__va(memory_start + lowmem_size - 1);

	/* This will put all lowmem onto the freelists */
	memblock_free_all();
#ifdef CONFIG_HIGHMEM
	highmem_setup();
#endif

	mem_init_done = 1;
}

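/* Only lowmem page frames are reported as RAM */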
int page_is_ram(unsigned long pfn)
{
	return pfn < max_low_pfn;
}

/*
 * Check for command-line options that affect what mmu_init() will do.
 */
static void mm_cmdline_setup(void)
{
	unsigned long maxmem = 0;
	char *p;

	/* Look for a mem= option on the command line */
	p = strstr(cmd_line, "mem=");
	if (p) {
		p += 4;
		maxmem = memparse(p, &p);
		if (maxmem && memory_size > maxmem) {
			memory_size = maxmem;
			memblock.memory.regions[0].size = memory_size;
		}
	}
}
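
/*
 * Example: booting with "mem=64M" makes memparse() return 0x4000000,
 * clamping both memory_size and the first memblock region to 64 MiB.
 */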

/*
 * mmu_init_hw() does the chip-specific initialization of the MMU hardware.
 */
static void __init mmu_init_hw(void)
{
	/*
	 * The Zone Protection Register (ZPR) defines how protection will
	 * be applied to every page which is a member of a given zone.
	 * At present, only two of the zones are used.
	 *
	 * The zone index bits (of ZSEL) in the PTE are used for software
	 * indicators, except the LSB. For user access, zone 1 is used;
	 * for kernel access, zone 0 is used. All zones except zone 1 are
	 * set to zero, allowing only kernel access as indicated in the
	 * PTE. For zone 1, binary 01 is set to allow user access as
	 * indicated in the PTE, which also allows kernel access.
	 */
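	/*
	 * Worked example for the constant below, assuming sixteen 2-bit
	 * zone fields packed MSB-first: 0x10000000 is binary
	 * 0001 0000 0000 ..., i.e. zone 0 = 00 (kernel-only) and
	 * zone 1 = 01 (user access permitted when the PTE allows it).
	 */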
	__asm__ __volatile__ ("ori r11, r0, 0x10000000;"
			"mts rzpr, r11;"
			: : : "r11");
}

/*
 * mmu_init() sets up the basic memory mappings for the kernel,
 * including both RAM and possibly some I/O regions, and sets up the
 * page tables and the MMU hardware ready to go.
 *
 * Called from head.S.
 */
asmlinkage void __init mmu_init(void)
{
	unsigned int kstart, ksize;

	if (!memblock.reserved.cnt) {
		pr_emerg("No memory reservations found\n");
		machine_restart(NULL);
	}

	if ((u32)memblock.memory.regions[0].size < 0x400000) {
		pr_emerg("Memory must be at least 4MB\n");
		machine_restart(NULL);
	}

	if ((u32)memblock.memory.regions[0].size < kernel_tlb) {
		pr_emerg("Kernel is larger than the memory node\n");
		machine_restart(NULL);
	}

	/* Find the main memory node the kernel lives in */
	memory_start = (u32)memblock.memory.regions[0].base;
	lowmem_size = memory_size = (u32)memblock.memory.regions[0].size;

	if (lowmem_size > CONFIG_LOWMEM_SIZE) {
		lowmem_size = CONFIG_LOWMEM_SIZE;
#ifndef CONFIG_HIGHMEM
		memory_size = lowmem_size;
#endif
	}
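
	/*
	 * Example: with 1 GiB of RAM and a 768 MiB CONFIG_LOWMEM_SIZE,
	 * lowmem_size is clamped to 768 MiB; without CONFIG_HIGHMEM the
	 * remaining 256 MiB are dropped from memory_size as well.
	 */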

	mm_cmdline_setup();	/* apply any "mem=" limit */

	/*
	 * Map out the kernel text/data/bss from the available physical
	 * memory.
	 */
	kstart = __pa(CONFIG_KERNEL_START);
	ksize = PAGE_ALIGN(((u32)_end - (u32)CONFIG_KERNEL_START));
	memblock_reserve(kstart, ksize);

#if defined(CONFIG_BLK_DEV_INITRD)
	/* Remove the init RAM disk from the available memory */
	if (initrd_start) {
		unsigned long size;

		size = initrd_end - initrd_start;
		memblock_reserve(__virt_to_phys(initrd_start), size);
	}
#endif /* CONFIG_BLK_DEV_INITRD */

	/* Initialize the MMU hardware */
	mmu_init_hw();

	/* Map in all of RAM starting at CONFIG_KERNEL_START */
	mapin_ram();

	/* Extend the vmalloc and ioremap area as far as possible */
#ifdef CONFIG_HIGHMEM
	ioremap_base = ioremap_bot = PKMAP_BASE;
#else
	ioremap_base = ioremap_bot = FIXADDR_START;
#endif

	/* Initialize MMU context management */
	mmu_context_init();

	/*
	 * Keep memblock allocations within lowmem so that structures such
	 * as the unflattened device tree land inside the linear mapping.
	 */
	memblock_set_current_limit(memory_start + lowmem_size - 1);

	parse_early_param();

	/* CMA initialization */
	dma_contiguous_reserve(memory_start + lowmem_size - 1);
}

/* This is only called until mem_init is done. */
void __init *early_get_page(void)
{
	/*
	 * The upper limit is memory_start + kernel_tlb because that is
	 * all that head.S has mapped so far.
	 */
	return memblock_alloc_try_nid_raw(PAGE_SIZE, PAGE_SIZE,
				MEMBLOCK_LOW_LIMIT, memory_start + kernel_tlb,
				NUMA_NO_NODE);
}
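
/*
 * Note: the expected caller of early_get_page() is early page-table
 * code, e.g. pte_alloc_one_kernel() before mem_init_done is set.
 */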

void * __ref zalloc_maybe_bootmem(size_t size, gfp_t mask)
{
	void *p;

	if (mem_init_done) {
		p = kzalloc(size, mask);
	} else {
		p = memblock_alloc(size, SMP_CACHE_BYTES);
		if (!p)
			panic("%s: Failed to allocate %zu bytes\n",
			      __func__, size);
	}

	return p;
}
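
/*
 * Example (hypothetical caller): get zeroed memory without caring
 * whether the slab allocator is up yet:
 *
 *	struct plat_desc *d = zalloc_maybe_bootmem(sizeof(*d), GFP_KERNEL);
 */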