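/*
 * MIPS memory initialization: zero page setup, cache-colour-aware kernel
 * mappings (kmap_coherent and friends), MAAR configuration and zone /
 * highmem initialization.
 */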
#include <linux/bug.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/pagemap.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/swap.h>
#include <linux/proc_fs.h>
#include <linux/pfn.h>
#include <linux/hardirq.h>
#include <linux/gfp.h>
#include <linux/kcore.h>
#include <linux/initrd.h>

#include <asm/asm-offsets.h>
#include <asm/bootinfo.h>
#include <asm/cachectl.h>
#include <asm/cpu.h>
#include <asm/dma.h>
#include <asm/kmap_types.h>
#include <asm/maar.h>
#include <asm/mmu_context.h>
#include <asm/sections.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/fixmap.h>

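/*
 * The empty zero page(s).  zero_page_mask selects the page of the correct
 * cache colour when more than one zero page is allocated, see
 * setup_zero_pages().
 */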
unsigned long empty_zero_page, zero_page_mask;
EXPORT_SYMBOL_GPL(empty_zero_page);
EXPORT_SYMBOL(zero_page_mask);

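/*
 * Allocate the zero page(s).  On CPUs with virtual coherency exceptions
 * (VCE) eight pages are allocated so a zero page of every cache colour is
 * available; otherwise a single page suffices.
 */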
void setup_zero_pages(void)
{
	unsigned int order, i;
	struct page *page;

	if (cpu_has_vce)
		order = 3;
	else
		order = 0;

	empty_zero_page = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
	if (!empty_zero_page)
		panic("Oh boy, that early out of memory?");

	page = virt_to_page((void *)empty_zero_page);
	split_page(page, order);
	for (i = 0; i < (1 << order); i++, page++)
		mark_page_reserved(page);

	zero_page_mask = ((PAGE_SIZE << order) - 1) & PAGE_MASK;
}

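/*
 * Map a page at a fixmap virtual address whose cache colour matches the
 * given user address, using a wired TLB entry.  The mapping is torn down
 * again by kunmap_coherent().
 */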
static void *__kmap_pgprot(struct page *page, unsigned long addr, pgprot_t prot)
{
	enum fixed_addresses idx;
	unsigned long vaddr, flags, entrylo;
	unsigned long old_ctx;
	pte_t pte;
	int tlbidx;

	BUG_ON(Page_dcache_dirty(page));

	preempt_disable();
	pagefault_disable();
	idx = (addr >> PAGE_SHIFT) & (FIX_N_COLOURS - 1);
	idx += in_interrupt() ? FIX_N_COLOURS : 0;
	vaddr = __fix_to_virt(FIX_CMAP_END - idx);
	pte = mk_pte(page, prot);
#if defined(CONFIG_XPA)
	entrylo = pte_to_entrylo(pte.pte_high);
#elif defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
	entrylo = pte.pte_high;
#else
	entrylo = pte_to_entrylo(pte_val(pte));
#endif

	local_irq_save(flags);
	old_ctx = read_c0_entryhi();
	write_c0_entryhi(vaddr & (PAGE_MASK << 1));
	write_c0_entrylo0(entrylo);
	write_c0_entrylo1(entrylo);
#ifdef CONFIG_XPA
	if (cpu_has_xpa) {
		entrylo = (pte.pte_low & _PFNX_MASK);
		writex_c0_entrylo0(entrylo);
		writex_c0_entrylo1(entrylo);
	}
#endif
	tlbidx = num_wired_entries();
	write_c0_wired(tlbidx + 1);
	write_c0_index(tlbidx);
	mtc0_tlbw_hazard();
	tlb_write_indexed();
	tlbw_use_hazard();
	write_c0_entryhi(old_ctx);
	local_irq_restore(flags);

	return (void *)vaddr;
}

void *kmap_coherent(struct page *page, unsigned long addr)
{
	return __kmap_pgprot(page, addr, PAGE_KERNEL);
}

void *kmap_noncoherent(struct page *page, unsigned long addr)
{
	return __kmap_pgprot(page, addr, PAGE_KERNEL_NC);
}

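/*
 * Undo a kmap_coherent()/kmap_noncoherent() mapping: invalidate the wired
 * TLB entry installed by __kmap_pgprot() and drop the wired count.
 */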
void kunmap_coherent(void)
{
	unsigned int wired;
	unsigned long flags, old_ctx;

	local_irq_save(flags);
	old_ctx = read_c0_entryhi();
	wired = num_wired_entries() - 1;
	write_c0_wired(wired);
	write_c0_index(wired);
	write_c0_entryhi(UNIQUE_ENTRYHI(wired));
	write_c0_entrylo0(0);
	write_c0_entrylo1(0);
	mtc0_tlbw_hazard();
	tlb_write_indexed();
	tlbw_use_hazard();
	write_c0_entryhi(old_ctx);
	local_irq_restore(flags);
	pagefault_enable();
	preempt_enable();
}

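/*
 * Copy a user page.  When dcache aliasing is possible and the source page
 * is mapped and clean, copy through a kernel mapping of the matching cache
 * colour so no stale alias is left behind.
 */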
void copy_user_highpage(struct page *to, struct page *from,
	unsigned long vaddr, struct vm_area_struct *vma)
{
	void *vfrom, *vto;

	vto = kmap_atomic(to);
	if (cpu_has_dc_aliases &&
	    page_mapcount(from) && !Page_dcache_dirty(from)) {
		vfrom = kmap_coherent(from, vaddr);
		copy_page(vto, vfrom);
		kunmap_coherent();
	} else {
		vfrom = kmap_atomic(from);
		copy_page(vto, vfrom);
		kunmap_atomic(vfrom);
	}
	if ((!cpu_has_ic_fills_f_dc) ||
	    pages_do_alias((unsigned long)vto, vaddr & PAGE_MASK))
		flush_data_cache_page((unsigned long)vto);
	kunmap_atomic(vto);

	/* Make sure the copied data is visible to other CPUs before use */
	smp_wmb();
}

void copy_to_user_page(struct vm_area_struct *vma,
	struct page *page, unsigned long vaddr, void *dst, const void *src,
	unsigned long len)
{
	if (cpu_has_dc_aliases &&
	    page_mapcount(page) && !Page_dcache_dirty(page)) {
		void *vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
		memcpy(vto, src, len);
		kunmap_coherent();
	} else {
		memcpy(dst, src, len);
		if (cpu_has_dc_aliases)
			SetPageDcacheDirty(page);
	}
	if (vma->vm_flags & VM_EXEC)
		flush_cache_page(vma, vaddr, page_to_pfn(page));
}

void copy_from_user_page(struct vm_area_struct *vma,
	struct page *page, unsigned long vaddr, void *dst, const void *src,
	unsigned long len)
{
	if (cpu_has_dc_aliases &&
	    page_mapcount(page) && !Page_dcache_dirty(page)) {
		void *vfrom = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
		memcpy(dst, vfrom, len);
		kunmap_coherent();
	} else {
		memcpy(dst, src, len);
		if (cpu_has_dc_aliases)
			SetPageDcacheDirty(page);
	}
}
EXPORT_SYMBOL_GPL(copy_from_user_page);

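/*
 * Pre-allocate the page tables covering a fixed-mapping virtual range so
 * later fixmap users never have to allocate them.  Only needed when
 * CONFIG_HIGHMEM is enabled.
 */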
void __init fixrange_init(unsigned long start, unsigned long end,
	pgd_t *pgd_base)
{
#ifdef CONFIG_HIGHMEM
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	int i, j, k;
	unsigned long vaddr;

	vaddr = start;
	i = __pgd_offset(vaddr);
	j = __pud_offset(vaddr);
	k = __pmd_offset(vaddr);
	pgd = pgd_base + i;

	for ( ; (i < PTRS_PER_PGD) && (vaddr < end); pgd++, i++) {
		pud = (pud_t *)pgd;
		for ( ; (j < PTRS_PER_PUD) && (vaddr < end); pud++, j++) {
			pmd = (pmd_t *)pud;
			for (; (k < PTRS_PER_PMD) && (vaddr < end); pmd++, k++) {
				if (pmd_none(*pmd)) {
					pte = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
					set_pmd(pmd, __pmd((unsigned long)pte));
					BUG_ON(pte != pte_offset_kernel(pmd, 0));
				}
				vaddr += PMD_SIZE;
			}
			k = 0;
		}
		j = 0;
	}
#endif
}

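/*
 * Default (weak) MAAR setup: allow speculation for every RAM region in the
 * boot memory map, rounding each region inwards to 64K boundaries.
 * Platforms may override this.
 */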
unsigned __weak platform_maar_init(unsigned num_pairs)
{
	struct maar_config cfg[BOOT_MEM_MAP_MAX];
	unsigned i, num_configured, num_cfg = 0;

	for (i = 0; i < boot_mem_map.nr_map; i++) {
		switch (boot_mem_map.map[i].type) {
		case BOOT_MEM_RAM:
		case BOOT_MEM_INIT_RAM:
			break;
		default:
			continue;
		}

		/* Round the lower bound up to a 64K boundary */
		cfg[num_cfg].lower = boot_mem_map.map[i].addr;
		cfg[num_cfg].lower = (cfg[num_cfg].lower + 0xffff) & ~0xffff;

		/* Round the upper bound down to a 64K boundary, inclusive */
		cfg[num_cfg].upper = boot_mem_map.map[i].addr +
					boot_mem_map.map[i].size;
		cfg[num_cfg].upper = (cfg[num_cfg].upper & ~0xffff) - 1;

		cfg[num_cfg].attrs = MIPS_MAAR_S;
		num_cfg++;
	}

	num_configured = maar_config(cfg, num_cfg, num_pairs);
	if (num_configured < num_cfg)
		pr_warn("Not enough MAAR pairs (%u) for all bootmem regions (%u)\n",
			num_pairs, num_cfg);

	return num_configured;
}

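/*
 * Program the Memory Accessibility Attribute Registers.  The boot CPU
 * derives a configuration via platform_maar_init() and records it;
 * secondary CPUs replay the recorded configuration.
 */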
void maar_init(void)
{
	unsigned num_maars, used, i;
	phys_addr_t lower, upper, attr;
	static struct {
		struct maar_config cfgs[3];
		unsigned used;
	} recorded = { { { 0 } }, 0 };

	if (!cpu_has_maar)
		return;

	/* Detect the number of MAARs */
	write_c0_maari(~0);
	back_to_back_c0_hazard();
	num_maars = read_c0_maari() + 1;

	/* MAARs should always come in pairs */
	WARN_ON(num_maars % 2);

	if (recorded.used) {
		/* Secondary CPUs: replay the configuration recorded below */
		used = maar_config(recorded.cfgs, recorded.used, num_maars / 2);
		BUG_ON(used != recorded.used);
	} else {
		/* Boot CPU: let the platform configure the MAARs */
		used = platform_maar_init(num_maars / 2);
	}

	/* Disable any remaining MAARs */
	for (i = (used * 2); i < num_maars; i++) {
		write_c0_maari(i);
		back_to_back_c0_hazard();
		write_c0_maar(0);
		back_to_back_c0_hazard();
	}

	if (recorded.used)
		return;

	pr_info("MAAR configuration:\n");
	for (i = 0; i < num_maars; i += 2) {
		write_c0_maari(i);
		back_to_back_c0_hazard();
		upper = read_c0_maar();

		write_c0_maari(i + 1);
		back_to_back_c0_hazard();
		lower = read_c0_maar();

		attr = lower & upper;
		lower = (lower & MIPS_MAAR_ADDR) << 4;
		upper = ((upper & MIPS_MAAR_ADDR) << 4) | 0xffff;

		pr_info(" [%d]: ", i / 2);
		if (!(attr & MIPS_MAAR_VL)) {
			pr_cont("disabled\n");
			continue;
		}

		pr_cont("%pa-%pa", &lower, &upper);

		if (attr & MIPS_MAAR_S)
			pr_cont(" speculate");

		pr_cont("\n");

		/* Record the setup for use on secondary CPUs */
		if (used <= ARRAY_SIZE(recorded.cfgs)) {
			recorded.cfgs[recorded.used].lower = lower;
			recorded.cfgs[recorded.used].upper = upper;
			recorded.cfgs[recorded.used].attrs = attr;
			recorded.used++;
		}
	}
}

#ifndef CONFIG_NEED_MULTIPLE_NODES
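/* Return 1 if the given pfn lies within a usable RAM region of the boot memory map. */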
int page_is_ram(unsigned long pagenr)
{
	int i;

	for (i = 0; i < boot_mem_map.nr_map; i++) {
		unsigned long addr, end;

		switch (boot_mem_map.map[i].type) {
		case BOOT_MEM_RAM:
		case BOOT_MEM_INIT_RAM:
			break;
		default:
			/* not usable memory */
			continue;
		}

		addr = PFN_UP(boot_mem_map.map[i].addr);
		end = PFN_DOWN(boot_mem_map.map[i].addr +
				boot_mem_map.map[i].size);

		if (pagenr >= addr && pagenr < end)
			return 1;
	}

	return 0;
}

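/*
 * Set up the kernel page tables and zone sizes, then hand the zone limits
 * to the core mm.  Highmem is ignored on CPUs with dcache aliases.
 */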
void __init paging_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES];
	unsigned long lastpfn __maybe_unused;

	pagetable_init();

#ifdef CONFIG_HIGHMEM
	kmap_init();
#endif
#ifdef CONFIG_ZONE_DMA
	max_zone_pfns[ZONE_DMA] = MAX_DMA_PFN;
#endif
#ifdef CONFIG_ZONE_DMA32
	max_zone_pfns[ZONE_DMA32] = MAX_DMA32_PFN;
#endif
	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
	lastpfn = max_low_pfn;
#ifdef CONFIG_HIGHMEM
	max_zone_pfns[ZONE_HIGHMEM] = highend_pfn;
	lastpfn = highend_pfn;

	if (cpu_has_dc_aliases && max_low_pfn != highend_pfn) {
		printk(KERN_WARNING "This processor doesn't support highmem."
		       " %ldk highmem ignored\n",
		       (highend_pfn - max_low_pfn) << (PAGE_SHIFT - 10));
		max_zone_pfns[ZONE_HIGHMEM] = max_low_pfn;
		lastpfn = max_low_pfn;
	}
#endif

	free_area_init_nodes(max_zone_pfns);
}

#ifdef CONFIG_64BIT
static struct kcore_list kcore_kseg0;
#endif

static inline void mem_init_free_highmem(void)
{
#ifdef CONFIG_HIGHMEM
	unsigned long tmp;

	if (cpu_has_dc_aliases)
		return;

	for (tmp = highstart_pfn; tmp < highend_pfn; tmp++) {
		struct page *page = pfn_to_page(tmp);

		if (!page_is_ram(tmp))
			SetPageReserved(page);
		else
			free_highmem_page(page);
	}
#endif
}

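/*
 * Release boot memory to the page allocator, set up the zero page(s) and
 * MAARs, and register the KSEG0 text region with /proc/kcore on 64-bit.
 */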
void __init mem_init(void)
{
#ifdef CONFIG_HIGHMEM
#ifdef CONFIG_DISCONTIGMEM
#error "CONFIG_HIGHMEM and CONFIG_DISCONTIGMEM don't work together yet"
#endif
	max_mapnr = highend_pfn ? highend_pfn : max_low_pfn;
#else
	max_mapnr = max_low_pfn;
#endif
	high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT);

	maar_init();
	free_all_bootmem();
	setup_zero_pages();
	mem_init_free_highmem();
	mem_init_print_info(NULL);

#ifdef CONFIG_64BIT
	if ((unsigned long) &_text > (unsigned long) CKSEG0)
		/* Expose the cached KSEG0 region via /proc/kcore */
		kclist_add(&kcore_kseg0, (void *) CKSEG0,
				0x80000000 - 4, KCORE_TEXT);
#endif
}
#endif

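/* Poison the pages in [begin, end) and hand them back to the page allocator. */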
void free_init_pages(const char *what, unsigned long begin, unsigned long end)
{
	unsigned long pfn;

	for (pfn = PFN_UP(begin); pfn < PFN_DOWN(end); pfn++) {
		struct page *page = pfn_to_page(pfn);
		void *addr = phys_to_virt(PFN_PHYS(pfn));

		memset(addr, POISON_FREE_INITMEM, PAGE_SIZE);
		free_reserved_page(page);
	}
	printk(KERN_INFO "Freeing %s: %ldk freed\n", what, (end - begin) >> 10);
}

#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
	free_reserved_area((void *)start, (void *)end, POISON_FREE_INITMEM,
			   "initrd");
}
#endif

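/*
 * EVA platforms may install their own routine for freeing the init
 * section; free_initmem() uses it when set.
 */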
void (*free_init_pages_eva)(void *begin, void *end) = NULL;

void __ref free_initmem(void)
{
	prom_free_prom_memory();

	if (free_init_pages_eva)
		free_init_pages_eva((void *)&__init_begin, (void *)&__init_end);
	else
		free_initmem_default(POISON_FREE_INITMEM);
}

#ifndef CONFIG_MIPS_PGD_C0_CONTEXT
unsigned long pgd_current[NR_CPUS];
#endif

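/*
 * swapper_pg_dir lives in its own .bss section so the linker script can
 * control its placement and alignment; the invalid_* tables below only
 * need page alignment.
 */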
pgd_t swapper_pg_dir[_PTRS_PER_PGD] __section(.bss..swapper_pg_dir);
#ifndef __PAGETABLE_PUD_FOLDED
pud_t invalid_pud_table[PTRS_PER_PUD] __page_aligned_bss;
#endif
#ifndef __PAGETABLE_PMD_FOLDED
pmd_t invalid_pmd_table[PTRS_PER_PMD] __page_aligned_bss;
EXPORT_SYMBOL_GPL(invalid_pmd_table);
#endif
pte_t invalid_pte_table[PTRS_PER_PTE] __page_aligned_bss;
EXPORT_SYMBOL(invalid_pte_table);