/*
 * Initialize MMU support.
 */
#include <linux/kernel.h>
#include <linux/init.h>

#include <linux/bootmem.h>
#include <linux/efi.h>
#include <linux/elf.h>
#include <linux/memblock.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/module.h>
#include <linux/personality.h>
#include <linux/reboot.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/proc_fs.h>
#include <linux/bitops.h>
#include <linux/kexec.h>

#include <asm/dma.h>
#include <asm/io.h>
#include <asm/machvec.h>
#include <asm/numa.h>
#include <asm/patch.h>
#include <asm/pgalloc.h>
#include <asm/sal.h>
#include <asm/sections.h>
#include <asm/tlb.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/mca.h>

extern void ia64_tlb_init (void);

unsigned long MAX_DMA_ADDRESS = PAGE_OFFSET + 0x100000000UL;

#ifdef CONFIG_VIRTUAL_MEM_MAP
unsigned long VMALLOC_END = VMALLOC_END_INIT;
EXPORT_SYMBOL(VMALLOC_END);
struct page *vmem_map;
EXPORT_SYMBOL(vmem_map);
#endif

struct page *zero_page_memmap_ptr;
EXPORT_SYMBOL(zero_page_memmap_ptr);

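/*
 * Make the i-cache coherent with the d-cache for the page mapped by @pte
 * before it is used for execution.  PG_arch_1 marks pages whose i-cache is
 * already clean, so the (costly) flush_icache_range() is done at most once
 * per page.
 */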
void
__ia64_sync_icache_dcache (pte_t pte)
{
        unsigned long addr;
        struct page *page;

        page = pte_page(pte);
        addr = (unsigned long) page_address(page);

        if (test_bit(PG_arch_1, &page->flags))
                return;         /* i-cache is already coherent with d-cache */

        flush_icache_range(addr, addr + (PAGE_SIZE << compound_order(page)));
        set_bit(PG_arch_1, &page->flags);       /* mark page as clean */
}

/*
 * DMA on ia64 is i-cache coherent, so any complete pages written via DMA can
 * be marked "clean" (PG_arch_1) right away; they will not need an explicit
 * i-cache flush when they are later mapped into an executable vm-area.
 */
void
dma_mark_clean(void *addr, size_t size)
{
        unsigned long pg_addr, end;

        pg_addr = PAGE_ALIGN((unsigned long) addr);
        end = (unsigned long) addr + size;
        while (pg_addr + PAGE_SIZE <= end) {
                struct page *page = virt_to_page(pg_addr);
                set_bit(PG_arch_1, &page->flags);
                pg_addr += PAGE_SIZE;
        }
}

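/*
 * Compute the bottom of the register backing store (RBS) for the current
 * task: the RBS grows upward from a point placed the maximum allowed stack
 * size (RLIMIT_STACK, capped at MAX_USER_STACK_SIZE) below the initial
 * stack address.
 */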
inline void
ia64_set_rbs_bot (void)
{
        unsigned long stack_size = rlimit_max(RLIMIT_STACK) & -16;

        if (stack_size > MAX_USER_STACK_SIZE)
                stack_size = MAX_USER_STACK_SIZE;
        current->thread.rbs_bot = PAGE_ALIGN(current->mm->start_stack - stack_size);
}

/*
 * This performs some platform-dependent address space initialization.
 * On IA-64, we want to set up the VM area for the register backing
 * store (which grows upwards) and install the gateway page which is
 * used for signal trampolines, etc.
 */
void
ia64_init_addr_space (void)
{
        struct vm_area_struct *vma;

        ia64_set_rbs_bot();

        /*
         * If we're out of memory and kmem_cache_zalloc() returns NULL, we
         * simply ignore the problem.  When the process attempts to write to
         * the register backing store for the first time, it will get a
         * SEGFAULT in that case.
         */
        vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
        if (vma) {
                INIT_LIST_HEAD(&vma->anon_vma_chain);
                vma->vm_mm = current->mm;
                vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
                vma->vm_end = vma->vm_start + PAGE_SIZE;
                vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
                vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
                down_write(&current->mm->mmap_sem);
                if (insert_vm_struct(current->mm, vma)) {
                        up_write(&current->mm->mmap_sem);
                        kmem_cache_free(vm_area_cachep, vma);
                        return;
                }
                up_write(&current->mm->mmap_sem);
        }

        /* map NaT-page at address zero to speed up speculative dereferencing of NULL: */
        if (!(current->personality & MMAP_PAGE_ZERO)) {
                vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
                if (vma) {
                        INIT_LIST_HEAD(&vma->anon_vma_chain);
                        vma->vm_mm = current->mm;
                        vma->vm_end = PAGE_SIZE;
                        vma->vm_page_prot = __pgprot(pgprot_val(PAGE_READONLY) | _PAGE_MA_NAT);
                        vma->vm_flags = VM_READ | VM_MAYREAD | VM_IO |
                                        VM_DONTEXPAND | VM_DONTDUMP;
                        down_write(&current->mm->mmap_sem);
                        if (insert_vm_struct(current->mm, vma)) {
                                up_write(&current->mm->mmap_sem);
                                kmem_cache_free(vm_area_cachep, vma);
                                return;
                        }
                        up_write(&current->mm->mmap_sem);
                }
        }
}

void
free_initmem (void)
{
        free_reserved_area(ia64_imva(__init_begin), ia64_imva(__init_end),
                           -1, "unused kernel");
}

void __init
free_initrd_mem (unsigned long start, unsigned long end)
{
        /*
         * EFI uses 4KB pages while the kernel may use a larger page size, so
         * the initrd can share a kernel-sized page with the kernel image or
         * other data.  To avoid freeing memory that is still in use, round
         * the start of the initrd up and its end down to kernel page
         * boundaries and only free the whole pages in between.
         */
        start = PAGE_ALIGN(start);
        end = end & PAGE_MASK;

        if (start < end)
                printk(KERN_INFO "Freeing initrd memory: %ldkB freed\n", (end - start) >> 10);

        for (; start < end; start += PAGE_SIZE) {
                if (!virt_addr_valid(start))
                        continue;
                free_reserved_page(virt_to_page(start));
        }
}

/*
 * This installs a clean page in the kernel's page table.
 */
static struct page * __init
put_kernel_page (struct page *page, unsigned long address, pgprot_t pgprot)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;

        pgd = pgd_offset_k(address);    /* note: this is NOT pgd_offset()! */

        {
                pud = pud_alloc(&init_mm, pgd, address);
                if (!pud)
                        goto out;
                pmd = pmd_alloc(&init_mm, pud, address);
                if (!pmd)
                        goto out;
                pte = pte_alloc_kernel(pmd, address);
                if (!pte)
                        goto out;
                if (!pte_none(*pte))
                        goto out;
                set_pte(pte, mk_pte(page, pgprot));
        }
  out:
        /* no need for flush_tlb */
        return page;
}

static void __init
setup_gate (void)
{
        struct page *page;

        /*
         * Map the gate page twice: once read-only to export the ELF
         * headers etc. and once execute-only, to enable privilege
         * promotion via "epc":
         */
        page = virt_to_page(ia64_imva(__start_gate_section));
        put_kernel_page(page, GATE_ADDR, PAGE_READONLY);
#ifdef HAVE_BUGGY_SEGREL
        page = virt_to_page(ia64_imva(__start_gate_section + PAGE_SIZE));
        put_kernel_page(page, GATE_ADDR + PAGE_SIZE, PAGE_GATE);
#else
        put_kernel_page(page, GATE_ADDR + PERCPU_PAGE_SIZE, PAGE_GATE);
        /* Fill in the holes (if any) with read-only zero pages: */
        {
                unsigned long addr;

                for (addr = GATE_ADDR + PAGE_SIZE;
                     addr < GATE_ADDR + PERCPU_PAGE_SIZE;
                     addr += PAGE_SIZE)
                {
                        put_kernel_page(ZERO_PAGE(0), addr,
                                        PAGE_READONLY);
                        put_kernel_page(ZERO_PAGE(0), addr + PERCPU_PAGE_SIZE,
                                        PAGE_READONLY);
                }
        }
#endif
        ia64_patch_gate();
}

static struct vm_area_struct gate_vma;

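/*
 * The gate area (FIXADDR_USER_START..FIXADDR_USER_END) is described by the
 * single static VMA above, which is not linked into any mm.  get_gate_vma()
 * and in_gate_area*() below expose it to the core VM so accesses to that
 * range are treated as valid.
 */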
static int __init gate_vma_init(void)
{
        gate_vma.vm_mm = NULL;
        gate_vma.vm_start = FIXADDR_USER_START;
        gate_vma.vm_end = FIXADDR_USER_END;
        gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
        gate_vma.vm_page_prot = __P101;

        return 0;
}
__initcall(gate_vma_init);

struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
{
        return &gate_vma;
}

int in_gate_area_no_mm(unsigned long addr)
{
        if ((addr >= FIXADDR_USER_START) && (addr < FIXADDR_USER_END))
                return 1;
        return 0;
}

int in_gate_area(struct mm_struct *mm, unsigned long addr)
{
        return in_gate_area_no_mm(addr);
}

void ia64_mmu_init(void *my_cpu_data)
{
        unsigned long pta, impl_va_bits;
        extern void tlb_init(void);

#ifdef CONFIG_DISABLE_VHPT
# define VHPT_ENABLE_BIT 0
#else
# define VHPT_ENABLE_BIT 1
#endif

        /*
         * Check whether the virtually mapped linear page table (VMLPT) would
         * overlap the page-table-mapped address space of a region.  With a
         * large enough page size the mapped space becomes big enough that
         * such an overlap is possible, in which case we refuse to continue
         * below.
         */
# define pte_bits 3
# define mapped_space_bits (3*(PAGE_SHIFT - pte_bits) + PAGE_SHIFT)
        /*
         * The virtual page table has to cover the entire implemented address
         * space within a region, even though not all of this space may be
         * usable.  The IA-64 architecture guarantees that at least 50 bits of
         * virtual address space are implemented.
         */
# define vmlpt_bits (impl_va_bits - PAGE_SHIFT + pte_bits)
# define POW2(n) (1ULL << (n))

        impl_va_bits = ffz(~(local_cpu_data->unimpl_va_mask | (7UL << 61)));

        if (impl_va_bits < 51 || impl_va_bits > 61)
                panic("CPU has bogus IMPL_VA_MSB value of %lu!\n", impl_va_bits - 1);

        /*
         * mapped_space_bits - PAGE_SHIFT is the number of ptes we need, which
         * must fit into the "vmlpt_bits - pte_bits" slots of the VMLPT.  The
         * second half of the test makes sure the mapped space does not reach
         * into the unimplemented hole in the middle of the region.
         */
        if ((mapped_space_bits - PAGE_SHIFT > vmlpt_bits - pte_bits) ||
            (mapped_space_bits > impl_va_bits - 1))
                panic("Cannot build a big enough virtual-linear page table"
                      " to cover mapped address space.\n"
                      " Try using a smaller page size.\n");

        /* place the VMLPT at the end of each page-table mapped region: */
        pta = POW2(61) - POW2(vmlpt_bits);

        /*
         * Set the (virtually mapped linear) page table address.  Bit 8
         * selects between the short and long format, bits 2-7 the size of
         * the table, and bit 0 whether the VHPT walker is enabled.
         */
        ia64_set_pta(pta | (0 << 8) | (vmlpt_bits << 2) | VHPT_ENABLE_BIT);

        ia64_tlb_init();

#ifdef CONFIG_HUGETLB_PAGE
        ia64_set_rr(HPAGE_REGION_BASE, HPAGE_SHIFT << 2);
        ia64_srlz_d();
#endif
}

#ifdef CONFIG_VIRTUAL_MEM_MAP
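/*
 * vmemmap_find_next_valid_pfn - find the next pfn (relative to the node's
 * first pfn) whose struct page in the virtual mem_map is actually backed by
 * memory.  Walks the kernel page table covering vmem_map and skips unmapped
 * pgd/pud/pmd/pte ranges in whole steps.
 */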
int vmemmap_find_next_valid_pfn(int node, int i)
{
        unsigned long end_address, hole_next_pfn;
        unsigned long stop_address;
        pg_data_t *pgdat = NODE_DATA(node);

        end_address = (unsigned long) &vmem_map[pgdat->node_start_pfn + i];
        end_address = PAGE_ALIGN(end_address);
        stop_address = (unsigned long) &vmem_map[pgdat_end_pfn(pgdat)];

        do {
                pgd_t *pgd;
                pud_t *pud;
                pmd_t *pmd;
                pte_t *pte;

                pgd = pgd_offset_k(end_address);
                if (pgd_none(*pgd)) {
                        end_address += PGDIR_SIZE;
                        continue;
                }

                pud = pud_offset(pgd, end_address);
                if (pud_none(*pud)) {
                        end_address += PUD_SIZE;
                        continue;
                }

                pmd = pmd_offset(pud, end_address);
                if (pmd_none(*pmd)) {
                        end_address += PMD_SIZE;
                        continue;
                }

                pte = pte_offset_kernel(pmd, end_address);
retry_pte:
                if (pte_none(*pte)) {
                        end_address += PAGE_SIZE;
                        pte++;
                        if ((end_address < stop_address) &&
                            (end_address != ALIGN(end_address, 1UL << PMD_SHIFT)))
                                goto retry_pte;
                        continue;
                }

                break;
        } while (end_address < stop_address);

        end_address = min(end_address, stop_address);
        end_address = end_address - (unsigned long) vmem_map + sizeof(struct page) - 1;
        hole_next_pfn = end_address / sizeof(struct page);
        return hole_next_pfn - pgdat->node_start_pfn;
}

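/*
 * create_mem_map_page_table - intended as an efi_memmap_walk() callback:
 * allocates (from bootmem, on the range's node) and wires up the kernel
 * page-table entries backing the vmem_map pages that cover [start, end), so
 * the virtual mem_map for that range can be touched safely.
 */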
int __init create_mem_map_page_table(u64 start, u64 end, void *arg)
{
        unsigned long address, start_page, end_page;
        struct page *map_start, *map_end;
        int node;
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;

        map_start = vmem_map + (__pa(start) >> PAGE_SHIFT);
        map_end = vmem_map + (__pa(end) >> PAGE_SHIFT);

        start_page = (unsigned long) map_start & PAGE_MASK;
        end_page = PAGE_ALIGN((unsigned long) map_end);
        node = paddr_to_nid(__pa(start));

        for (address = start_page; address < end_page; address += PAGE_SIZE) {
                pgd = pgd_offset_k(address);
                if (pgd_none(*pgd))
                        pgd_populate(&init_mm, pgd, alloc_bootmem_pages_node(NODE_DATA(node), PAGE_SIZE));
                pud = pud_offset(pgd, address);

                if (pud_none(*pud))
                        pud_populate(&init_mm, pud, alloc_bootmem_pages_node(NODE_DATA(node), PAGE_SIZE));
                pmd = pmd_offset(pud, address);

                if (pmd_none(*pmd))
                        pmd_populate_kernel(&init_mm, pmd, alloc_bootmem_pages_node(NODE_DATA(node), PAGE_SIZE));
                pte = pte_offset_kernel(pmd, address);

                if (pte_none(*pte))
                        set_pte(pte, pfn_pte(__pa(alloc_bootmem_pages_node(NODE_DATA(node), PAGE_SIZE)) >> PAGE_SHIFT,
                                             PAGE_KERNEL));
        }
        return 0;
}

struct memmap_init_callback_data {
        struct page *start;
        struct page *end;
        int nid;
        unsigned long zone;
};

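/*
 * virtual_memmap_init - efi_memmap_walk() callback used by memmap_init()
 * below: initializes the struct pages of the virtual mem_map that correspond
 * to the memory range [start, end), clipped to the window described by the
 * memmap_init_callback_data argument.
 */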
static int __meminit
virtual_memmap_init(u64 start, u64 end, void *arg)
{
        struct memmap_init_callback_data *args;
        struct page *map_start, *map_end;

        args = (struct memmap_init_callback_data *) arg;
        map_start = vmem_map + (__pa(start) >> PAGE_SHIFT);
        map_end = vmem_map + (__pa(end) >> PAGE_SHIFT);

        if (map_start < args->start)
                map_start = args->start;
        if (map_end > args->end)
                map_end = args->end;

        /*
         * We have to initialize the "out of bounds" struct page elements that
         * share the pages backing the "in bounds" elements, because they may
         * be referenced later (and found to be "reserved").  So round
         * map_start down and map_end up to full vmem_map pages.
         */
        map_start -= ((unsigned long) map_start & (PAGE_SIZE - 1)) / sizeof(struct page);
        map_end += ((PAGE_ALIGN((unsigned long) map_end) - (unsigned long) map_end)
                    / sizeof(struct page));

        if (map_start < map_end)
                memmap_init_zone((unsigned long)(map_end - map_start),
                                 args->nid, args->zone, page_to_pfn(map_start),
                                 MEMMAP_EARLY);
        return 0;
}

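/*
 * memmap_init - architecture hook replacing the generic memmap_init(): when
 * a virtual mem_map is in use, only the parts actually backed by memory are
 * initialized (by walking the EFI memory map); otherwise fall through to
 * memmap_init_zone().
 */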
void __meminit
memmap_init (unsigned long size, int nid, unsigned long zone,
             unsigned long start_pfn)
{
        if (!vmem_map)
                memmap_init_zone(size, nid, zone, start_pfn, MEMMAP_EARLY);
        else {
                struct page *start;
                struct memmap_init_callback_data args;

                start = pfn_to_page(start_pfn);
                args.start = start;
                args.end = start + size;
                args.nid = nid;
                args.zone = zone;

                efi_memmap_walk(virtual_memmap_init, &args);
        }
}

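/*
 * ia64_pfn_valid - check whether the struct page for @pfn is accessible:
 * probe the first byte of the page descriptor (and its last byte when the
 * descriptor straddles a page boundary) and succeed only if the probes do
 * not fault.
 */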
int
ia64_pfn_valid (unsigned long pfn)
{
        char byte;
        struct page *pg = pfn_to_page(pfn);

        return (__get_user(byte, (char __user *) pg) == 0)
                && ((((u64)pg & PAGE_MASK) == (((u64)(pg + 1) - 1) & PAGE_MASK))
                    || (__get_user(byte, (char __user *) (pg + 1) - 1) == 0));
}
EXPORT_SYMBOL(ia64_pfn_valid);

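/*
 * find_largest_hole - efi_memmap_walk() callback that records, in the u64
 * pointed to by @arg, the largest gap found between consecutive memory
 * descriptors.
 */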
int __init find_largest_hole(u64 start, u64 end, void *arg)
{
        u64 *max_gap = arg;

        static u64 last_end = PAGE_OFFSET;

        /* NOTE: this algorithm assumes the EFI memmap is ordered */

        if (*max_gap < (start - last_end))
                *max_gap = start - last_end;
        last_end = end;
        return 0;
}

#endif /* CONFIG_VIRTUAL_MEM_MAP */

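/*
 * register_active_ranges - add a usable memory range to memblock, attributed
 * to @nid, trimming off any overlap with the crashkernel reservation so that
 * kdump memory is not handed to the page allocator.
 */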
int __init register_active_ranges(u64 start, u64 len, int nid)
{
        u64 end = start + len;

#ifdef CONFIG_KEXEC
        if (start > crashk_res.start && start < crashk_res.end)
                start = crashk_res.end;
        if (end > crashk_res.start && end < crashk_res.end)
                end = crashk_res.start;
#endif

        if (start < end)
                memblock_add_node(__pa(start), end - start, nid);
        return 0;
}

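/*
 * find_max_min_low_pfn - efi_memmap_walk() callback that widens the global
 * [min_low_pfn, max_low_pfn] range to cover the given memory descriptor
 * (rounded to granule boundaries when not CONFIG_FLATMEM).
 */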
int
find_max_min_low_pfn (u64 start, u64 end, void *arg)
{
        unsigned long pfn_start, pfn_end;
#ifdef CONFIG_FLATMEM
        pfn_start = (PAGE_ALIGN(__pa(start))) >> PAGE_SHIFT;
        pfn_end = (PAGE_ALIGN(__pa(end - 1))) >> PAGE_SHIFT;
#else
        pfn_start = GRANULEROUNDDOWN(__pa(start)) >> PAGE_SHIFT;
        pfn_end = GRANULEROUNDUP(__pa(end - 1)) >> PAGE_SHIFT;
#endif
        min_low_pfn = min(min_low_pfn, pfn_start);
        max_low_pfn = max(max_low_pfn, pfn_end);
        return 0;
}

/*
 * Boot command-line option "nolwsys" can be used to disable the use of any
 * light-weight system call handler.  When this option is in effect, all
 * fsyscalls end up going through the normal (heavy-weight) syscall path.
 * This is useful for performance testing, and conceivably for debugging.
 */
static int nolwsys __initdata;

static int __init
nolwsys_setup (char *s)
{
        nolwsys = 1;
        return 1;
}

__setup("nolwsys", nolwsys_setup);

void __init
mem_init (void)
{
        int i;

        BUG_ON(PTRS_PER_PGD * sizeof(pgd_t) != PAGE_SIZE);
        BUG_ON(PTRS_PER_PMD * sizeof(pmd_t) != PAGE_SIZE);
        BUG_ON(PTRS_PER_PTE * sizeof(pte_t) != PAGE_SIZE);

#ifdef CONFIG_PCI
        /*
         * This needs to be called _after_ the command line has been parsed
         * but _before_ any drivers that may need the PCI DMA interface are
         * initialized or bootmem has been freed.
         */
        platform_dma_init();
#endif

#ifdef CONFIG_FLATMEM
        BUG_ON(!mem_map);
#endif

        set_max_mapnr(max_low_pfn);
        high_memory = __va(max_low_pfn * PAGE_SIZE);
        free_all_bootmem();
        mem_init_print_info(NULL);

        /*
         * For fsyscall entrypoints with no light-weight handler, use the
         * ordinary (heavy-weight) handler, but mark it by setting bit 0, so
         * the fsyscall entry can catch this and redirect execution to the
         * full syscall path.
         */
        for (i = 0; i < NR_syscalls; ++i) {
                extern unsigned long fsyscall_table[NR_syscalls];
                extern unsigned long sys_call_table[NR_syscalls];

                if (!fsyscall_table[i] || nolwsys)
                        fsyscall_table[i] = sys_call_table[i] | 1;
        }
        setup_gate();
}

#ifdef CONFIG_MEMORY_HOTPLUG
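/*
 * arch_add_memory - memory hot-add entry point: hands the new range to the
 * core VM via __add_pages(), placing it in the zone selected by
 * zone_for_memory() (ZONE_NORMAL by default).
 */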
int arch_add_memory(int nid, u64 start, u64 size, bool for_device)
{
        pg_data_t *pgdat;
        struct zone *zone;
        unsigned long start_pfn = start >> PAGE_SHIFT;
        unsigned long nr_pages = size >> PAGE_SHIFT;
        int ret;

        pgdat = NODE_DATA(nid);

        zone = pgdat->node_zones +
                zone_for_memory(nid, start, size, ZONE_NORMAL, for_device);
        ret = __add_pages(nid, zone, start_pfn, nr_pages);

        if (ret)
                printk("%s: Problem encountered in __add_pages() as ret=%d\n",
                       __func__, ret);

        return ret;
}

#ifdef CONFIG_MEMORY_HOTREMOVE
int arch_remove_memory(u64 start, u64 size)
{
        unsigned long start_pfn = start >> PAGE_SHIFT;
        unsigned long nr_pages = size >> PAGE_SHIFT;
        struct zone *zone;
        int ret;

        zone = page_zone(pfn_to_page(start_pfn));
        ret = __remove_pages(zone, start_pfn, nr_pages);
        if (ret)
                pr_warn("%s: Problem encountered in __remove_pages() as"
                        " ret=%d\n", __func__, ret);

        return ret;
}
#endif /* CONFIG_MEMORY_HOTREMOVE */
#endif /* CONFIG_MEMORY_HOTPLUG */

/*
 * show_mem - give a short summary of memory state: prints the free-area
 * information and, for each online node, the number of present and reserved
 * pages, followed by system-wide totals.
 */
void show_mem(unsigned int filter)
{
        int total_reserved = 0;
        unsigned long total_present = 0;
        pg_data_t *pgdat;

        printk(KERN_INFO "Mem-info:\n");
        show_free_areas(filter);
        printk(KERN_INFO "Node memory in pages:\n");
        for_each_online_pgdat(pgdat) {
                unsigned long present;
                unsigned long flags;
                int reserved = 0;
                int nid = pgdat->node_id;
                int zoneid;

                if (skip_free_areas_node(filter, nid))
                        continue;
                pgdat_resize_lock(pgdat, &flags);

                for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) {
                        struct zone *zone = &pgdat->node_zones[zoneid];
                        if (!populated_zone(zone))
                                continue;

                        reserved += zone->present_pages - zone->managed_pages;
                }
                present = pgdat->node_present_pages;

                pgdat_resize_unlock(pgdat, &flags);
                total_present += present;
                total_reserved += reserved;
                printk(KERN_INFO "Node %4d:  RAM: %11ld, rsvd: %8d\n",
                       nid, present, reserved);
        }
        printk(KERN_INFO "%ld pages of RAM\n", total_present);
        printk(KERN_INFO "%d reserved pages\n", total_reserved);
        printk(KERN_INFO "Total of %ld pages in page table cache\n",
               quicklist_total_size());
        printk(KERN_INFO "%ld free buffer pages\n", nr_free_buffer_pages());
}