/*
 * MMU and memory initialization for the IA-64 (ia64) architecture.
 */
#include <linux/kernel.h>
#include <linux/init.h>

#include <linux/bootmem.h>
#include <linux/efi.h>
#include <linux/elf.h>
#include <linux/memblock.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/module.h>
#include <linux/personality.h>
#include <linux/reboot.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/proc_fs.h>
#include <linux/bitops.h>
#include <linux/kexec.h>

#include <asm/dma.h>
#include <asm/io.h>
#include <asm/machvec.h>
#include <asm/numa.h>
#include <asm/patch.h>
#include <asm/pgalloc.h>
#include <asm/sal.h>
#include <asm/sections.h>
#include <asm/tlb.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/mca.h>
#include <asm/paravirt.h>

extern void ia64_tlb_init (void);

unsigned long MAX_DMA_ADDRESS = PAGE_OFFSET + 0x100000000UL;

#ifdef CONFIG_VIRTUAL_MEM_MAP
unsigned long VMALLOC_END = VMALLOC_END_INIT;
EXPORT_SYMBOL(VMALLOC_END);
struct page *vmem_map;
EXPORT_SYMBOL(vmem_map);
#endif

struct page *zero_page_memmap_ptr;
EXPORT_SYMBOL(zero_page_memmap_ptr);

void
__ia64_sync_icache_dcache (pte_t pte)
{
	unsigned long addr;
	struct page *page;

	page = pte_page(pte);
	addr = (unsigned long) page_address(page);

	if (test_bit(PG_arch_1, &page->flags))
		return;				/* i-cache already coherent with d-cache */

	flush_icache_range(addr, addr + (PAGE_SIZE << compound_order(page)));
	set_bit(PG_arch_1, &page->flags);	/* mark page as flushed */
}

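/*
 * DMA on ia64 is coherent with the i-cache, so pages that were written
 * entirely by DMA can be marked "clean" (PG_arch_1 set) up front; this
 * spares __ia64_sync_icache_dcache() a flush if such a page later gets
 * mapped into an executable vm-area.
 */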
void
dma_mark_clean(void *addr, size_t size)
{
	unsigned long pg_addr, end;

	pg_addr = PAGE_ALIGN((unsigned long) addr);
	end = (unsigned long) addr + size;
	while (pg_addr + PAGE_SIZE <= end) {
		struct page *page = virt_to_page(pg_addr);
		set_bit(PG_arch_1, &page->flags);
		pg_addr += PAGE_SIZE;
	}
}

inline void
ia64_set_rbs_bot (void)
{
	unsigned long stack_size = rlimit_max(RLIMIT_STACK) & -16;

	if (stack_size > MAX_USER_STACK_SIZE)
		stack_size = MAX_USER_STACK_SIZE;
	current->thread.rbs_bot = PAGE_ALIGN(current->mm->start_stack - stack_size);
}

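/*
 * Platform-dependent address-space setup for a new process.  On ia64 this
 * covers the first page of the register backing store with a VM_GROWSUP vma
 * and, unless the personality requests a mapped page zero, maps a NaT page
 * at virtual address 0 to speed up speculative dereferencing of NULL
 * pointers.
 */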
void
ia64_init_addr_space (void)
{
	struct vm_area_struct *vma;

	ia64_set_rbs_bot();

	/*
	 * If we're out of memory and kmem_cache_zalloc() returns NULL, we
	 * simply ignore the problem; the process will fault on its first
	 * access to the register backing store instead.
	 */
	vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
	if (vma) {
		INIT_LIST_HEAD(&vma->anon_vma_chain);
		vma->vm_mm = current->mm;
		vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
		vma->vm_end = vma->vm_start + PAGE_SIZE;
		vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
		down_write(&current->mm->mmap_sem);
		if (insert_vm_struct(current->mm, vma)) {
			up_write(&current->mm->mmap_sem);
			kmem_cache_free(vm_area_cachep, vma);
			return;
		}
		up_write(&current->mm->mmap_sem);
	}

	/* map NaT-page at address zero to speed up speculative dereferencing of NULL: */
	if (!(current->personality & MMAP_PAGE_ZERO)) {
		vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
		if (vma) {
			INIT_LIST_HEAD(&vma->anon_vma_chain);
			vma->vm_mm = current->mm;
			vma->vm_end = PAGE_SIZE;
			vma->vm_page_prot = __pgprot(pgprot_val(PAGE_READONLY) | _PAGE_MA_NAT);
			vma->vm_flags = VM_READ | VM_MAYREAD | VM_IO | VM_RESERVED;
			down_write(&current->mm->mmap_sem);
			if (insert_vm_struct(current->mm, vma)) {
				up_write(&current->mm->mmap_sem);
				kmem_cache_free(vm_area_cachep, vma);
				return;
			}
			up_write(&current->mm->mmap_sem);
		}
	}
}

void
free_initmem (void)
{
	unsigned long addr, eaddr;

	addr = (unsigned long) ia64_imva(__init_begin);
	eaddr = (unsigned long) ia64_imva(__init_end);
	while (addr < eaddr) {
		ClearPageReserved(virt_to_page(addr));
		init_page_count(virt_to_page(addr));
		free_page(addr);
		++totalram_pages;
		addr += PAGE_SIZE;
	}
	printk(KERN_INFO "Freeing unused kernel memory: %ldkB freed\n",
	       (__init_end - __init_begin) >> 10);
}

void __init
free_initrd_mem (unsigned long start, unsigned long end)
{
	struct page *page;
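	/*
	 * EFI hands out memory in 4KB units while the kernel page size may be
	 * larger, so the initrd can share a kernel-sized page with the end of
	 * the kernel image or with whatever follows it.  Free only the pages
	 * that belong entirely to the initrd: align the start up and the end
	 * down to page boundaries.
	 */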
	start = PAGE_ALIGN(start);
	end = end & PAGE_MASK;

	if (start < end)
		printk(KERN_INFO "Freeing initrd memory: %ldkB freed\n", (end - start) >> 10);

	for (; start < end; start += PAGE_SIZE) {
		if (!virt_addr_valid(start))
			continue;
		page = virt_to_page(start);
		ClearPageReserved(page);
		init_page_count(page);
		free_page(start);
		++totalram_pages;
	}
}

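/*
 * Install one (reserved) kernel page at the given virtual address in the
 * kernel page table, allocating intermediate page-table levels as needed.
 */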
static struct page * __init
put_kernel_page (struct page *page, unsigned long address, pgprot_t pgprot)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	if (!PageReserved(page))
		printk(KERN_ERR "put_kernel_page: page at 0x%p not in reserved memory\n",
		       page_address(page));

	pgd = pgd_offset_k(address);		/* note: this is NOT pgd_offset()! */

	{
		pud = pud_alloc(&init_mm, pgd, address);
		if (!pud)
			goto out;
		pmd = pmd_alloc(&init_mm, pud, address);
		if (!pmd)
			goto out;
		pte = pte_alloc_kernel(pmd, address);
		if (!pte)
			goto out;
		if (!pte_none(*pte))
			goto out;
		set_pte(pte, mk_pte(page, pgprot));
	}
  out:
	/* no need for a TLB flush: there was no previous mapping at this address */
	return page;
}

static void __init
setup_gate (void)
{
	void *gate_section;
	struct page *page;

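	/*
	 * Map the gate page twice: once read-only at GATE_ADDR to export the
	 * ELF headers etc., and once with PAGE_GATE (execute-only) so that
	 * privilege promotion via the "epc" instruction works.
	 */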
	gate_section = paravirt_get_gate_section();
	page = virt_to_page(ia64_imva(gate_section));
	put_kernel_page(page, GATE_ADDR, PAGE_READONLY);
#ifdef HAVE_BUGGY_SEGREL
	page = virt_to_page(ia64_imva(gate_section + PAGE_SIZE));
	put_kernel_page(page, GATE_ADDR + PAGE_SIZE, PAGE_GATE);
#else
	put_kernel_page(page, GATE_ADDR + PERCPU_PAGE_SIZE, PAGE_GATE);
	/* fill in the holes (if any) with read-only zero pages: */
	{
		unsigned long addr;

		for (addr = GATE_ADDR + PAGE_SIZE;
		     addr < GATE_ADDR + PERCPU_PAGE_SIZE;
		     addr += PAGE_SIZE)
		{
			put_kernel_page(ZERO_PAGE(0), addr,
					PAGE_READONLY);
			put_kernel_page(ZERO_PAGE(0), addr + PERCPU_PAGE_SIZE,
					PAGE_READONLY);
		}
	}
#endif
	ia64_patch_gate();
}

void __devinit
ia64_mmu_init (void *my_cpu_data)
{
	unsigned long pta, impl_va_bits;
	extern void __devinit tlb_init (void);

#ifdef CONFIG_DISABLE_VHPT
#	define VHPT_ENABLE_BIT	0
#else
#	define VHPT_ENABLE_BIT	1
#endif

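	/*
	 * pte_bits is log2(sizeof(pte_t)) (8-byte PTEs); mapped_space_bits is
	 * the number of virtual-address bits that a three-level page table
	 * built from PAGE_SIZE pages can map within one region.
	 */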
#	define pte_bits			3
#	define mapped_space_bits	(3*(PAGE_SHIFT - pte_bits) + PAGE_SHIFT)

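	/*
	 * The virtually mapped linear page table (VMLPT) must span the entire
	 * implemented virtual address space of a region: one pte_t per
	 * virtual page, hence impl_va_bits - PAGE_SHIFT + pte_bits bits.
	 */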
#	define vmlpt_bits		(impl_va_bits - PAGE_SHIFT + pte_bits)
#	define POW2(n)			(1ULL << (n))

	impl_va_bits = ffz(~(local_cpu_data->unimpl_va_mask | (7UL << 61)));

	if (impl_va_bits < 51 || impl_va_bits > 61)
		panic("CPU has bogus IMPL_VA_MSB value of %lu!\n", impl_va_bits - 1);

	/*
	 * mapped_space_bits - PAGE_SHIFT is the total number of PTEs we need,
	 * which must fit into "vmlpt_bits - pte_bits" slots; also make sure
	 * the mapped address space does not run into the unimplemented hole
	 * in the middle of the region.
	 */
	if ((mapped_space_bits - PAGE_SHIFT > vmlpt_bits - pte_bits) ||
	    (mapped_space_bits > impl_va_bits - 1))
		panic("Cannot build a big enough virtual-linear page table"
		      " to cover mapped address space.\n"
		      " Try using a smaller page size.\n");

	/* place the VMLPT at the end of each page-table mapped region: */
	pta = POW2(61) - POW2(vmlpt_bits);

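	/*
	 * Program the PTA register: bit 0 enables the VHPT walker, bits 2-7
	 * encode the table size (2^vmlpt_bits bytes), bit 8 selects between
	 * the short and long format (cleared here: short format), and the
	 * high bits hold the table's base address.
	 */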
	ia64_set_pta(pta | (0 << 8) | (vmlpt_bits << 2) | VHPT_ENABLE_BIT);

	ia64_tlb_init();

#ifdef	CONFIG_HUGETLB_PAGE
	ia64_set_rr(HPAGE_REGION_BASE, HPAGE_SHIFT << 2);
	ia64_srlz_d();
#endif
}

#ifdef CONFIG_VIRTUAL_MEM_MAP
int vmemmap_find_next_valid_pfn(int node, int i)
{
	unsigned long end_address, hole_next_pfn;
	unsigned long stop_address;
	pg_data_t *pgdat = NODE_DATA(node);

	end_address = (unsigned long) &vmem_map[pgdat->node_start_pfn + i];
	end_address = PAGE_ALIGN(end_address);

	stop_address = (unsigned long) &vmem_map[
		pgdat->node_start_pfn + pgdat->node_spanned_pages];

	do {
		pgd_t *pgd;
		pud_t *pud;
		pmd_t *pmd;
		pte_t *pte;

		pgd = pgd_offset_k(end_address);
		if (pgd_none(*pgd)) {
			end_address += PGDIR_SIZE;
			continue;
		}

		pud = pud_offset(pgd, end_address);
		if (pud_none(*pud)) {
			end_address += PUD_SIZE;
			continue;
		}

		pmd = pmd_offset(pud, end_address);
		if (pmd_none(*pmd)) {
			end_address += PMD_SIZE;
			continue;
		}

		pte = pte_offset_kernel(pmd, end_address);
retry_pte:
		if (pte_none(*pte)) {
			end_address += PAGE_SIZE;
			pte++;
			if ((end_address < stop_address) &&
			    (end_address != ALIGN(end_address, 1UL << PMD_SHIFT)))
				goto retry_pte;
			continue;
		}

		break;
	} while (end_address < stop_address);

	end_address = min(end_address, stop_address);
	end_address = end_address - (unsigned long) vmem_map + sizeof(struct page) - 1;
	hole_next_pfn = end_address / sizeof(struct page);
	return hole_next_pfn - pgdat->node_start_pfn;
}

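/*
 * Allocate and install the kernel page-table entries needed to back the
 * vmem_map (virtual memmap) pages covering the physical range [start, end).
 */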
int __init create_mem_map_page_table(u64 start, u64 end, void *arg)
{
	unsigned long address, start_page, end_page;
	struct page *map_start, *map_end;
	int node;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	map_start = vmem_map + (__pa(start) >> PAGE_SHIFT);
	map_end   = vmem_map + (__pa(end) >> PAGE_SHIFT);

	start_page = (unsigned long) map_start & PAGE_MASK;
	end_page = PAGE_ALIGN((unsigned long) map_end);
	node = paddr_to_nid(__pa(start));

	for (address = start_page; address < end_page; address += PAGE_SIZE) {
		pgd = pgd_offset_k(address);
		if (pgd_none(*pgd))
			pgd_populate(&init_mm, pgd, alloc_bootmem_pages_node(NODE_DATA(node), PAGE_SIZE));
		pud = pud_offset(pgd, address);

		if (pud_none(*pud))
			pud_populate(&init_mm, pud, alloc_bootmem_pages_node(NODE_DATA(node), PAGE_SIZE));
		pmd = pmd_offset(pud, address);

		if (pmd_none(*pmd))
			pmd_populate_kernel(&init_mm, pmd, alloc_bootmem_pages_node(NODE_DATA(node), PAGE_SIZE));
		pte = pte_offset_kernel(pmd, address);

		if (pte_none(*pte))
			set_pte(pte, pfn_pte(__pa(alloc_bootmem_pages_node(NODE_DATA(node), PAGE_SIZE)) >> PAGE_SHIFT,
				PAGE_KERNEL));
	}
	return 0;
}

struct memmap_init_callback_data {
	struct page *start;
	struct page *end;
	int nid;
	unsigned long zone;
};

static int __meminit
virtual_memmap_init(u64 start, u64 end, void *arg)
{
	struct memmap_init_callback_data *args;
	struct page *map_start, *map_end;

	args = (struct memmap_init_callback_data *) arg;
	map_start = vmem_map + (__pa(start) >> PAGE_SHIFT);
	map_end   = vmem_map + (__pa(end) >> PAGE_SHIFT);

	if (map_start < args->start)
		map_start = args->start;
	if (map_end > args->end)
		map_end = args->end;

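	/*
	 * Expand the range so that struct page entries which merely share a
	 * backing page with the requested range get initialized too; they may
	 * be accessed later and must not contain garbage.
	 */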
	map_start -= ((unsigned long) map_start & (PAGE_SIZE - 1)) / sizeof(struct page);
	map_end += ((PAGE_ALIGN((unsigned long) map_end) - (unsigned long) map_end)
		    / sizeof(struct page));

	if (map_start < map_end)
		memmap_init_zone((unsigned long)(map_end - map_start),
				 args->nid, args->zone, page_to_pfn(map_start),
				 MEMMAP_EARLY);
	return 0;
}

void __meminit
memmap_init (unsigned long size, int nid, unsigned long zone,
	     unsigned long start_pfn)
{
	if (!vmem_map)
		memmap_init_zone(size, nid, zone, start_pfn, MEMMAP_EARLY);
	else {
		struct page *start;
		struct memmap_init_callback_data args;

		start = pfn_to_page(start_pfn);
		args.start = start;
		args.end = start + size;
		args.nid = nid;
		args.zone = zone;

		efi_memmap_walk(virtual_memmap_init, &args);
	}
}

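/*
 * A pfn is valid if the corresponding vmem_map entry is actually backed by
 * memory: probe the first and (if it lies on a different page) the last byte
 * of the struct page with __get_user().
 */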
int
ia64_pfn_valid (unsigned long pfn)
{
	char byte;
	struct page *pg = pfn_to_page(pfn);

	return     (__get_user(byte, (char __user *) pg) == 0)
		&& ((((u64)pg & PAGE_MASK) == (((u64)(pg + 1) - 1) & PAGE_MASK))
			|| (__get_user(byte, (char __user *) (pg + 1) - 1) == 0));
}
EXPORT_SYMBOL(ia64_pfn_valid);

int __init find_largest_hole(u64 start, u64 end, void *arg)
{
	u64 *max_gap = arg;

	static u64 last_end = PAGE_OFFSET;

	/* NOTE: this assumes the EFI memory map is ordered by address */

	if (*max_gap < (start - last_end))
		*max_gap = start - last_end;
	last_end = end;
	return 0;
}

#endif /* CONFIG_VIRTUAL_MEM_MAP */

int __init register_active_ranges(u64 start, u64 len, int nid)
{
	u64 end = start + len;

#ifdef CONFIG_KEXEC
	if (start > crashk_res.start && start < crashk_res.end)
		start = crashk_res.end;
	if (end > crashk_res.start && end < crashk_res.end)
		end = crashk_res.start;
#endif

	if (start < end)
		memblock_add_node(__pa(start), end - start, nid);
	return 0;
}

static int __init
count_reserved_pages(u64 start, u64 end, void *arg)
{
	unsigned long num_reserved = 0;
	unsigned long *count = arg;

	for (; start < end; start += PAGE_SIZE)
		if (PageReserved(virt_to_page(start)))
			++num_reserved;
	*count += num_reserved;
	return 0;
}

int
find_max_min_low_pfn (u64 start, u64 end, void *arg)
{
	unsigned long pfn_start, pfn_end;
#ifdef CONFIG_FLATMEM
	pfn_start = (PAGE_ALIGN(__pa(start))) >> PAGE_SHIFT;
	pfn_end = (PAGE_ALIGN(__pa(end - 1))) >> PAGE_SHIFT;
#else
	pfn_start = GRANULEROUNDDOWN(__pa(start)) >> PAGE_SHIFT;
	pfn_end = GRANULEROUNDUP(__pa(end - 1)) >> PAGE_SHIFT;
#endif
	min_low_pfn = min(min_low_pfn, pfn_start);
	max_low_pfn = max(max_low_pfn, pfn_end);
	return 0;
}

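/*
 * The "nolwsys" boot option disables the light-weight system call (fsyscall)
 * handlers; every fsyscall entry then falls back to the heavy-weight
 * break-based path (see the fsyscall_table fixup in mem_init()).
 */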
static int nolwsys __initdata;

static int __init
nolwsys_setup (char *s)
{
	nolwsys = 1;
	return 1;
}

__setup("nolwsys", nolwsys_setup);

void __init
mem_init (void)
{
	long reserved_pages, codesize, datasize, initsize;
	pg_data_t *pgdat;
	int i;

	BUG_ON(PTRS_PER_PGD * sizeof(pgd_t) != PAGE_SIZE);
	BUG_ON(PTRS_PER_PMD * sizeof(pmd_t) != PAGE_SIZE);
	BUG_ON(PTRS_PER_PTE * sizeof(pte_t) != PAGE_SIZE);

#ifdef CONFIG_PCI
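	/*
	 * Set up the machine-vector DMA interface before bootmem is freed and
	 * before any driver that may need PCI DMA gets initialized.
	 */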
	platform_dma_init();
#endif

#ifdef CONFIG_FLATMEM
	BUG_ON(!mem_map);
	max_mapnr = max_low_pfn;
#endif

	high_memory = __va(max_low_pfn * PAGE_SIZE);

	for_each_online_pgdat(pgdat)
		if (pgdat->bdata->node_bootmem_map)
			totalram_pages += free_all_bootmem_node(pgdat);

	reserved_pages = 0;
	efi_memmap_walk(count_reserved_pages, &reserved_pages);

	codesize = (unsigned long) _etext - (unsigned long) _stext;
	datasize = (unsigned long) _edata - (unsigned long) _etext;
	initsize = (unsigned long) __init_end - (unsigned long) __init_begin;

	printk(KERN_INFO "Memory: %luk/%luk available (%luk code, %luk reserved, "
	       "%luk data, %luk init)\n", nr_free_pages() << (PAGE_SHIFT - 10),
	       num_physpages << (PAGE_SHIFT - 10), codesize >> 10,
	       reserved_pages << (PAGE_SHIFT - 10), datasize >> 10, initsize >> 10);

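	/*
	 * For fsyscall entry points that have no light-weight handler (or when
	 * "nolwsys" was given), fall back to the normal syscall handler; bit 0
	 * is set so the gate code can tell the two kinds apart.
	 */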
	for (i = 0; i < NR_syscalls; ++i) {
		extern unsigned long sys_call_table[NR_syscalls];
		unsigned long *fsyscall_table = paravirt_get_fsyscall_table();

		if (!fsyscall_table[i] || nolwsys)
			fsyscall_table[i] = sys_call_table[i] | 1;
	}
	setup_gate();
}

#ifdef CONFIG_MEMORY_HOTPLUG
int arch_add_memory(int nid, u64 start, u64 size)
{
	pg_data_t *pgdat;
	struct zone *zone;
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;
	int ret;

	pgdat = NODE_DATA(nid);

	zone = pgdat->node_zones + ZONE_NORMAL;
	ret = __add_pages(nid, zone, start_pfn, nr_pages);

	if (ret)
		printk("%s: Problem encountered in __add_pages() as ret=%d\n",
		       __func__, ret);

	return ret;
}
#endif

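/*
 * Register a Linux/x86 execution domain up front so that a call to
 * personality(PER_LINUX32) succeeds without triggering an attempt to load a
 * personality module.
 */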
static struct exec_domain ia32_exec_domain;

static int __init
per_linux32_init(void)
{
	ia32_exec_domain.name = "Linux/x86";
	ia32_exec_domain.handler = NULL;
	ia32_exec_domain.pers_low = PER_LINUX32;
	ia32_exec_domain.pers_high = PER_LINUX32;
	ia32_exec_domain.signal_map = default_exec_domain.signal_map;
	ia32_exec_domain.signal_invmap = default_exec_domain.signal_invmap;
	register_exec_domain(&ia32_exec_domain);

	return 0;
}

__initcall(per_linux32_init);