/*
 * Initialize MMU support.
 */
#include <linux/kernel.h>
#include <linux/init.h>

#include <linux/bootmem.h>
#include <linux/efi.h>
#include <linux/elf.h>
#include <linux/memblock.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/module.h>
#include <linux/personality.h>
#include <linux/reboot.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/proc_fs.h>
#include <linux/bitops.h>
#include <linux/kexec.h>

#include <asm/dma.h>
#include <asm/io.h>
#include <asm/machvec.h>
#include <asm/numa.h>
#include <asm/patch.h>
#include <asm/pgalloc.h>
#include <asm/sal.h>
#include <asm/sections.h>
#include <asm/tlb.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/mca.h>
#include <asm/paravirt.h>

extern void ia64_tlb_init (void);

unsigned long MAX_DMA_ADDRESS = PAGE_OFFSET + 0x100000000UL;

#ifdef CONFIG_VIRTUAL_MEM_MAP
unsigned long VMALLOC_END = VMALLOC_END_INIT;
EXPORT_SYMBOL(VMALLOC_END);
struct page *vmem_map;
EXPORT_SYMBOL(vmem_map);
#endif

struct page *zero_page_memmap_ptr;
EXPORT_SYMBOL(zero_page_memmap_ptr);

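/*
 * Make the instruction cache coherent with the data cache for a page that is
 * about to be mapped executable.  PG_arch_1 records that the page has already
 * been flushed, so each (possibly compound) page is flushed at most once.
 */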
void
__ia64_sync_icache_dcache (pte_t pte)
{
        unsigned long addr;
        struct page *page;

        page = pte_page(pte);
        addr = (unsigned long) page_address(page);

        if (test_bit(PG_arch_1, &page->flags))
                return;

        flush_icache_range(addr, addr + (PAGE_SIZE << compound_order(page)));
        set_bit(PG_arch_1, &page->flags);
}

/*
 * Since DMA is i-cache coherent, any (complete) page that was written via DMA
 * can be marked as "clean" (by setting PG_arch_1) so that the page does not
 * need another i-cache flush when it is later mapped into an executable
 * vm-area.
 */
void
dma_mark_clean(void *addr, size_t size)
{
        unsigned long pg_addr, end;

        pg_addr = PAGE_ALIGN((unsigned long) addr);
        end = (unsigned long) addr + size;
        while (pg_addr + PAGE_SIZE <= end) {
                struct page *page = virt_to_page(pg_addr);
                set_bit(PG_arch_1, &page->flags);
                pg_addr += PAGE_SIZE;
        }
}

inline void
ia64_set_rbs_bot (void)
{
        unsigned long stack_size = rlimit_max(RLIMIT_STACK) & -16;

        if (stack_size > MAX_USER_STACK_SIZE)
                stack_size = MAX_USER_STACK_SIZE;
        current->thread.rbs_bot = PAGE_ALIGN(current->mm->start_stack - stack_size);
}

/*
 * This performs some platform-dependent address space initialization.
 * On IA-64, we want to set up the VM area for the register backing
 * store (which grows upwards) and install the gateway page which is
 * used for signal trampolines, etc.
 */
void
ia64_init_addr_space (void)
{
        struct vm_area_struct *vma;

        ia64_set_rbs_bot();

        /*
         * If we're out of memory and kmem_cache_zalloc() returns NULL, we
         * simply ignore the problem.  When the process attempts to write to
         * the register backing store for the first time, it will get a
         * SEGFAULT in this case.
         */
        vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
        if (vma) {
                INIT_LIST_HEAD(&vma->anon_vma_chain);
                vma->vm_mm = current->mm;
                vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
                vma->vm_end = vma->vm_start + PAGE_SIZE;
                vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
                vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
                down_write(&current->mm->mmap_sem);
                if (insert_vm_struct(current->mm, vma)) {
                        up_write(&current->mm->mmap_sem);
                        kmem_cache_free(vm_area_cachep, vma);
                        return;
                }
                up_write(&current->mm->mmap_sem);
        }

        /* map NaT-page at address zero to speed up speculative dereferencing of NULL: */
        if (!(current->personality & MMAP_PAGE_ZERO)) {
                vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
                if (vma) {
                        INIT_LIST_HEAD(&vma->anon_vma_chain);
                        vma->vm_mm = current->mm;
                        vma->vm_end = PAGE_SIZE;
                        vma->vm_page_prot = __pgprot(pgprot_val(PAGE_READONLY) | _PAGE_MA_NAT);
                        vma->vm_flags = VM_READ | VM_MAYREAD | VM_IO |
                                        VM_DONTEXPAND | VM_DONTDUMP;
                        down_write(&current->mm->mmap_sem);
                        if (insert_vm_struct(current->mm, vma)) {
                                up_write(&current->mm->mmap_sem);
                                kmem_cache_free(vm_area_cachep, vma);
                                return;
                        }
                        up_write(&current->mm->mmap_sem);
                }
        }
}

void
free_initmem (void)
{
        free_reserved_area((unsigned long)ia64_imva(__init_begin),
                           (unsigned long)ia64_imva(__init_end),
                           0, "unused kernel");
}

void __init
free_initrd_mem (unsigned long start, unsigned long end)
{
        /*
         * EFI uses 4KB pages while the kernel can use 4KB or bigger.
         * Thus EFI and the kernel may have different page sizes.  It is
         * therefore possible for the initrd to share a page with the end
         * of the kernel image.
         *
         * To avoid freeing or reusing the wrong (kernel-sized) page we:
         *      - align up the beginning of the initrd
         *      - align down the end of the initrd
         *
         * This way only pages that are fully covered by the initrd are
         * freed, and any page shared with the kernel image is left alone.
         */
        start = PAGE_ALIGN(start);
        end = end & PAGE_MASK;

        if (start < end)
                printk(KERN_INFO "Freeing initrd memory: %ldkB freed\n", (end - start) >> 10);

        for (; start < end; start += PAGE_SIZE) {
                if (!virt_addr_valid(start))
                        continue;
                free_reserved_page(virt_to_page(start));
        }
}

/*
 * This installs a clean page in the kernel's page table.
 */
static struct page * __init
put_kernel_page (struct page *page, unsigned long address, pgprot_t pgprot)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;

        if (!PageReserved(page))
                printk(KERN_ERR "put_kernel_page: page at 0x%p not in reserved memory\n",
                       page_address(page));

        pgd = pgd_offset_k(address);            /* note: this is NOT pgd_offset()! */

        {
                pud = pud_alloc(&init_mm, pgd, address);
                if (!pud)
                        goto out;
                pmd = pmd_alloc(&init_mm, pud, address);
                if (!pmd)
                        goto out;
                pte = pte_alloc_kernel(pmd, address);
                if (!pte)
                        goto out;
                if (!pte_none(*pte))
                        goto out;
                set_pte(pte, mk_pte(page, pgprot));
        }
  out:
        /* no need for flush_tlb */
        return page;
}

static void __init
setup_gate (void)
{
        void *gate_section;
        struct page *page;

        /*
         * Map the gate page twice: once read-only to export the ELF
         * headers etc. and once as an execute-only page to enable
         * privilege promotion via "epc":
         */
        gate_section = paravirt_get_gate_section();
        page = virt_to_page(ia64_imva(gate_section));
        put_kernel_page(page, GATE_ADDR, PAGE_READONLY);
#ifdef HAVE_BUGGY_SEGREL
        page = virt_to_page(ia64_imva(gate_section + PAGE_SIZE));
        put_kernel_page(page, GATE_ADDR + PAGE_SIZE, PAGE_GATE);
#else
        put_kernel_page(page, GATE_ADDR + PERCPU_PAGE_SIZE, PAGE_GATE);
        /* Fill in the holes (if any) with read-only zero pages: */
        {
                unsigned long addr;

                for (addr = GATE_ADDR + PAGE_SIZE;
                     addr < GATE_ADDR + PERCPU_PAGE_SIZE;
                     addr += PAGE_SIZE)
                {
                        put_kernel_page(ZERO_PAGE(0), addr,
                                        PAGE_READONLY);
                        put_kernel_page(ZERO_PAGE(0), addr + PERCPU_PAGE_SIZE,
                                        PAGE_READONLY);
                }
        }
#endif
        ia64_patch_gate();
}

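/*
 * Configure the MMU for this CPU: place the virtually mapped linear page
 * table (VMLPT) at the top of each region, program the PTA register
 * (optionally enabling the VHPT walker), initialize the TLB and, with
 * CONFIG_HUGETLB_PAGE, set up the huge-page region register.
 */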
void ia64_mmu_init(void *my_cpu_data)
{
        unsigned long pta, impl_va_bits;
        extern void tlb_init(void);

#ifdef CONFIG_DISABLE_VHPT
# define VHPT_ENABLE_BIT 0
#else
# define VHPT_ENABLE_BIT 1
#endif

        /*
         * Check if the virtually mapped linear page table (VMLPT) overlaps
         * with a mapped address space.  The IA-64 architecture guarantees
         * that at least 50 bits of virtual address space are implemented, but
         * if we pick a large enough page size (e.g., 64KB), the mapped address
         * space is big enough that it could overlap with the VMLPT.
         */
# define pte_bits 3
# define mapped_space_bits (3*(PAGE_SHIFT - pte_bits) + PAGE_SHIFT)
        /*
         * The virtual page table has to cover the entire implemented address
         * space within a region even though not all of this space may be
         * mappable.  The reason for this is that the Access bit and Dirty bit
         * fault handlers perform non-speculative accesses to the virtual page
         * table, so the address range of the virtual page table itself needs
         * to be covered by the virtual page table.
         */
# define vmlpt_bits (impl_va_bits - PAGE_SHIFT + pte_bits)
# define POW2(n) (1ULL << (n))

        impl_va_bits = ffz(~(local_cpu_data->unimpl_va_mask | (7UL << 61)));

        if (impl_va_bits < 51 || impl_va_bits > 61)
                panic("CPU has bogus IMPL_VA_MSB value of %lu!\n", impl_va_bits - 1);

        /*
         * The number of pages that the page tables can map
         * (mapped_space_bits - PAGE_SHIFT) must not exceed the number of PTE
         * slots in the VMLPT (vmlpt_bits - pte_bits), and the mapped space
         * itself must fit within the implemented address space of a region
         * (impl_va_bits - 1).
         */
        if ((mapped_space_bits - PAGE_SHIFT > vmlpt_bits - pte_bits) ||
            (mapped_space_bits > impl_va_bits - 1))
                panic("Cannot build a big enough virtual-linear page table"
                      " to cover mapped address space.\n"
                      " Try using a smaller page size.\n");

        /* place the VMLPT at the end of each page-table mapped region: */
        pta = POW2(61) - POW2(vmlpt_bits);

        /*
         * Set the (virtually mapped linear) page table address.  Bit
         * 8 selects between the short and long format, bits 2-7 the
         * size of the table, and bit 0 whether the VHPT walker is
         * enabled.
         */
        ia64_set_pta(pta | (0 << 8) | (vmlpt_bits << 2) | VHPT_ENABLE_BIT);

        ia64_tlb_init();

#ifdef CONFIG_HUGETLB_PAGE
        ia64_set_rr(HPAGE_REGION_BASE, HPAGE_SHIFT << 2);
        ia64_srlz_d();
#endif
}

#ifdef CONFIG_VIRTUAL_MEM_MAP
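/*
 * Walk the kernel page tables backing the virtual mem_map, starting at page
 * frame "i" of "node", and return the offset (relative to the node's first
 * pfn) of the next pfn whose struct page is actually mapped.
 */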
int vmemmap_find_next_valid_pfn(int node, int i)
{
        unsigned long end_address, hole_next_pfn;
        unsigned long stop_address;
        pg_data_t *pgdat = NODE_DATA(node);

        end_address = (unsigned long) &vmem_map[pgdat->node_start_pfn + i];
        end_address = PAGE_ALIGN(end_address);

        stop_address = (unsigned long) &vmem_map[
                pgdat->node_start_pfn + pgdat->node_spanned_pages];

        do {
                pgd_t *pgd;
                pud_t *pud;
                pmd_t *pmd;
                pte_t *pte;

                pgd = pgd_offset_k(end_address);
                if (pgd_none(*pgd)) {
                        end_address += PGDIR_SIZE;
                        continue;
                }

                pud = pud_offset(pgd, end_address);
                if (pud_none(*pud)) {
                        end_address += PUD_SIZE;
                        continue;
                }

                pmd = pmd_offset(pud, end_address);
                if (pmd_none(*pmd)) {
                        end_address += PMD_SIZE;
                        continue;
                }

                pte = pte_offset_kernel(pmd, end_address);
retry_pte:
                if (pte_none(*pte)) {
                        end_address += PAGE_SIZE;
                        pte++;
                        if ((end_address < stop_address) &&
                            (end_address != ALIGN(end_address, 1UL << PMD_SHIFT)))
                                goto retry_pte;
                        continue;
                }

                break;
        } while (end_address < stop_address);

        end_address = min(end_address, stop_address);
        end_address = end_address - (unsigned long) vmem_map + sizeof(struct page) - 1;
        hole_next_pfn = end_address / sizeof(struct page);
        return hole_next_pfn - pgdat->node_start_pfn;
}

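/*
 * Memory-range callback (e.g. for efi_memmap_walk()): allocate and install
 * the kernel page-table entries needed to back the vmem_map entries covering
 * the physical range [start, end).
 */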
int __init create_mem_map_page_table(u64 start, u64 end, void *arg)
{
        unsigned long address, start_page, end_page;
        struct page *map_start, *map_end;
        int node;
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;

        map_start = vmem_map + (__pa(start) >> PAGE_SHIFT);
        map_end = vmem_map + (__pa(end) >> PAGE_SHIFT);

        start_page = (unsigned long) map_start & PAGE_MASK;
        end_page = PAGE_ALIGN((unsigned long) map_end);
        node = paddr_to_nid(__pa(start));

        for (address = start_page; address < end_page; address += PAGE_SIZE) {
                pgd = pgd_offset_k(address);
                if (pgd_none(*pgd))
                        pgd_populate(&init_mm, pgd, alloc_bootmem_pages_node(NODE_DATA(node), PAGE_SIZE));
                pud = pud_offset(pgd, address);

                if (pud_none(*pud))
                        pud_populate(&init_mm, pud, alloc_bootmem_pages_node(NODE_DATA(node), PAGE_SIZE));
                pmd = pmd_offset(pud, address);

                if (pmd_none(*pmd))
                        pmd_populate_kernel(&init_mm, pmd, alloc_bootmem_pages_node(NODE_DATA(node), PAGE_SIZE));
                pte = pte_offset_kernel(pmd, address);

                if (pte_none(*pte))
                        set_pte(pte, pfn_pte(__pa(alloc_bootmem_pages_node(NODE_DATA(node), PAGE_SIZE)) >> PAGE_SHIFT,
                                             PAGE_KERNEL));
        }
        return 0;
}

struct memmap_init_callback_data {
        struct page *start;
        struct page *end;
        int nid;
        unsigned long zone;
};

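/*
 * efi_memmap_walk() callback: initialize the struct page entries of the
 * virtual mem_map corresponding to [start, end), clipped to the window
 * described by the memmap_init_callback_data argument.
 */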
static int __meminit
virtual_memmap_init(u64 start, u64 end, void *arg)
{
        struct memmap_init_callback_data *args;
        struct page *map_start, *map_end;

        args = (struct memmap_init_callback_data *) arg;
        map_start = vmem_map + (__pa(start) >> PAGE_SHIFT);
        map_end = vmem_map + (__pa(end) >> PAGE_SHIFT);

        if (map_start < args->start)
                map_start = args->start;
        if (map_end > args->end)
                map_end = args->end;

        /*
         * We have to initialize "out of bounds" struct page elements that fit
         * completely on the same pages that were allocated for the "in bounds"
         * elements because they may be accessed later (and in particular in
         * memmap_init_zone).
         */
        map_start -= ((unsigned long) map_start & (PAGE_SIZE - 1)) / sizeof(struct page);
        map_end += ((PAGE_ALIGN((unsigned long) map_end) - (unsigned long) map_end)
                    / sizeof(struct page));

        if (map_start < map_end)
                memmap_init_zone((unsigned long)(map_end - map_start),
                                 args->nid, args->zone, page_to_pfn(map_start),
                                 MEMMAP_EARLY, NULL);
        return 0;
}

void __meminit
memmap_init (unsigned long size, int nid, unsigned long zone,
             unsigned long start_pfn)
{
        if (!vmem_map) {
                memmap_init_zone(size, nid, zone, start_pfn, MEMMAP_EARLY,
                                 NULL);
        } else {
                struct page *start;
                struct memmap_init_callback_data args;

                start = pfn_to_page(start_pfn);
                args.start = start;
                args.end = start + size;
                args.nid = nid;
                args.zone = zone;

                efi_memmap_walk(virtual_memmap_init, &args);
        }
}

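/*
 * A pfn is considered valid if its struct page can be read without faulting;
 * both ends of the descriptor are probed in case it straddles an unmapped
 * page of the virtual mem_map.
 */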
int
ia64_pfn_valid (unsigned long pfn)
{
        char byte;
        struct page *pg = pfn_to_page(pfn);

        return (__get_user(byte, (char __user *) pg) == 0)
                && ((((u64)pg & PAGE_MASK) == (((u64)(pg + 1) - 1) & PAGE_MASK))
                    || (__get_user(byte, (char __user *) (pg + 1) - 1) == 0));
}
EXPORT_SYMBOL(ia64_pfn_valid);

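/*
 * Record in *arg the largest gap seen between successive memory ranges; the
 * ranges are assumed to be visited in ascending address order.
 */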
int __init find_largest_hole(u64 start, u64 end, void *arg)
{
        u64 *max_gap = arg;

        static u64 last_end = PAGE_OFFSET;

        /* NOTE: this algorithm assumes the efi memmap table is ordered */

        if (*max_gap < (start - last_end))
                *max_gap = start - last_end;
        last_end = end;
        return 0;
}

#endif /* CONFIG_VIRTUAL_MEM_MAP */

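/*
 * Register a usable memory range with memblock, excluding any portion that
 * has been reserved for the crash kernel.
 */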
int __init register_active_ranges(u64 start, u64 len, int nid)
{
        u64 end = start + len;

#ifdef CONFIG_KEXEC
        if (start > crashk_res.start && start < crashk_res.end)
                start = crashk_res.end;
        if (end > crashk_res.start && end < crashk_res.end)
                end = crashk_res.start;
#endif

        if (start < end)
                memblock_add_node(__pa(start), end - start, nid);
        return 0;
}

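/*
 * efi_memmap_walk() callback: count the reserved pages in [start, end) and
 * add the result to the counter passed via "arg".
 */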
static int __init
count_reserved_pages(u64 start, u64 end, void *arg)
{
        unsigned long num_reserved = 0;
        unsigned long *count = arg;

        for (; start < end; start += PAGE_SIZE)
                if (PageReserved(virt_to_page(start)))
                        ++num_reserved;
        *count += num_reserved;
        return 0;
}

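/*
 * Widen the global min_low_pfn/max_low_pfn bounds to cover a memory range.
 * With FLATMEM the physical addresses are page-aligned, otherwise they are
 * rounded to the granule size.
 */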
int
find_max_min_low_pfn (u64 start, u64 end, void *arg)
{
        unsigned long pfn_start, pfn_end;
#ifdef CONFIG_FLATMEM
        pfn_start = (PAGE_ALIGN(__pa(start))) >> PAGE_SHIFT;
        pfn_end = (PAGE_ALIGN(__pa(end - 1))) >> PAGE_SHIFT;
#else
        pfn_start = GRANULEROUNDDOWN(__pa(start)) >> PAGE_SHIFT;
        pfn_end = GRANULEROUNDUP(__pa(end - 1)) >> PAGE_SHIFT;
#endif
        min_low_pfn = min(min_low_pfn, pfn_start);
        max_low_pfn = max(max_low_pfn, pfn_end);
        return 0;
}

/*
 * Boot command-line option "nolwsys" can be used to disable the use of any
 * light-weight system call handler.  When this option is in effect, all
 * fsyscalls end up bubbling down into the kernel and calling the normal
 * (heavy-weight) syscall handler.  This is useful for performance testing,
 * but could conceivably also come in handy for debugging purposes.
 */
static int nolwsys __initdata;

static int __init
nolwsys_setup (char *s)
{
        nolwsys = 1;
        return 1;
}

__setup("nolwsys", nolwsys_setup);

void __init
mem_init (void)
{
        long reserved_pages, codesize, datasize, initsize;
        pg_data_t *pgdat;
        int i;

        BUG_ON(PTRS_PER_PGD * sizeof(pgd_t) != PAGE_SIZE);
        BUG_ON(PTRS_PER_PMD * sizeof(pmd_t) != PAGE_SIZE);
        BUG_ON(PTRS_PER_PTE * sizeof(pte_t) != PAGE_SIZE);

#ifdef CONFIG_PCI
        /*
         * This needs to be called _after_ the command line has been parsed
         * but _before_ any drivers that may need the PCI DMA interface are
         * initialized or bootmem has been freed.
         */
        platform_dma_init();
#endif

#ifdef CONFIG_FLATMEM
        BUG_ON(!mem_map);
        max_mapnr = max_low_pfn;
#endif

        high_memory = __va(max_low_pfn * PAGE_SIZE);

        for_each_online_pgdat(pgdat)
                if (pgdat->bdata->node_bootmem_map)
                        totalram_pages += free_all_bootmem_node(pgdat);

        reserved_pages = 0;
        efi_memmap_walk(count_reserved_pages, &reserved_pages);

        codesize = (unsigned long) _etext - (unsigned long) _stext;
        datasize = (unsigned long) _edata - (unsigned long) _etext;
        initsize = (unsigned long) __init_end - (unsigned long) __init_begin;

        printk(KERN_INFO "Memory: %luk/%luk available (%luk code, %luk reserved, "
               "%luk data, %luk init)\n", nr_free_pages() << (PAGE_SHIFT - 10),
               num_physpages << (PAGE_SHIFT - 10), codesize >> 10,
               reserved_pages << (PAGE_SHIFT - 10), datasize >> 10, initsize >> 10);

        /*
         * For fsyscall entrypoints with no light-weight handler, use the
         * ordinary (heavy-weight) handler, but mark it by setting bit 0, so
         * the fsyscall entry code can tell them apart.
         */
        for (i = 0; i < NR_syscalls; ++i) {
                extern unsigned long sys_call_table[NR_syscalls];
                unsigned long *fsyscall_table = paravirt_get_fsyscall_table();

                if (!fsyscall_table[i] || nolwsys)
                        fsyscall_table[i] = sys_call_table[i] | 1;
        }
        setup_gate();
}

#ifdef CONFIG_MEMORY_HOTPLUG
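/*
 * Hot-plug "size" bytes of memory at physical address "start" into node
 * "nid" by adding the corresponding page range to that node's ZONE_NORMAL.
 */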
int arch_add_memory(int nid, u64 start, u64 size)
{
        pg_data_t *pgdat;
        struct zone *zone;
        unsigned long start_pfn = start >> PAGE_SHIFT;
        unsigned long nr_pages = size >> PAGE_SHIFT;
        int ret;

        pgdat = NODE_DATA(nid);

        zone = pgdat->node_zones + ZONE_NORMAL;
        ret = __add_pages(nid, zone, start_pfn, nr_pages);

        if (ret)
                printk("%s: Problem encountered in __add_pages() as ret=%d\n",
                       __func__, ret);

        return ret;
}

#ifdef CONFIG_MEMORY_HOTREMOVE
int arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap)
{
        unsigned long start_pfn = start >> PAGE_SHIFT;
        unsigned long nr_pages = size >> PAGE_SHIFT;
        struct zone *zone;
        int ret;

        zone = page_zone(pfn_to_page(start_pfn));
        ret = __remove_pages(zone, start_pfn, nr_pages, altmap);
        if (ret)
                pr_warn("%s: Problem encountered in __remove_pages() as"
                        " ret=%d\n", __func__, ret);

        return ret;
}
#endif /* CONFIG_MEMORY_HOTREMOVE */
#endif /* CONFIG_MEMORY_HOTPLUG */

/*
 * Even when CONFIG_IA32_SUPPORT is not enabled it is
 * useful to have the Linux/x86 personality domain registered,
 * to avoid an attempted module load when emulators call
 * personality(PER_LINUX32).  This saves several milliseconds
 * on each such call.
 */
static struct exec_domain ia32_exec_domain;

static int __init
per_linux32_init(void)
{
        ia32_exec_domain.name = "Linux/x86";
        ia32_exec_domain.handler = NULL;
        ia32_exec_domain.pers_low = PER_LINUX32;
        ia32_exec_domain.pers_high = PER_LINUX32;
        ia32_exec_domain.signal_map = default_exec_domain.signal_map;
        ia32_exec_domain.signal_invmap = default_exec_domain.signal_invmap;
        register_exec_domain(&ia32_exec_domain);

        return 0;
}

__initcall(per_linux32_init);