/*
 * Initialize MMU support.
 *
 * Copyright (C) 1998-2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */
#include <linux/kernel.h>
#include <linux/init.h>

#include <linux/bootmem.h>
#include <linux/efi.h>
#include <linux/elf.h>
#include <linux/memblock.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/module.h>
#include <linux/personality.h>
#include <linux/reboot.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/proc_fs.h>
#include <linux/bitops.h>
#include <linux/kexec.h>

#include <asm/dma.h>
#include <asm/io.h>
#include <asm/machvec.h>
#include <asm/numa.h>
#include <asm/patch.h>
#include <asm/pgalloc.h>
#include <asm/sal.h>
#include <asm/sections.h>
#include <asm/tlb.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/mca.h>
#include <asm/paravirt.h>

extern void ia64_tlb_init (void);

unsigned long MAX_DMA_ADDRESS = PAGE_OFFSET + 0x100000000UL;
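
/*
 * A worked example (illustrative, not from the original source): the
 * identity-mapped kernel region starts at PAGE_OFFSET (0xe000000000000000 on
 * ia64), so the initializer above pins the ISA-style 32-bit DMA limit to the
 * first 4GB of physical memory:
 * 0xe000000000000000 + 0x100000000 = 0xe000000100000000.
 */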

#ifdef CONFIG_VIRTUAL_MEM_MAP
unsigned long VMALLOC_END = VMALLOC_END_INIT;
EXPORT_SYMBOL(VMALLOC_END);
struct page *vmem_map;
EXPORT_SYMBOL(vmem_map);
#endif

struct page *zero_page_memmap_ptr;	/* map entry for zero page */
EXPORT_SYMBOL(zero_page_memmap_ptr);

void
__ia64_sync_icache_dcache (pte_t pte)
{
	unsigned long addr;
	struct page *page;

	page = pte_page(pte);
	addr = (unsigned long) page_address(page);

	if (test_bit(PG_arch_1, &page->flags))
		return;				/* i-cache is already coherent with d-cache */

	flush_icache_range(addr, addr + (PAGE_SIZE << compound_order(page)));
	set_bit(PG_arch_1, &page->flags);	/* mark page as clean */
}
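
/*
 * Usage note (an assumption about callers outside this file): set_pte() in
 * arch/ia64/include/asm/pgtable.h is expected to call
 * __ia64_sync_icache_dcache() when installing an executable pte, so the
 * flush above runs at most once per page while PG_arch_1 remains set.
 */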

/*
 * Since DMA is i-cache coherent, any (complete) pages that were written via
 * DMA can be marked as "clean" so that lazy_mmu_prot_update() doesn't have to
 * flush them when they get mapped into an executable vm-area.
 */
void
dma_mark_clean(void *addr, size_t size)
{
	unsigned long pg_addr, end;

	pg_addr = PAGE_ALIGN((unsigned long) addr);
	end = (unsigned long) addr + size;
	while (pg_addr + PAGE_SIZE <= end) {
		struct page *page = virt_to_page(pg_addr);
		set_bit(PG_arch_1, &page->flags);
		pg_addr += PAGE_SIZE;
	}
}

inline void
ia64_set_rbs_bot (void)
{
	unsigned long stack_size = rlimit_max(RLIMIT_STACK) & -16;

	if (stack_size > MAX_USER_STACK_SIZE)
		stack_size = MAX_USER_STACK_SIZE;
	current->thread.rbs_bot = PAGE_ALIGN(current->mm->start_stack - stack_size);
}
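
/*
 * A worked example (illustrative numbers, not from the original source):
 * with a hard RLIMIT_STACK of 8MB, the register backing store bottom lands
 * at PAGE_ALIGN(start_stack - 8MB), i.e. one stack-limit below the top of
 * the user stack, rounded up to a page boundary.  The "& -16" above merely
 * clears the low four bits so an odd rlimit can't yield a misaligned size.
 */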

/*
 * This performs some platform-dependent address space initialization.
 * On IA-64, we want to setup the VM area for the register backing
 * store (which grows upwards) and install the gateway page which is
 * used for signal trampolines, etc.
 */
void
ia64_init_addr_space (void)
{
	struct vm_area_struct *vma;

	ia64_set_rbs_bot();

	/*
	 * If we're out of memory and kmem_cache_alloc() returns NULL, we simply ignore
	 * the problem.  When the process attempts to write to the register backing store
	 * for the first time, it will get a SEGFAULT in this case.
	 */
	vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
	if (vma) {
		INIT_LIST_HEAD(&vma->anon_vma_chain);
		vma->vm_mm = current->mm;
		vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
		vma->vm_end = vma->vm_start + PAGE_SIZE;
		vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
		down_write(&current->mm->mmap_sem);
		if (insert_vm_struct(current->mm, vma)) {
			up_write(&current->mm->mmap_sem);
			kmem_cache_free(vm_area_cachep, vma);
			return;
		}
		up_write(&current->mm->mmap_sem);
	}

	/* map NaT-page at address zero to speed up speculative dereferencing of NULL: */
	if (!(current->personality & MMAP_PAGE_ZERO)) {
		vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
		if (vma) {
			INIT_LIST_HEAD(&vma->anon_vma_chain);
			vma->vm_mm = current->mm;
			vma->vm_end = PAGE_SIZE;
			vma->vm_page_prot = __pgprot(pgprot_val(PAGE_READONLY) | _PAGE_MA_NAT);
			vma->vm_flags = VM_READ | VM_MAYREAD | VM_IO |
					VM_DONTEXPAND | VM_DONTDUMP;
			down_write(&current->mm->mmap_sem);
			if (insert_vm_struct(current->mm, vma)) {
				up_write(&current->mm->mmap_sem);
				kmem_cache_free(vm_area_cachep, vma);
				return;
			}
			up_write(&current->mm->mmap_sem);
		}
	}
}

void
free_initmem (void)
{
	free_reserved_area(ia64_imva(__init_begin), ia64_imva(__init_end),
			   -1, "unused kernel");
}

void __init
free_initrd_mem (unsigned long start, unsigned long end)
{
	/*
	 * EFI uses 4KB pages while the kernel can use 4KB or bigger.
	 * Thus EFI and the kernel may have different page sizes. It is
	 * therefore possible to have memory between EFI_PAGE_SIZE and
	 * PAGE_SIZE which is not mapped in the kernel page table.  Pages
	 * that are only partially covered by the initrd must not be
	 * freed, as they may still be in use.  To avoid freeing/using
	 * the wrong page (kernel sized) we:
	 *	- align up the beginning of initrd
	 *	- align down the end of initrd
	 *
	 *  |             |
	 *  |=============| a000
	 *  |             |
	 *  |             |
	 *  |             | 9000
	 *  |/////////////|
	 *  |/////////////|
	 *  |=============| 8000
	 *  |///INITRD////|
	 *  |/////////////|
	 *  |/////////////| 7000
	 *  |             |
	 *  |KKKKKKKKKKKKK|
	 *  |=============| 6000
	 *  |KKKKKKKKKKKKK|
	 *  |KKKKKKKKKKKKK|
	 *  K=kernel using 8KB pages
	 *
	 * In this example, we must free page 8000 ONLY. So we must align up
	 * initrd_start and keep initrd_end as is.
	 */
	start = PAGE_ALIGN(start);
	end = end & PAGE_MASK;

	if (start < end)
		printk(KERN_INFO "Freeing initrd memory: %ldkB freed\n", (end - start) >> 10);

	for (; start < end; start += PAGE_SIZE) {
		if (!virt_addr_valid(start))
			continue;
		free_reserved_page(virt_to_page(start));
	}
}

/*
 * This installs a clean page in the kernel's page table.
 */
static struct page * __init
put_kernel_page (struct page *page, unsigned long address, pgprot_t pgprot)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	if (!PageReserved(page))
		printk(KERN_ERR "put_kernel_page: page at 0x%p not in reserved memory\n",
		       page_address(page));

	pgd = pgd_offset_k(address);		/* note: this is NOT pgd_offset()! */

	{
		pud = pud_alloc(&init_mm, pgd, address);
		if (!pud)
			goto out;
		pmd = pmd_alloc(&init_mm, pud, address);
		if (!pmd)
			goto out;
		pte = pte_alloc_kernel(pmd, address);
		if (!pte)
			goto out;
		if (!pte_none(*pte))
			goto out;
		set_pte(pte, mk_pte(page, pgprot));
	}
  out:
	/* no need for flush_tlb */
	return page;
}

static void __init
setup_gate (void)
{
	void *gate_section;
	struct page *page;

	/*
	 * Map the gate page twice: once read-only to export the ELF
	 * headers etc. and once execute-only page to enable
	 * privilege-promotion via "epc":
	 */
	gate_section = paravirt_get_gate_section();
	page = virt_to_page(ia64_imva(gate_section));
	put_kernel_page(page, GATE_ADDR, PAGE_READONLY);
#ifdef HAVE_BUGGY_SEGREL
	page = virt_to_page(ia64_imva(gate_section + PAGE_SIZE));
	put_kernel_page(page, GATE_ADDR + PAGE_SIZE, PAGE_GATE);
#else
	put_kernel_page(page, GATE_ADDR + PERCPU_PAGE_SIZE, PAGE_GATE);
	/* Fill in the holes (if any) with read-only zero pages: */
	{
		unsigned long addr;

		for (addr = GATE_ADDR + PAGE_SIZE;
		     addr < GATE_ADDR + PERCPU_PAGE_SIZE;
		     addr += PAGE_SIZE)
		{
			put_kernel_page(ZERO_PAGE(0), addr,
					PAGE_READONLY);
			put_kernel_page(ZERO_PAGE(0), addr + PERCPU_PAGE_SIZE,
					PAGE_READONLY);
		}
	}
#endif
	ia64_patch_gate();
}

void
ia64_mmu_init (void *my_cpu_data)
{
	unsigned long pta, impl_va_bits;
	extern void tlb_init (void);

#ifdef CONFIG_DISABLE_VHPT
#	define VHPT_ENABLE_BIT	0
#else
#	define VHPT_ENABLE_BIT	1
#endif

	/*
	 * Check if the virtually mapped linear page table (VMLPT) overlaps
	 * with a mapped address space.  A three-level page table covers
	 * "mapped_space_bits" of address space: each level resolves
	 * PAGE_SHIFT - pte_bits bits (pte_bits being log2(sizeof(pte_t))),
	 * on top of the PAGE_SHIFT bits of offset within the leaf page.
	 */
#	define pte_bits			3
#	define mapped_space_bits	(3*(PAGE_SHIFT - pte_bits) + PAGE_SHIFT)
	/*
	 * The virtual page table has to cover the entire implemented
	 * address space within a region even though not all of this space
	 * may be usable.  The IA-64 architecture guarantees that at least
	 * 51 bits of virtual address space are implemented (MSB == 50).
	 */
#	define vmlpt_bits		(impl_va_bits - PAGE_SHIFT + pte_bits)
#	define POW2(n)			(1ULL << (n))

	impl_va_bits = ffz(~(local_cpu_data->unimpl_va_mask | (7UL << 61)));

	if (impl_va_bits < 51 || impl_va_bits > 61)
		panic("CPU has bogus IMPL_VA_MSB value of %lu!\n", impl_va_bits - 1);

	/*
	 * mapped_space_bits - PAGE_SHIFT is the total number of ptes we
	 * need, which has to fit into the vmlpt_bits - pte_bits slots the
	 * VMLPT provides; the second half of the test makes sure the VMLPT
	 * itself lies outside the mapped address space.
	 */
	if ((mapped_space_bits - PAGE_SHIFT > vmlpt_bits - pte_bits) ||
	    (mapped_space_bits > impl_va_bits - 1))
		panic("Cannot build a big enough virtual-linear page table"
		      " to cover mapped address space.\n"
		      " Try using a smaller page size.\n");

	/* place the VMLPT at the end of each page-table mapped region: */
	pta = POW2(61) - POW2(vmlpt_bits);

	/*
	 * Set the (virtually mapped linear) page table address.  Bit
	 * 8 selects between the short and long format, bits 2-7 the
	 * size of the table, and bit 0 whether the VHPT walker is
	 * enabled.
	 */
	ia64_set_pta(pta | (0 << 8) | (vmlpt_bits << 2) | VHPT_ENABLE_BIT);
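
	/*
	 * A worked example (illustrative, assuming 16KB pages; not from the
	 * original source): with PAGE_SHIFT = 14 and pte_bits = 3,
	 * mapped_space_bits = 3*(14-3) + 14 = 47.  On a CPU implementing 51
	 * virtual-address bits per region, vmlpt_bits = 51 - 14 + 3 = 40, so
	 * pta = 2^61 - 2^40 places a 1TB VMLPT at the very top of each
	 * region, and both overlap checks above pass (33 <= 37, 47 <= 50).
	 */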

	ia64_tlb_init();

#ifdef	CONFIG_HUGETLB_PAGE
	ia64_set_rr(HPAGE_REGION_BASE, HPAGE_SHIFT << 2);
	ia64_srlz_d();
#endif
}

#ifdef CONFIG_VIRTUAL_MEM_MAP
int vmemmap_find_next_valid_pfn(int node, int i)
{
	unsigned long end_address, hole_next_pfn;
	unsigned long stop_address;
	pg_data_t *pgdat = NODE_DATA(node);

	end_address = (unsigned long) &vmem_map[pgdat->node_start_pfn + i];
	end_address = PAGE_ALIGN(end_address);
	stop_address = (unsigned long) &vmem_map[
		pgdat->node_start_pfn + pgdat->node_spanned_pages];

	do {
		pgd_t *pgd;
		pud_t *pud;
		pmd_t *pmd;
		pte_t *pte;

		pgd = pgd_offset_k(end_address);
		if (pgd_none(*pgd)) {
			end_address += PGDIR_SIZE;
			continue;
		}

		pud = pud_offset(pgd, end_address);
		if (pud_none(*pud)) {
			end_address += PUD_SIZE;
			continue;
		}

		pmd = pmd_offset(pud, end_address);
		if (pmd_none(*pmd)) {
			end_address += PMD_SIZE;
			continue;
		}

		pte = pte_offset_kernel(pmd, end_address);
retry_pte:
		if (pte_none(*pte)) {
			end_address += PAGE_SIZE;
			pte++;
			if ((end_address < stop_address) &&
			    (end_address != ALIGN(end_address, 1UL << PMD_SHIFT)))
				goto retry_pte;
			continue;
		}
		/* Found next valid vmem_map page */
		break;
	} while (end_address < stop_address);

	end_address = min(end_address, stop_address);
	end_address = end_address - (unsigned long) vmem_map + sizeof(struct page) - 1;
	hole_next_pfn = end_address / sizeof(struct page);
	return hole_next_pfn - pgdat->node_start_pfn;
}

int __init create_mem_map_page_table(u64 start, u64 end, void *arg)
{
	unsigned long address, start_page, end_page;
	struct page *map_start, *map_end;
	int node;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	map_start = vmem_map + (__pa(start) >> PAGE_SHIFT);
	map_end   = vmem_map + (__pa(end) >> PAGE_SHIFT);

	start_page = (unsigned long) map_start & PAGE_MASK;
	end_page = PAGE_ALIGN((unsigned long) map_end);
	node = paddr_to_nid(__pa(start));

	for (address = start_page; address < end_page; address += PAGE_SIZE) {
		pgd = pgd_offset_k(address);
		if (pgd_none(*pgd))
			pgd_populate(&init_mm, pgd, alloc_bootmem_pages_node(NODE_DATA(node), PAGE_SIZE));
		pud = pud_offset(pgd, address);

		if (pud_none(*pud))
			pud_populate(&init_mm, pud, alloc_bootmem_pages_node(NODE_DATA(node), PAGE_SIZE));
		pmd = pmd_offset(pud, address);

		if (pmd_none(*pmd))
			pmd_populate_kernel(&init_mm, pmd, alloc_bootmem_pages_node(NODE_DATA(node), PAGE_SIZE));
		pte = pte_offset_kernel(pmd, address);

		if (pte_none(*pte))
			set_pte(pte, pfn_pte(__pa(alloc_bootmem_pages_node(NODE_DATA(node), PAGE_SIZE)) >> PAGE_SHIFT,
					     PAGE_KERNEL));
	}
	return 0;
}
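
/*
 * Usage note (an assumption about callers, which live outside this file):
 * create_mem_map_page_table() is meant to be handed to efi_memmap_walk(),
 * e.g. efi_memmap_walk(create_mem_map_page_table, NULL) from paging_init(),
 * so that backing pages for the virtual mem_map get allocated for every
 * physical memory range the firmware reports.
 */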

struct memmap_init_callback_data {
	struct page *start;
	struct page *end;
	int nid;
	unsigned long zone;
};

static int __meminit
virtual_memmap_init(u64 start, u64 end, void *arg)
{
	struct memmap_init_callback_data *args;
	struct page *map_start, *map_end;

	args = (struct memmap_init_callback_data *) arg;
	map_start = vmem_map + (__pa(start) >> PAGE_SHIFT);
	map_end   = vmem_map + (__pa(end) >> PAGE_SHIFT);

	if (map_start < args->start)
		map_start = args->start;
	if (map_end > args->end)
		map_end = args->end;

	/*
	 * We have to initialize "out of range" struct page elements that fit completely
	 * on the same pages that were allocated for the "in range" elements because they
	 * may be referenced later (and found to be "reserved").
	 */
	map_start -= ((unsigned long) map_start & (PAGE_SIZE - 1)) / sizeof(struct page);
	map_end += ((PAGE_ALIGN((unsigned long) map_end) - (unsigned long) map_end)
		    / sizeof(struct page));

	if (map_start < map_end)
		memmap_init_zone((unsigned long)(map_end - map_start),
				 args->nid, args->zone, page_to_pfn(map_start),
				 MEMMAP_EARLY);
	return 0;
}

void __meminit
memmap_init (unsigned long size, int nid, unsigned long zone,
	     unsigned long start_pfn)
{
	if (!vmem_map)
		memmap_init_zone(size, nid, zone, start_pfn, MEMMAP_EARLY);
	else {
		struct page *start;
		struct memmap_init_callback_data args;

		start = pfn_to_page(start_pfn);
		args.start = start;
		args.end = start + size;
		args.nid = nid;
		args.zone = zone;

		efi_memmap_walk(virtual_memmap_init, &args);
	}
}

int
ia64_pfn_valid (unsigned long pfn)
{
	char byte;
	struct page *pg = pfn_to_page(pfn);

	/*
	 * The mem_map entry is valid if we can read its first byte and,
	 * when the entry straddles a page boundary, its last byte too.
	 */
	return	 (__get_user(byte, (char __user *) pg) == 0)
		&& ((((u64)pg & PAGE_MASK) == (((u64)(pg + 1) - 1) & PAGE_MASK))
			|| (__get_user(byte, (char __user *) (pg + 1) - 1) == 0));
}
EXPORT_SYMBOL(ia64_pfn_valid);

int __init find_largest_hole(u64 start, u64 end, void *arg)
{
	u64 *max_gap = arg;

	static u64 last_end = PAGE_OFFSET;

	/* NOTE: this algorithm assumes efi memmap table is ordered */

	if (*max_gap < (start - last_end))
		*max_gap = start - last_end;
	last_end = end;
	return 0;
}

#endif /* CONFIG_VIRTUAL_MEM_MAP */

int __init register_active_ranges(u64 start, u64 len, int nid)
{
	u64 end = start + len;

#ifdef CONFIG_KEXEC
	/* clip the range so it doesn't overlap the crash kernel reservation */
	if (start > crashk_res.start && start < crashk_res.end)
		start = crashk_res.end;
	if (end > crashk_res.start && end < crashk_res.end)
		end = crashk_res.start;
#endif

	if (start < end)
		memblock_add_node(__pa(start), end - start, nid);
	return 0;
}

int
find_max_min_low_pfn (u64 start, u64 end, void *arg)
{
	unsigned long pfn_start, pfn_end;
#ifdef CONFIG_FLATMEM
	pfn_start = (PAGE_ALIGN(__pa(start))) >> PAGE_SHIFT;
	pfn_end = (PAGE_ALIGN(__pa(end - 1))) >> PAGE_SHIFT;
#else
	pfn_start = GRANULEROUNDDOWN(__pa(start)) >> PAGE_SHIFT;
	pfn_end = GRANULEROUNDUP(__pa(end - 1)) >> PAGE_SHIFT;
#endif
	min_low_pfn = min(min_low_pfn, pfn_start);
	max_low_pfn = max(max_low_pfn, pfn_end);
	return 0;
}

/*
 * Boot command-line option "nolwsys" can be used to disable the use of any light-weight
 * system call handler.  When this option is in effect, all fsyscalls will end up bubbling
 * down into the kernel and calling the normal (heavy-weight) syscall handler.  This is
 * useful for performance testing, but conceivably could also come in handy for debugging
 * purposes.
 */
static int nolwsys __initdata;

static int __init
nolwsys_setup (char *s)
{
	nolwsys = 1;
	return 1;
}

__setup("nolwsys", nolwsys_setup);

void __init
mem_init (void)
{
	int i;

	BUG_ON(PTRS_PER_PGD * sizeof(pgd_t) != PAGE_SIZE);
	BUG_ON(PTRS_PER_PMD * sizeof(pmd_t) != PAGE_SIZE);
	BUG_ON(PTRS_PER_PTE * sizeof(pte_t) != PAGE_SIZE);

#ifdef CONFIG_PCI
	/*
	 * This needs to be called _after_ the command line has been parsed but _before_
	 * any drivers that may need the PCI DMA interface are initialized or bootmem has
	 * been freed.
	 */
	platform_dma_init();
#endif

#ifdef CONFIG_FLATMEM
	BUG_ON(!mem_map);
#endif

	set_max_mapnr(max_low_pfn);
	high_memory = __va(max_low_pfn * PAGE_SIZE);
	free_all_bootmem();
	mem_init_print_info(NULL);

	/*
	 * For fsyscall entrypoints with no light-weight handler, use the ordinary
	 * (heavy-weight) handler, but mark it by setting bit 0, so the fsyscall entry
	 * code can tell them apart.
	 */
	for (i = 0; i < NR_syscalls; ++i) {
		extern unsigned long sys_call_table[NR_syscalls];
		unsigned long *fsyscall_table = paravirt_get_fsyscall_table();

		if (!fsyscall_table[i] || nolwsys)
			fsyscall_table[i] = sys_call_table[i] | 1;
	}
	setup_gate();
}

#ifdef CONFIG_MEMORY_HOTPLUG
int arch_add_memory(int nid, u64 start, u64 size)
{
	pg_data_t *pgdat;
	struct zone *zone;
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;
	int ret;

	pgdat = NODE_DATA(nid);

	zone = pgdat->node_zones + ZONE_NORMAL;
	ret = __add_pages(nid, zone, start_pfn, nr_pages);

	if (ret)
		printk(KERN_ERR "%s: Problem encountered in __add_pages() as ret=%d\n",
		       __func__, ret);

	return ret;
}

#ifdef CONFIG_MEMORY_HOTREMOVE
int arch_remove_memory(u64 start, u64 size)
{
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;
	struct zone *zone;
	int ret;

	zone = page_zone(pfn_to_page(start_pfn));
	ret = __remove_pages(zone, start_pfn, nr_pages);
	if (ret)
		pr_warn("%s: Problem encountered in __remove_pages() as"
			" ret=%d\n", __func__, ret);

	return ret;
}
#endif
#endif

/*
 * Even when CONFIG_IA32_SUPPORT is not enabled it is
 * useful to have the Linux/x86 domain registered to
 * avoid an attempted module load when emulators call
 * personality(PER_LINUX32). This saves several milliseconds
 * on each such call.
 */
static struct exec_domain ia32_exec_domain;

static int __init
per_linux32_init(void)
{
	ia32_exec_domain.name = "Linux/x86";
	ia32_exec_domain.handler = NULL;
	ia32_exec_domain.pers_low = PER_LINUX32;
	ia32_exec_domain.pers_high = PER_LINUX32;
	ia32_exec_domain.signal_map = default_exec_domain.signal_map;
	ia32_exec_domain.signal_invmap = default_exec_domain.signal_invmap;
	register_exec_domain(&ia32_exec_domain);

	return 0;
}

__initcall(per_linux32_init);