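/*
 * Memory management initialization for ia64: MMU/VHPT setup, the
 * virtually mapped mem_map, the per-process register backing store
 * VMA, and the gate page used for signal trampolines.
 */
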
#include <linux/kernel.h>
#include <linux/init.h>

#include <linux/bootmem.h>
#include <linux/efi.h>
#include <linux/elf.h>
#include <linux/memblock.h>
#include <linux/mm.h>
#include <linux/sched/signal.h>
#include <linux/mmzone.h>
#include <linux/module.h>
#include <linux/personality.h>
#include <linux/reboot.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/proc_fs.h>
#include <linux/bitops.h>
#include <linux/kexec.h>

#include <asm/dma.h>
#include <asm/io.h>
#include <asm/machvec.h>
#include <asm/numa.h>
#include <asm/patch.h>
#include <asm/pgalloc.h>
#include <asm/sal.h>
#include <asm/sections.h>
#include <asm/tlb.h>
#include <linux/uaccess.h>
#include <asm/unistd.h>
#include <asm/mca.h>

extern void ia64_tlb_init (void);

unsigned long MAX_DMA_ADDRESS = PAGE_OFFSET + 0x100000000UL;

#ifdef CONFIG_VIRTUAL_MEM_MAP
unsigned long VMALLOC_END = VMALLOC_END_INIT;
EXPORT_SYMBOL(VMALLOC_END);
struct page *vmem_map;
EXPORT_SYMBOL(vmem_map);
#endif

struct page *zero_page_memmap_ptr;	/* map entry for zero page */
EXPORT_SYMBOL(zero_page_memmap_ptr);

void
__ia64_sync_icache_dcache (pte_t pte)
{
	unsigned long addr;
	struct page *page;

	page = pte_page(pte);
	addr = (unsigned long) page_address(page);

	if (test_bit(PG_arch_1, &page->flags))
		return;				/* i-cache is already coherent with d-cache */

	flush_icache_range(addr, addr + (PAGE_SIZE << compound_order(page)));
	set_bit(PG_arch_1, &page->flags);	/* mark page as clean */
}

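/*
 * DMA on ia64 is i-cache coherent, so any (complete) page that was written
 * via DMA can be marked PG_arch_1 ("clean") and does not need an i-cache
 * flush when it is later mapped into an executable vm-area.
 */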
void
dma_mark_clean(void *addr, size_t size)
{
	unsigned long pg_addr, end;

	pg_addr = PAGE_ALIGN((unsigned long) addr);
	end = (unsigned long) addr + size;
	while (pg_addr + PAGE_SIZE <= end) {
		struct page *page = virt_to_page(pg_addr);
		set_bit(PG_arch_1, &page->flags);
		pg_addr += PAGE_SIZE;
	}
}

inline void
ia64_set_rbs_bot (void)
{
	unsigned long stack_size = rlimit_max(RLIMIT_STACK) & -16;

	if (stack_size > MAX_USER_STACK_SIZE)
		stack_size = MAX_USER_STACK_SIZE;
	current->thread.rbs_bot = PAGE_ALIGN(current->mm->start_stack - stack_size);
}

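/*
 * This performs some platform-dependent address-space initialization.
 * On IA-64, we want to set up the VM area for the register backing
 * store (which grows upwards) and install the gateway page which is
 * used for signal trampolines, etc.
 */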
void
ia64_init_addr_space (void)
{
	struct vm_area_struct *vma;

	ia64_set_rbs_bot();

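	/*
	 * If we're out of memory and kmem_cache_zalloc() returns NULL, we
	 * simply ignore the problem.  When the process attempts to write to
	 * the register backing store for the first time, it will get a
	 * SEGFAULT in this case.
	 */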
	vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
	if (vma) {
		INIT_LIST_HEAD(&vma->anon_vma_chain);
		vma->vm_mm = current->mm;
		vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
		vma->vm_end = vma->vm_start + PAGE_SIZE;
		vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
		down_write(&current->mm->mmap_sem);
		if (insert_vm_struct(current->mm, vma)) {
			up_write(&current->mm->mmap_sem);
			kmem_cache_free(vm_area_cachep, vma);
			return;
		}
		up_write(&current->mm->mmap_sem);
	}

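	/* map NaT-page at address zero to speed up speculative dereferencing of NULL: */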
	if (!(current->personality & MMAP_PAGE_ZERO)) {
		vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
		if (vma) {
			INIT_LIST_HEAD(&vma->anon_vma_chain);
			vma->vm_mm = current->mm;
			vma->vm_end = PAGE_SIZE;
			vma->vm_page_prot = __pgprot(pgprot_val(PAGE_READONLY) | _PAGE_MA_NAT);
			vma->vm_flags = VM_READ | VM_MAYREAD | VM_IO |
					VM_DONTEXPAND | VM_DONTDUMP;
			down_write(&current->mm->mmap_sem);
			if (insert_vm_struct(current->mm, vma)) {
				up_write(&current->mm->mmap_sem);
				kmem_cache_free(vm_area_cachep, vma);
				return;
			}
			up_write(&current->mm->mmap_sem);
		}
	}
}

void
free_initmem (void)
{
	free_reserved_area(ia64_imva(__init_begin), ia64_imva(__init_end),
			   -1, "unused kernel");
}

void __init
free_initrd_mem (unsigned long start, unsigned long end)
{
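	/*
	 * EFI uses 4KB pages while the kernel can use 4KB or bigger.  The
	 * initrd may therefore share its first and last kernel pages with
	 * other data that must stay mapped, so only the pages completely
	 * covered by the initrd are released: round the start up and the
	 * end down to page boundaries before freeing.
	 */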
	start = PAGE_ALIGN(start);
	end = end & PAGE_MASK;

	if (start < end)
		printk(KERN_INFO "Freeing initrd memory: %ldkB freed\n", (end - start) >> 10);

	for (; start < end; start += PAGE_SIZE) {
		if (!virt_addr_valid(start))
			continue;
		free_reserved_page(virt_to_page(start));
	}
}

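/*
 * This installs a clean page in the kernel's page table.
 */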
static struct page * __init
put_kernel_page (struct page *page, unsigned long address, pgprot_t pgprot)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	pgd = pgd_offset_k(address);	/* note: this is NOT pgd_offset()! */

	{
		pud = pud_alloc(&init_mm, pgd, address);
		if (!pud)
			goto out;
		pmd = pmd_alloc(&init_mm, pud, address);
		if (!pmd)
			goto out;
		pte = pte_alloc_kernel(pmd, address);
		if (!pte)
			goto out;
		if (!pte_none(*pte))
			goto out;
		set_pte(pte, mk_pte(page, pgprot));
	}
  out:
	/* no need for flush_tlb */
	return page;
}

static void __init
setup_gate (void)
{
	struct page *page;

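	/*
	 * Map the gate page twice: once read-only to export the ELF
	 * headers etc. and once execute-only to enable privilege
	 * promotion via "epc":
	 */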
	page = virt_to_page(ia64_imva(__start_gate_section));
	put_kernel_page(page, GATE_ADDR, PAGE_READONLY);
#ifdef HAVE_BUGGY_SEGREL
	page = virt_to_page(ia64_imva(__start_gate_section + PAGE_SIZE));
	put_kernel_page(page, GATE_ADDR + PAGE_SIZE, PAGE_GATE);
#else
	put_kernel_page(page, GATE_ADDR + PERCPU_PAGE_SIZE, PAGE_GATE);

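	/* Fill in the holes (if any) with read-only zero pages: */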
	{
		unsigned long addr;

		for (addr = GATE_ADDR + PAGE_SIZE;
		     addr < GATE_ADDR + PERCPU_PAGE_SIZE;
		     addr += PAGE_SIZE)
		{
			put_kernel_page(ZERO_PAGE(0), addr,
					PAGE_READONLY);
			put_kernel_page(ZERO_PAGE(0), addr + PERCPU_PAGE_SIZE,
					PAGE_READONLY);
		}
	}
#endif
	ia64_patch_gate();
}

static struct vm_area_struct gate_vma;

static int __init gate_vma_init(void)
{
	gate_vma.vm_mm = NULL;
	gate_vma.vm_start = FIXADDR_USER_START;
	gate_vma.vm_end = FIXADDR_USER_END;
	gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
	gate_vma.vm_page_prot = __P101;

	return 0;
}
__initcall(gate_vma_init);

struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
{
	return &gate_vma;
}

int in_gate_area_no_mm(unsigned long addr)
{
	if ((addr >= FIXADDR_USER_START) && (addr < FIXADDR_USER_END))
		return 1;
	return 0;
}

int in_gate_area(struct mm_struct *mm, unsigned long addr)
{
	return in_gate_area_no_mm(addr);
}

void ia64_mmu_init(void *my_cpu_data)
{
	unsigned long pta, impl_va_bits;
	extern void tlb_init(void);

#ifdef CONFIG_DISABLE_VHPT
#	define VHPT_ENABLE_BIT	0
#else
#	define VHPT_ENABLE_BIT	1
#endif

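	/*
	 * Check if the virtually mapped linear page table (VMLPT) overlaps
	 * with the mapped address space.  With a large enough page size the
	 * mapped address space can grow into the region reserved for the
	 * VMLPT; the sanity check further down rejects that case.
	 */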
#	define pte_bits			3
#	define mapped_space_bits	(3*(PAGE_SHIFT - pte_bits) + PAGE_SHIFT)

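	/*
	 * The virtual page table has to cover the entire implemented address
	 * space within a region even though not all of this space may be
	 * mappable: the Access bit and Dirty bit fault handlers perform
	 * non-speculative accesses to the virtual page table, so the address
	 * range of the virtual page table itself needs to be covered by it.
	 */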
#	define vmlpt_bits		(impl_va_bits - PAGE_SHIFT + pte_bits)
#	define POW2(n)			(1ULL << (n))

	impl_va_bits = ffz(~(local_cpu_data->unimpl_va_mask | (7UL << 61)));

	if (impl_va_bits < 51 || impl_va_bits > 61)
		panic("CPU has bogus IMPL_VA_MSB value of %lu!\n", impl_va_bits - 1);

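	/*
	 * mapped_space_bits - PAGE_SHIFT is the total number of ptes we need,
	 * which must fit into "vmlpt_bits - pte_bits" slots.  The second half
	 * of the test makes sure the mapped space does not overlap the
	 * unimplemented hole in the middle of the region.
	 */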
	if ((mapped_space_bits - PAGE_SHIFT > vmlpt_bits - pte_bits) ||
	    (mapped_space_bits > impl_va_bits - 1))
		panic("Cannot build a big enough virtual-linear page table"
		      " to cover mapped address space.\n"
		      " Try using a smaller page size.\n");

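	/* place the VMLPT at the end of each page-table mapped region: */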
	pta = POW2(61) - POW2(vmlpt_bits);

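	/*
	 * Set the (virtually mapped linear) page table address.  Bit 8
	 * selects between the short and long format, bits 2-7 the size of
	 * the table, and bit 0 whether the VHPT walker is enabled.
	 */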
	ia64_set_pta(pta | (0 << 8) | (vmlpt_bits << 2) | VHPT_ENABLE_BIT);

	ia64_tlb_init();

#ifdef	CONFIG_HUGETLB_PAGE
	ia64_set_rr(HPAGE_REGION_BASE, HPAGE_SHIFT << 2);
	ia64_srlz_d();
#endif
}

#ifdef CONFIG_VIRTUAL_MEM_MAP
int vmemmap_find_next_valid_pfn(int node, int i)
{
	unsigned long end_address, hole_next_pfn;
	unsigned long stop_address;
	pg_data_t *pgdat = NODE_DATA(node);

	end_address = (unsigned long) &vmem_map[pgdat->node_start_pfn + i];
	end_address = PAGE_ALIGN(end_address);
	stop_address = (unsigned long) &vmem_map[pgdat_end_pfn(pgdat)];

	do {
		pgd_t *pgd;
		pud_t *pud;
		pmd_t *pmd;
		pte_t *pte;

		pgd = pgd_offset_k(end_address);
		if (pgd_none(*pgd)) {
			end_address += PGDIR_SIZE;
			continue;
		}

		pud = pud_offset(pgd, end_address);
		if (pud_none(*pud)) {
			end_address += PUD_SIZE;
			continue;
		}

		pmd = pmd_offset(pud, end_address);
		if (pmd_none(*pmd)) {
			end_address += PMD_SIZE;
			continue;
		}

		pte = pte_offset_kernel(pmd, end_address);
retry_pte:
		if (pte_none(*pte)) {
			end_address += PAGE_SIZE;
			pte++;
			if ((end_address < stop_address) &&
			    (end_address != ALIGN(end_address, 1UL << PMD_SHIFT)))
				goto retry_pte;
			continue;
		}
		/* Found next valid vmem_map page */
		break;
	} while (end_address < stop_address);

	end_address = min(end_address, stop_address);
	end_address = end_address - (unsigned long) vmem_map + sizeof(struct page) - 1;
	hole_next_pfn = end_address / sizeof(struct page);
	return hole_next_pfn - pgdat->node_start_pfn;
}

int __init create_mem_map_page_table(u64 start, u64 end, void *arg)
{
	unsigned long address, start_page, end_page;
	struct page *map_start, *map_end;
	int node;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	map_start = vmem_map + (__pa(start) >> PAGE_SHIFT);
	map_end   = vmem_map + (__pa(end) >> PAGE_SHIFT);

	start_page = (unsigned long) map_start & PAGE_MASK;
	end_page = PAGE_ALIGN((unsigned long) map_end);
	node = paddr_to_nid(__pa(start));

	for (address = start_page; address < end_page; address += PAGE_SIZE) {
		pgd = pgd_offset_k(address);
		if (pgd_none(*pgd))
			pgd_populate(&init_mm, pgd, alloc_bootmem_pages_node(NODE_DATA(node), PAGE_SIZE));
		pud = pud_offset(pgd, address);

		if (pud_none(*pud))
			pud_populate(&init_mm, pud, alloc_bootmem_pages_node(NODE_DATA(node), PAGE_SIZE));
		pmd = pmd_offset(pud, address);

		if (pmd_none(*pmd))
			pmd_populate_kernel(&init_mm, pmd, alloc_bootmem_pages_node(NODE_DATA(node), PAGE_SIZE));
		pte = pte_offset_kernel(pmd, address);

		if (pte_none(*pte))
			set_pte(pte, pfn_pte(__pa(alloc_bootmem_pages_node(NODE_DATA(node), PAGE_SIZE)) >> PAGE_SHIFT,
					     PAGE_KERNEL));
	}
	return 0;
}

struct memmap_init_callback_data {
	struct page *start;
	struct page *end;
	int nid;
	unsigned long zone;
};

static int __meminit
virtual_memmap_init(u64 start, u64 end, void *arg)
{
	struct memmap_init_callback_data *args;
	struct page *map_start, *map_end;

	args = (struct memmap_init_callback_data *) arg;
	map_start = vmem_map + (__pa(start) >> PAGE_SHIFT);
	map_end   = vmem_map + (__pa(end) >> PAGE_SHIFT);

	if (map_start < args->start)
		map_start = args->start;
	if (map_end > args->end)
		map_end = args->end;

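	/*
	 * We have to initialize "out of range" struct page elements that fit
	 * completely on the same pages that were allocated for the "in range"
	 * elements, because they may be referenced later (and found to be
	 * "reserved").
	 */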
	map_start -= ((unsigned long) map_start & (PAGE_SIZE - 1)) / sizeof(struct page);
	map_end += ((PAGE_ALIGN((unsigned long) map_end) - (unsigned long) map_end)
		    / sizeof(struct page));

	if (map_start < map_end)
		memmap_init_zone((unsigned long)(map_end - map_start),
				 args->nid, args->zone, page_to_pfn(map_start),
				 MEMMAP_EARLY);
	return 0;
}

void __meminit
memmap_init (unsigned long size, int nid, unsigned long zone,
	     unsigned long start_pfn)
{
	if (!vmem_map)
		memmap_init_zone(size, nid, zone, start_pfn, MEMMAP_EARLY);
	else {
		struct page *start;
		struct memmap_init_callback_data args;

		start = pfn_to_page(start_pfn);
		args.start = start;
		args.end = start + size;
		args.nid = nid;
		args.zone = zone;

		efi_memmap_walk(virtual_memmap_init, &args);
	}
}

int
ia64_pfn_valid (unsigned long pfn)
{
	char byte;
	struct page *pg = pfn_to_page(pfn);

	return     (__get_user(byte, (char __user *) pg) == 0)
		&& ((((u64)pg & PAGE_MASK) == (((u64)(pg + 1) - 1) & PAGE_MASK))
			|| (__get_user(byte, (char __user *) (pg + 1) - 1) == 0));
}
EXPORT_SYMBOL(ia64_pfn_valid);

int __init find_largest_hole(u64 start, u64 end, void *arg)
{
	u64 *max_gap = arg;

	static u64 last_end = PAGE_OFFSET;

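	/* NOTE: this algorithm assumes efi memmap table is ordered */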
	if (*max_gap < (start - last_end))
		*max_gap = start - last_end;
	last_end = end;
	return 0;
}

#endif /* CONFIG_VIRTUAL_MEM_MAP */

int __init register_active_ranges(u64 start, u64 len, int nid)
{
	u64 end = start + len;

#ifdef CONFIG_KEXEC
	if (start > crashk_res.start && start < crashk_res.end)
		start = crashk_res.end;
	if (end > crashk_res.start && end < crashk_res.end)
		end = crashk_res.start;
#endif

	if (start < end)
		memblock_add_node(__pa(start), end - start, nid);
	return 0;
}

int
find_max_min_low_pfn (u64 start, u64 end, void *arg)
{
	unsigned long pfn_start, pfn_end;
#ifdef CONFIG_FLATMEM
	pfn_start = (PAGE_ALIGN(__pa(start))) >> PAGE_SHIFT;
	pfn_end = (PAGE_ALIGN(__pa(end - 1))) >> PAGE_SHIFT;
#else
	pfn_start = GRANULEROUNDDOWN(__pa(start)) >> PAGE_SHIFT;
	pfn_end = GRANULEROUNDUP(__pa(end - 1)) >> PAGE_SHIFT;
#endif
	min_low_pfn = min(min_low_pfn, pfn_start);
	max_low_pfn = max(max_low_pfn, pfn_end);
	return 0;
}

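/*
 * Boot command-line option "nolwsys" can be used to disable the use of any
 * light-weight system call handler.  When this option is in effect, all
 * fsyscalls will end up bubbling down into the kernel and calling the normal
 * (heavy-weight) syscall handler.  This is useful for performance testing,
 * but conceivably could also come in handy for debugging purposes.
 */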
static int nolwsys __initdata;

static int __init
nolwsys_setup (char *s)
{
	nolwsys = 1;
	return 1;
}

__setup("nolwsys", nolwsys_setup);

void __init
mem_init (void)
{
	int i;

	BUG_ON(PTRS_PER_PGD * sizeof(pgd_t) != PAGE_SIZE);
	BUG_ON(PTRS_PER_PMD * sizeof(pmd_t) != PAGE_SIZE);
	BUG_ON(PTRS_PER_PTE * sizeof(pte_t) != PAGE_SIZE);

#ifdef CONFIG_PCI
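	/*
	 * This needs to be called _after_ the command line has been parsed
	 * but _before_ any drivers that may need the PCI DMA interface are
	 * initialized or bootmem has been freed.
	 */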
	platform_dma_init();
#endif

#ifdef CONFIG_FLATMEM
	BUG_ON(!mem_map);
#endif

	set_max_mapnr(max_low_pfn);
	high_memory = __va(max_low_pfn * PAGE_SIZE);
	free_all_bootmem();
	mem_init_print_info(NULL);

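	/*
	 * For fsyscall entrypoints with no light-weight handler, use the
	 * ordinary (heavy-weight) handler, but mark it by setting bit 0, so
	 * the fsyscall entry code can tell them apart.
	 */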
	for (i = 0; i < NR_syscalls; ++i) {
		extern unsigned long fsyscall_table[NR_syscalls];
		extern unsigned long sys_call_table[NR_syscalls];

		if (!fsyscall_table[i] || nolwsys)
			fsyscall_table[i] = sys_call_table[i] | 1;
	}
	setup_gate();
}

#ifdef CONFIG_MEMORY_HOTPLUG
int arch_add_memory(int nid, u64 start, u64 size, bool for_device)
{
	pg_data_t *pgdat;
	struct zone *zone;
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;
	int ret;

	pgdat = NODE_DATA(nid);

	zone = pgdat->node_zones +
		zone_for_memory(nid, start, size, ZONE_NORMAL, for_device);
	ret = __add_pages(nid, zone, start_pfn, nr_pages);

	if (ret)
		printk("%s: Problem encountered in __add_pages() as ret=%d\n",
		       __func__, ret);

	return ret;
}

#ifdef CONFIG_MEMORY_HOTREMOVE
int arch_remove_memory(u64 start, u64 size)
{
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;
	struct zone *zone;
	int ret;

	zone = page_zone(pfn_to_page(start_pfn));
	ret = __remove_pages(zone, start_pfn, nr_pages);
	if (ret)
		pr_warn("%s: Problem encountered in __remove_pages() as"
			" ret=%d\n", __func__, ret);

	return ret;
}
#endif /* CONFIG_MEMORY_HOTREMOVE */
#endif /* CONFIG_MEMORY_HOTPLUG */