/*
 * Initialize MMU support.
 */
#include <linux/kernel.h>
#include <linux/init.h>

#include <linux/dma-map-ops.h>
#include <linux/dmar.h>
#include <linux/efi.h>
#include <linux/elf.h>
#include <linux/memblock.h>
#include <linux/mm.h>
#include <linux/sched/signal.h>
#include <linux/mmzone.h>
#include <linux/module.h>
#include <linux/personality.h>
#include <linux/reboot.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/proc_fs.h>
#include <linux/bitops.h>
#include <linux/kexec.h>
#include <linux/swiotlb.h>

#include <asm/dma.h>
#include <asm/io.h>
#include <asm/numa.h>
#include <asm/patch.h>
#include <asm/pgalloc.h>
#include <asm/sal.h>
#include <asm/sections.h>
#include <asm/tlb.h>
#include <linux/uaccess.h>
#include <asm/unistd.h>
#include <asm/mca.h>

extern void ia64_tlb_init (void);

unsigned long MAX_DMA_ADDRESS = PAGE_OFFSET + 0x100000000UL;

#ifdef CONFIG_VIRTUAL_MEM_MAP
unsigned long VMALLOC_END = VMALLOC_END_INIT;
EXPORT_SYMBOL(VMALLOC_END);
struct page *vmem_map;
EXPORT_SYMBOL(vmem_map);
#endif

struct page *zero_page_memmap_ptr;
EXPORT_SYMBOL(zero_page_memmap_ptr);

void
__ia64_sync_icache_dcache (pte_t pte)
{
	unsigned long addr;
	struct page *page;

	page = pte_page(pte);
	addr = (unsigned long) page_address(page);

	if (test_bit(PG_arch_1, &page->flags))
		return;

	flush_icache_range(addr, addr + page_size(page));
	set_bit(PG_arch_1, &page->flags);
}

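/*
 * DMA on ia64 is i-cache coherent, so pages that were completely written by
 * DMA can be marked "clean" (PG_arch_1 set); they then need no extra i-cache
 * flush when they are later mapped into an executable vm-area.
 */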
void arch_dma_mark_clean(phys_addr_t paddr, size_t size)
{
	unsigned long pfn = PHYS_PFN(paddr);

	do {
		set_bit(PG_arch_1, &pfn_to_page(pfn)->flags);
	} while (++pfn <= PHYS_PFN(paddr + size - 1));
}

inline void
ia64_set_rbs_bot (void)
{
	unsigned long stack_size = rlimit_max(RLIMIT_STACK) & -16;

	if (stack_size > MAX_USER_STACK_SIZE)
		stack_size = MAX_USER_STACK_SIZE;
	current->thread.rbs_bot = PAGE_ALIGN(current->mm->start_stack - stack_size);
}

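/*
 * Platform-dependent address-space initialization: set up the VM area for
 * the register backing store (which grows upwards) and, unless the
 * personality requests a zero-mapped page zero, map a NaT page at address 0.
 */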
void
ia64_init_addr_space (void)
{
	struct vm_area_struct *vma;

	ia64_set_rbs_bot();

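	/*
	 * Install the first page of the register backing store.  If the
	 * allocation fails we simply carry on; the process will then take a
	 * SIGSEGV the first time it tries to write to the backing store.
	 */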
	vma = vm_area_alloc(current->mm);
	if (vma) {
		vma_set_anonymous(vma);
		vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
		vma->vm_end = vma->vm_start + PAGE_SIZE;
		vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
		mmap_write_lock(current->mm);
		if (insert_vm_struct(current->mm, vma)) {
			mmap_write_unlock(current->mm);
			vm_area_free(vma);
			return;
		}
		mmap_write_unlock(current->mm);
	}

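	/* map NaT-page at address zero to speed up speculative dereferencing of NULL: */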
	if (!(current->personality & MMAP_PAGE_ZERO)) {
		vma = vm_area_alloc(current->mm);
		if (vma) {
			vma_set_anonymous(vma);
			vma->vm_end = PAGE_SIZE;
			vma->vm_page_prot = __pgprot(pgprot_val(PAGE_READONLY) | _PAGE_MA_NAT);
			vma->vm_flags = VM_READ | VM_MAYREAD | VM_IO |
					VM_DONTEXPAND | VM_DONTDUMP;
			mmap_write_lock(current->mm);
			if (insert_vm_struct(current->mm, vma)) {
				mmap_write_unlock(current->mm);
				vm_area_free(vma);
				return;
			}
			mmap_write_unlock(current->mm);
		}
	}
}

void
free_initmem (void)
{
	free_reserved_area(ia64_imva(__init_begin), ia64_imva(__init_end),
			   -1, "unused kernel");
}

void __init
free_initrd_mem (unsigned long start, unsigned long end)
{
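	/*
	 * EFI uses 4KB pages, while the kernel page size may be larger.  The
	 * initrd can therefore share its first and/or last kernel-sized page
	 * with other data (e.g. the end of the kernel image).  Align the start
	 * up and the end down so that only pages owned entirely by the initrd
	 * are freed.
	 */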
	start = PAGE_ALIGN(start);
	end = end & PAGE_MASK;

	if (start < end)
		printk(KERN_INFO "Freeing initrd memory: %ldkB freed\n", (end - start) >> 10);

	for (; start < end; start += PAGE_SIZE) {
		if (!virt_addr_valid(start))
			continue;
		free_reserved_page(virt_to_page(start));
	}
}

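/*
 * This installs a clean page in the kernel's page table.
 */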
static struct page * __init
put_kernel_page (struct page *page, unsigned long address, pgprot_t pgprot)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	pgd = pgd_offset_k(address);

	{
		p4d = p4d_alloc(&init_mm, pgd, address);
		if (!p4d)
			goto out;
		pud = pud_alloc(&init_mm, p4d, address);
		if (!pud)
			goto out;
		pmd = pmd_alloc(&init_mm, pud, address);
		if (!pmd)
			goto out;
		pte = pte_alloc_kernel(pmd, address);
		if (!pte)
			goto out;
		if (!pte_none(*pte))
			goto out;
		set_pte(pte, mk_pte(page, pgprot));
	}
  out:
	return page;
}

static void __init
setup_gate (void)
{
	struct page *page;

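	/*
	 * Map the gate page twice: once read-only to export the ELF headers
	 * etc. and once execute-only to enable privilege promotion via "epc":
	 */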
	page = virt_to_page(ia64_imva(__start_gate_section));
	put_kernel_page(page, GATE_ADDR, PAGE_READONLY);
#ifdef HAVE_BUGGY_SEGREL
	page = virt_to_page(ia64_imva(__start_gate_section + PAGE_SIZE));
	put_kernel_page(page, GATE_ADDR + PAGE_SIZE, PAGE_GATE);
#else
	put_kernel_page(page, GATE_ADDR + PERCPU_PAGE_SIZE, PAGE_GATE);
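	/* Fill in the holes (if any) with read-only zero pages: */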
	{
		unsigned long addr;

		for (addr = GATE_ADDR + PAGE_SIZE;
		     addr < GATE_ADDR + PERCPU_PAGE_SIZE;
		     addr += PAGE_SIZE)
		{
			put_kernel_page(ZERO_PAGE(0), addr,
					PAGE_READONLY);
			put_kernel_page(ZERO_PAGE(0), addr + PERCPU_PAGE_SIZE,
					PAGE_READONLY);
		}
	}
#endif
	ia64_patch_gate();
}

static struct vm_area_struct gate_vma;

static int __init gate_vma_init(void)
{
	vma_init(&gate_vma, NULL);
	gate_vma.vm_start = FIXADDR_USER_START;
	gate_vma.vm_end = FIXADDR_USER_END;
	gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
	gate_vma.vm_page_prot = __P101;

	return 0;
}
__initcall(gate_vma_init);

struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
{
	return &gate_vma;
}

int in_gate_area_no_mm(unsigned long addr)
{
	if ((addr >= FIXADDR_USER_START) && (addr < FIXADDR_USER_END))
		return 1;
	return 0;
}

int in_gate_area(struct mm_struct *mm, unsigned long addr)
{
	return in_gate_area_no_mm(addr);
}

void ia64_mmu_init(void *my_cpu_data)
{
	unsigned long pta, impl_va_bits;
	extern void tlb_init(void);

#ifdef CONFIG_DISABLE_VHPT
#	define VHPT_ENABLE_BIT	0
#else
#	define VHPT_ENABLE_BIT	1
#endif

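	/*
	 * pte_bits is log2(sizeof(pte_t)); mapped_space_bits is the number of
	 * virtual-address bits that the three-level page table can map within
	 * one region.  With a large enough page size this mapped space could
	 * overlap the virtually mapped linear page table (VMLPT), which the
	 * checks below guard against.
	 */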
#	define pte_bits			3
#	define mapped_space_bits	(3*(PAGE_SHIFT - pte_bits) + PAGE_SHIFT)

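	/*
	 * The virtual page table must cover the entire implemented address
	 * space within a region: the Access-bit and Dirty-bit fault handlers
	 * access it non-speculatively, so its own address range has to be
	 * mappable through it as well.
	 */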
#	define vmlpt_bits		(impl_va_bits - PAGE_SHIFT + pte_bits)
#	define POW2(n)			(1ULL << (n))

	impl_va_bits = ffz(~(local_cpu_data->unimpl_va_mask | (7UL << 61)));

	if (impl_va_bits < 51 || impl_va_bits > 61)
		panic("CPU has bogus IMPL_VA_MSB value of %lu!\n", impl_va_bits - 1);

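	/*
	 * mapped_space_bits - PAGE_SHIFT is the number of ptes we need, which
	 * must fit into the vmlpt_bits - pte_bits slots of the VMLPT.  The
	 * second half of the test makes sure the mapped space does not extend
	 * into the unimplemented hole in the middle of the region.
	 */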
	if ((mapped_space_bits - PAGE_SHIFT > vmlpt_bits - pte_bits) ||
	    (mapped_space_bits > impl_va_bits - 1))
		panic("Cannot build a big enough virtual-linear page table"
		      " to cover mapped address space.\n"
		      " Try using a smaller page size.\n");

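	/* Place the VMLPT at the end of each page-table mapped region: */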
	pta = POW2(61) - POW2(vmlpt_bits);

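	/*
	 * Set the (virtually mapped linear) page table address: bit 8 selects
	 * between the short and long VHPT format, bits 2-7 encode the size of
	 * the table, and bit 0 enables the VHPT walker.
	 */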
	ia64_set_pta(pta | (0 << 8) | (vmlpt_bits << 2) | VHPT_ENABLE_BIT);

	ia64_tlb_init();

#ifdef CONFIG_HUGETLB_PAGE
	ia64_set_rr(HPAGE_REGION_BASE, HPAGE_SHIFT << 2);
	ia64_srlz_d();
#endif
}

#ifdef CONFIG_VIRTUAL_MEM_MAP
int vmemmap_find_next_valid_pfn(int node, int i)
{
	unsigned long end_address, hole_next_pfn;
	unsigned long stop_address;
	pg_data_t *pgdat = NODE_DATA(node);

	end_address = (unsigned long) &vmem_map[pgdat->node_start_pfn + i];
	end_address = PAGE_ALIGN(end_address);
	stop_address = (unsigned long) &vmem_map[pgdat_end_pfn(pgdat)];

	do {
		pgd_t *pgd;
		p4d_t *p4d;
		pud_t *pud;
		pmd_t *pmd;
		pte_t *pte;

		pgd = pgd_offset_k(end_address);
		if (pgd_none(*pgd)) {
			end_address += PGDIR_SIZE;
			continue;
		}

		p4d = p4d_offset(pgd, end_address);
		if (p4d_none(*p4d)) {
			end_address += P4D_SIZE;
			continue;
		}

		pud = pud_offset(p4d, end_address);
		if (pud_none(*pud)) {
			end_address += PUD_SIZE;
			continue;
		}

		pmd = pmd_offset(pud, end_address);
		if (pmd_none(*pmd)) {
			end_address += PMD_SIZE;
			continue;
		}

		pte = pte_offset_kernel(pmd, end_address);
retry_pte:
		if (pte_none(*pte)) {
			end_address += PAGE_SIZE;
			pte++;
			if ((end_address < stop_address) &&
			    (end_address != ALIGN(end_address, 1UL << PMD_SHIFT)))
				goto retry_pte;
			continue;
		}

		break;
	} while (end_address < stop_address);

	end_address = min(end_address, stop_address);
	end_address = end_address - (unsigned long) vmem_map + sizeof(struct page) - 1;
	hole_next_pfn = end_address / sizeof(struct page);
	return hole_next_pfn - pgdat->node_start_pfn;
}

int __init create_mem_map_page_table(u64 start, u64 end, void *arg)
{
	unsigned long address, start_page, end_page;
	struct page *map_start, *map_end;
	int node;
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	map_start = vmem_map + (__pa(start) >> PAGE_SHIFT);
	map_end = vmem_map + (__pa(end) >> PAGE_SHIFT);

	start_page = (unsigned long) map_start & PAGE_MASK;
	end_page = PAGE_ALIGN((unsigned long) map_end);
	node = paddr_to_nid(__pa(start));

	for (address = start_page; address < end_page; address += PAGE_SIZE) {
		pgd = pgd_offset_k(address);
		if (pgd_none(*pgd)) {
			p4d = memblock_alloc_node(PAGE_SIZE, PAGE_SIZE, node);
			if (!p4d)
				goto err_alloc;
			pgd_populate(&init_mm, pgd, p4d);
		}
		p4d = p4d_offset(pgd, address);

		if (p4d_none(*p4d)) {
			pud = memblock_alloc_node(PAGE_SIZE, PAGE_SIZE, node);
			if (!pud)
				goto err_alloc;
			p4d_populate(&init_mm, p4d, pud);
		}
		pud = pud_offset(p4d, address);

		if (pud_none(*pud)) {
			pmd = memblock_alloc_node(PAGE_SIZE, PAGE_SIZE, node);
			if (!pmd)
				goto err_alloc;
			pud_populate(&init_mm, pud, pmd);
		}
		pmd = pmd_offset(pud, address);

		if (pmd_none(*pmd)) {
			pte = memblock_alloc_node(PAGE_SIZE, PAGE_SIZE, node);
			if (!pte)
				goto err_alloc;
			pmd_populate_kernel(&init_mm, pmd, pte);
		}
		pte = pte_offset_kernel(pmd, address);

		if (pte_none(*pte)) {
			void *page = memblock_alloc_node(PAGE_SIZE, PAGE_SIZE,
							 node);
			if (!page)
				goto err_alloc;
			set_pte(pte, pfn_pte(__pa(page) >> PAGE_SHIFT,
					     PAGE_KERNEL));
		}
	}
	return 0;

err_alloc:
	panic("%s: Failed to allocate %lu bytes align=0x%lx nid=%d\n",
	      __func__, PAGE_SIZE, PAGE_SIZE, node);
	return -ENOMEM;
}

struct memmap_init_callback_data {
	struct page *start;
	struct page *end;
	int nid;
	unsigned long zone;
};

static int __meminit
virtual_memmap_init(u64 start, u64 end, void *arg)
{
	struct memmap_init_callback_data *args;
	struct page *map_start, *map_end;

	args = (struct memmap_init_callback_data *) arg;
	map_start = vmem_map + (__pa(start) >> PAGE_SHIFT);
	map_end = vmem_map + (__pa(end) >> PAGE_SHIFT);

	if (map_start < args->start)
		map_start = args->start;
	if (map_end > args->end)
		map_end = args->end;

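	/*
	 * Round map_start down and map_end up so that "out of bounds" struct
	 * page entries sharing a page with the "in bounds" ones are also
	 * initialized; they may be looked at later (and found to be reserved).
	 */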
	map_start -= ((unsigned long) map_start & (PAGE_SIZE - 1)) / sizeof(struct page);
	map_end += ((PAGE_ALIGN((unsigned long) map_end) - (unsigned long) map_end)
		    / sizeof(struct page));

	if (map_start < map_end)
		memmap_init_zone((unsigned long)(map_end - map_start),
				 args->nid, args->zone, page_to_pfn(map_start),
				 MEMINIT_EARLY, NULL, MIGRATE_MOVABLE);
	return 0;
}

void __meminit
memmap_init (unsigned long size, int nid, unsigned long zone,
	     unsigned long start_pfn)
{
	if (!vmem_map) {
		memmap_init_zone(size, nid, zone, start_pfn,
				 MEMINIT_EARLY, NULL, MIGRATE_MOVABLE);
	} else {
		struct page *start;
		struct memmap_init_callback_data args;

		start = pfn_to_page(start_pfn);
		args.start = start;
		args.end = start + size;
		args.nid = nid;
		args.zone = zone;

		efi_memmap_walk(virtual_memmap_init, &args);
	}
}

int
ia64_pfn_valid (unsigned long pfn)
{
	char byte;
	struct page *pg = pfn_to_page(pfn);

	return     (__get_user(byte, (char __user *) pg) == 0)
		&& ((((u64)pg & PAGE_MASK) == (((u64)(pg + 1) - 1) & PAGE_MASK))
			|| (__get_user(byte, (char __user *) (pg + 1) - 1) == 0));
}
EXPORT_SYMBOL(ia64_pfn_valid);

int __init find_largest_hole(u64 start, u64 end, void *arg)
{
	u64 *max_gap = arg;

	static u64 last_end = PAGE_OFFSET;

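	/* NOTE: this algorithm assumes the efi memmap table is ordered */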
	if (*max_gap < (start - last_end))
		*max_gap = start - last_end;
	last_end = end;
	return 0;
}

#endif

int __init register_active_ranges(u64 start, u64 len, int nid)
{
	u64 end = start + len;

#ifdef CONFIG_KEXEC
	if (start > crashk_res.start && start < crashk_res.end)
		start = crashk_res.end;
	if (end > crashk_res.start && end < crashk_res.end)
		end = crashk_res.start;
#endif

	if (start < end)
		memblock_add_node(__pa(start), end - start, nid);
	return 0;
}

int
find_max_min_low_pfn (u64 start, u64 end, void *arg)
{
	unsigned long pfn_start, pfn_end;
#ifdef CONFIG_FLATMEM
	pfn_start = (PAGE_ALIGN(__pa(start))) >> PAGE_SHIFT;
	pfn_end = (PAGE_ALIGN(__pa(end - 1))) >> PAGE_SHIFT;
#else
	pfn_start = GRANULEROUNDDOWN(__pa(start)) >> PAGE_SHIFT;
	pfn_end = GRANULEROUNDUP(__pa(end - 1)) >> PAGE_SHIFT;
#endif
	min_low_pfn = min(min_low_pfn, pfn_start);
	max_low_pfn = max(max_low_pfn, pfn_end);
	return 0;
}

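/*
 * The boot option "nolwsys" disables the use of light-weight system call
 * handlers: with it set, every fsyscall falls back to the ordinary
 * (heavy-weight) syscall path.  Useful mainly for performance comparisons.
 */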
static int nolwsys __initdata;

static int __init
nolwsys_setup (char *s)
{
	nolwsys = 1;
	return 1;
}

__setup("nolwsys", nolwsys_setup);

void __init
mem_init (void)
{
	int i;

	BUG_ON(PTRS_PER_PGD * sizeof(pgd_t) != PAGE_SIZE);
	BUG_ON(PTRS_PER_PMD * sizeof(pmd_t) != PAGE_SIZE);
	BUG_ON(PTRS_PER_PTE * sizeof(pte_t) != PAGE_SIZE);

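	/*
	 * This needs to be done after the command line has been parsed but
	 * before any drivers that may need the PCI DMA interface are
	 * initialized or bootmem has been freed.
	 */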
#ifdef CONFIG_INTEL_IOMMU
	detect_intel_iommu();
	if (!iommu_detected)
#endif
#ifdef CONFIG_SWIOTLB
		swiotlb_init(1);
#endif

#ifdef CONFIG_FLATMEM
	BUG_ON(!mem_map);
#endif

	set_max_mapnr(max_low_pfn);
	high_memory = __va(max_low_pfn * PAGE_SIZE);
	memblock_free_all();
	mem_init_print_info(NULL);

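	/*
	 * For fsyscall entrypoints with no light-weight handler, use the
	 * ordinary (heavy-weight) handler, but mark it by setting bit 0, so
	 * the fsyscall entry code can tell them apart.
	 */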
	for (i = 0; i < NR_syscalls; ++i) {
		extern unsigned long fsyscall_table[NR_syscalls];
		extern unsigned long sys_call_table[NR_syscalls];

		if (!fsyscall_table[i] || nolwsys)
			fsyscall_table[i] = sys_call_table[i] | 1;
	}
	setup_gate();
}

#ifdef CONFIG_MEMORY_HOTPLUG
int arch_add_memory(int nid, u64 start, u64 size,
		    struct mhp_params *params)
{
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;
	int ret;

	if (WARN_ON_ONCE(params->pgprot.pgprot != PAGE_KERNEL.pgprot))
		return -EINVAL;

	ret = __add_pages(nid, start_pfn, nr_pages, params);
	if (ret)
		printk("%s: Problem encountered in __add_pages() as ret=%d\n",
		       __func__, ret);

	return ret;
}

void arch_remove_memory(int nid, u64 start, u64 size,
			struct vmem_altmap *altmap)
{
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;

	__remove_pages(start_pfn, nr_pages, altmap);
}
#endif