/*
 * Initialize MMU support.
 */
#include <linux/kernel.h>
#include <linux/init.h>

#include <linux/bootmem.h>
#include <linux/efi.h>
#include <linux/elf.h>
#include <linux/memblock.h>
#include <linux/mm.h>
#include <linux/sched/signal.h>
#include <linux/mmzone.h>
#include <linux/module.h>
#include <linux/personality.h>
#include <linux/reboot.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/proc_fs.h>
#include <linux/bitops.h>
#include <linux/kexec.h>

#include <asm/dma.h>
#include <asm/io.h>
#include <asm/machvec.h>
#include <asm/numa.h>
#include <asm/patch.h>
#include <asm/pgalloc.h>
#include <asm/sal.h>
#include <asm/sections.h>
#include <asm/tlb.h>
#include <linux/uaccess.h>
#include <asm/unistd.h>
#include <asm/mca.h>

extern void ia64_tlb_init (void);

unsigned long MAX_DMA_ADDRESS = PAGE_OFFSET + 0x100000000UL;

#ifdef CONFIG_VIRTUAL_MEM_MAP
unsigned long VMALLOC_END = VMALLOC_END_INIT;
EXPORT_SYMBOL(VMALLOC_END);
struct page *vmem_map;
EXPORT_SYMBOL(vmem_map);
#endif

struct page *zero_page_memmap_ptr;
EXPORT_SYMBOL(zero_page_memmap_ptr);

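/*
 * Called on the first executable mapping of a page: flush the i-cache for
 * the page and remember, via PG_arch_1, that i-cache and d-cache are now
 * coherent, so later mappings can skip the flush.
 */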
void
__ia64_sync_icache_dcache (pte_t pte)
{
        unsigned long addr;
        struct page *page;

        page = pte_page(pte);
        addr = (unsigned long) page_address(page);

        if (test_bit(PG_arch_1, &page->flags))
                return;         /* i-cache is already coherent with d-cache */

        flush_icache_range(addr, addr + (PAGE_SIZE << compound_order(page)));
        set_bit(PG_arch_1, &page->flags);       /* mark page as clean */
}

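/*
 * DMA is i-cache coherent on ia64, so any full pages written by DMA can be
 * marked "clean" (PG_arch_1 set) up front; they then do not need an extra
 * i-cache flush when they are later mapped into an executable vm-area.
 */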
void
dma_mark_clean(void *addr, size_t size)
{
        unsigned long pg_addr, end;

        pg_addr = PAGE_ALIGN((unsigned long) addr);
        end = (unsigned long) addr + size;
        while (pg_addr + PAGE_SIZE <= end) {
                struct page *page = virt_to_page(pg_addr);
                set_bit(PG_arch_1, &page->flags);
                pg_addr += PAGE_SIZE;
        }
}

inline void
ia64_set_rbs_bot (void)
{
        unsigned long stack_size = rlimit_max(RLIMIT_STACK) & -16;

        if (stack_size > MAX_USER_STACK_SIZE)
                stack_size = MAX_USER_STACK_SIZE;
        current->thread.rbs_bot = PAGE_ALIGN(current->mm->start_stack - stack_size);
}

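/*
 * Perform IA-64 specific address-space setup for a new process: create the
 * VM area for the register backing store (which grows upward) and, unless
 * the personality asks for a mapped page zero, install a NaT page at
 * address 0.
 */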
void
ia64_init_addr_space (void)
{
        struct vm_area_struct *vma;

        ia64_set_rbs_bot();

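        /*
         * If the allocation fails we simply carry on; the process will take
         * a SEGV the first time it touches the register backing store
         * instead.
         */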
        vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
        if (vma) {
                INIT_LIST_HEAD(&vma->anon_vma_chain);
                vma->vm_mm = current->mm;
                vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
                vma->vm_end = vma->vm_start + PAGE_SIZE;
                vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
                vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
                down_write(&current->mm->mmap_sem);
                if (insert_vm_struct(current->mm, vma)) {
                        up_write(&current->mm->mmap_sem);
                        kmem_cache_free(vm_area_cachep, vma);
                        return;
                }
                up_write(&current->mm->mmap_sem);
        }

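        /* map NaT-page at address zero to speed up speculative dereferencing of NULL: */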
        if (!(current->personality & MMAP_PAGE_ZERO)) {
                vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
                if (vma) {
                        INIT_LIST_HEAD(&vma->anon_vma_chain);
                        vma->vm_mm = current->mm;
                        vma->vm_end = PAGE_SIZE;
                        vma->vm_page_prot = __pgprot(pgprot_val(PAGE_READONLY) | _PAGE_MA_NAT);
                        vma->vm_flags = VM_READ | VM_MAYREAD | VM_IO |
                                        VM_DONTEXPAND | VM_DONTDUMP;
                        down_write(&current->mm->mmap_sem);
                        if (insert_vm_struct(current->mm, vma)) {
                                up_write(&current->mm->mmap_sem);
                                kmem_cache_free(vm_area_cachep, vma);
                                return;
                        }
                        up_write(&current->mm->mmap_sem);
                }
        }
}

void
free_initmem (void)
{
        free_reserved_area(ia64_imva(__init_begin), ia64_imva(__init_end),
                           -1, "unused kernel");
}

void __init
free_initrd_mem (unsigned long start, unsigned long end)
{
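        /*
         * EFI uses 4KB pages while the kernel may use a larger page size, so
         * the initrd can share a (kernel-sized) page with the end of the
         * kernel image.  To avoid freeing kernel text/data, round the start
         * of the initrd up and its end down to page boundaries and free only
         * the pages fully inside that range.
         */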
        start = PAGE_ALIGN(start);
        end = end & PAGE_MASK;

        if (start < end)
                printk(KERN_INFO "Freeing initrd memory: %ldkB freed\n", (end - start) >> 10);

        for (; start < end; start += PAGE_SIZE) {
                if (!virt_addr_valid(start))
                        continue;
                free_reserved_page(virt_to_page(start));
        }
}

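/*
 * Install a page in the kernel's page table at the given virtual address.
 */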
static struct page * __init
put_kernel_page (struct page *page, unsigned long address, pgprot_t pgprot)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;

        pgd = pgd_offset_k(address);    /* note: this is NOT pgd_offset()! */

        {
                pud = pud_alloc(&init_mm, pgd, address);
                if (!pud)
                        goto out;
                pmd = pmd_alloc(&init_mm, pud, address);
                if (!pmd)
                        goto out;
                pte = pte_alloc_kernel(pmd, address);
                if (!pte)
                        goto out;
                if (!pte_none(*pte))
                        goto out;
                set_pte(pte, mk_pte(page, pgprot));
        }
  out:
        /* no need to flush the TLB: the mapping was not present before */
        return page;
}

static void __init
setup_gate (void)
{
        struct page *page;

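        /*
         * Map the gate page twice: once read-only to export the ELF headers
         * etc. and once execute-only to enable privilege promotion via
         * "epc":
         */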
        page = virt_to_page(ia64_imva(__start_gate_section));
        put_kernel_page(page, GATE_ADDR, PAGE_READONLY);
#ifdef HAVE_BUGGY_SEGREL
        page = virt_to_page(ia64_imva(__start_gate_section + PAGE_SIZE));
        put_kernel_page(page, GATE_ADDR + PAGE_SIZE, PAGE_GATE);
#else
        put_kernel_page(page, GATE_ADDR + PERCPU_PAGE_SIZE, PAGE_GATE);
        /* Fill in the holes (if any) with read-only zero pages: */
        {
                unsigned long addr;

                for (addr = GATE_ADDR + PAGE_SIZE;
                     addr < GATE_ADDR + PERCPU_PAGE_SIZE;
                     addr += PAGE_SIZE)
                {
                        put_kernel_page(ZERO_PAGE(0), addr,
                                        PAGE_READONLY);
                        put_kernel_page(ZERO_PAGE(0), addr + PERCPU_PAGE_SIZE,
                                        PAGE_READONLY);
                }
        }
#endif
        ia64_patch_gate();
}

static struct vm_area_struct gate_vma;

static int __init gate_vma_init(void)
{
        gate_vma.vm_mm = NULL;
        gate_vma.vm_start = FIXADDR_USER_START;
        gate_vma.vm_end = FIXADDR_USER_END;
        gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
        gate_vma.vm_page_prot = __P101;

        return 0;
}
__initcall(gate_vma_init);

struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
{
        return &gate_vma;
}

int in_gate_area_no_mm(unsigned long addr)
{
        if ((addr >= FIXADDR_USER_START) && (addr < FIXADDR_USER_END))
                return 1;
        return 0;
}

int in_gate_area(struct mm_struct *mm, unsigned long addr)
{
        return in_gate_area_no_mm(addr);
}

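/*
 * Set up this CPU's page-table address (PTA) register for the virtually
 * mapped linear page table and initialize the TLB.
 */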
void ia64_mmu_init(void *my_cpu_data)
{
        unsigned long pta, impl_va_bits;
        extern void tlb_init(void);

#ifdef CONFIG_DISABLE_VHPT
#       define VHPT_ENABLE_BIT  0
#else
#       define VHPT_ENABLE_BIT  1
#endif

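        /*
         * Check whether the virtually mapped linear page table (VMLPT)
         * would overlap the mapped address space.  The architecture
         * guarantees at least 50 implemented virtual address bits, but a
         * large page size can make the mapped space big enough to collide
         * with the VMLPT, in which case we give up below.
         */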
#       define pte_bits                 3
#       define mapped_space_bits        (3*(PAGE_SHIFT - pte_bits) + PAGE_SHIFT)

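        /*
         * The virtual page table must cover the entire implemented address
         * space within a region, even if not all of it is mappable, because
         * the Access/Dirty bit fault handlers perform non-speculative
         * accesses to it.
         */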
#       define vmlpt_bits               (impl_va_bits - PAGE_SHIFT + pte_bits)
#       define POW2(n)                  (1ULL << (n))

        impl_va_bits = ffz(~(local_cpu_data->unimpl_va_mask | (7UL << 61)));

        if (impl_va_bits < 51 || impl_va_bits > 61)
                panic("CPU has bogus IMPL_VA_MSB value of %lu!\n", impl_va_bits - 1);

        /*
         * mapped_space_bits - PAGE_SHIFT is the total number of PTEs we
         * need, which must fit into "vmlpt_bits - pte_bits" slots.  The
         * second test makes sure the mapped space does not overlap the
         * unimplemented hole in the middle of the region.
         */
        if ((mapped_space_bits - PAGE_SHIFT > vmlpt_bits - pte_bits) ||
            (mapped_space_bits > impl_va_bits - 1))
                panic("Cannot build a big enough virtual-linear page table"
                      " to cover mapped address space.\n"
                      " Try using a smaller page size.\n");

        /* place the VMLPT at the end of each page-table mapped region: */
        pta = POW2(61) - POW2(vmlpt_bits);

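        /*
         * Set the (virtually mapped linear) page table address: bit 8
         * selects between the short and long format, bits 2-7 the size of
         * the table, and bit 0 whether the VHPT walker is enabled.
         */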
        ia64_set_pta(pta | (0 << 8) | (vmlpt_bits << 2) | VHPT_ENABLE_BIT);

        ia64_tlb_init();

#ifdef  CONFIG_HUGETLB_PAGE
        ia64_set_rr(HPAGE_REGION_BASE, HPAGE_SHIFT << 2);
        ia64_srlz_d();
#endif
}

#ifdef CONFIG_VIRTUAL_MEM_MAP
int vmemmap_find_next_valid_pfn(int node, int i)
{
        unsigned long end_address, hole_next_pfn;
        unsigned long stop_address;
        pg_data_t *pgdat = NODE_DATA(node);

        end_address = (unsigned long) &vmem_map[pgdat->node_start_pfn + i];
        end_address = PAGE_ALIGN(end_address);
        stop_address = (unsigned long) &vmem_map[pgdat_end_pfn(pgdat)];

        do {
                pgd_t *pgd;
                pud_t *pud;
                pmd_t *pmd;
                pte_t *pte;

                pgd = pgd_offset_k(end_address);
                if (pgd_none(*pgd)) {
                        end_address += PGDIR_SIZE;
                        continue;
                }

                pud = pud_offset(pgd, end_address);
                if (pud_none(*pud)) {
                        end_address += PUD_SIZE;
                        continue;
                }

                pmd = pmd_offset(pud, end_address);
                if (pmd_none(*pmd)) {
                        end_address += PMD_SIZE;
                        continue;
                }

                pte = pte_offset_kernel(pmd, end_address);
retry_pte:
                if (pte_none(*pte)) {
                        end_address += PAGE_SIZE;
                        pte++;
                        if ((end_address < stop_address) &&
                            (end_address != ALIGN(end_address, 1UL << PMD_SHIFT)))
                                goto retry_pte;
                        continue;
                }

                break;
        } while (end_address < stop_address);

        end_address = min(end_address, stop_address);
        end_address = end_address - (unsigned long) vmem_map + sizeof(struct page) - 1;
        hole_next_pfn = end_address / sizeof(struct page);
        return hole_next_pfn - pgdat->node_start_pfn;
}

int __init create_mem_map_page_table(u64 start, u64 end, void *arg)
{
        unsigned long address, start_page, end_page;
        struct page *map_start, *map_end;
        int node;
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;

        map_start = vmem_map + (__pa(start) >> PAGE_SHIFT);
        map_end = vmem_map + (__pa(end) >> PAGE_SHIFT);

        start_page = (unsigned long) map_start & PAGE_MASK;
        end_page = PAGE_ALIGN((unsigned long) map_end);
        node = paddr_to_nid(__pa(start));

        for (address = start_page; address < end_page; address += PAGE_SIZE) {
                pgd = pgd_offset_k(address);
                if (pgd_none(*pgd))
                        pgd_populate(&init_mm, pgd, alloc_bootmem_pages_node(NODE_DATA(node), PAGE_SIZE));
                pud = pud_offset(pgd, address);

                if (pud_none(*pud))
                        pud_populate(&init_mm, pud, alloc_bootmem_pages_node(NODE_DATA(node), PAGE_SIZE));
                pmd = pmd_offset(pud, address);

                if (pmd_none(*pmd))
                        pmd_populate_kernel(&init_mm, pmd, alloc_bootmem_pages_node(NODE_DATA(node), PAGE_SIZE));
                pte = pte_offset_kernel(pmd, address);

                if (pte_none(*pte))
                        set_pte(pte, pfn_pte(__pa(alloc_bootmem_pages_node(NODE_DATA(node), PAGE_SIZE)) >> PAGE_SHIFT,
                                             PAGE_KERNEL));
        }
        return 0;
}

struct memmap_init_callback_data {
        struct page *start;
        struct page *end;
        int nid;
        unsigned long zone;
};

static int __meminit
virtual_memmap_init(u64 start, u64 end, void *arg)
{
        struct memmap_init_callback_data *args;
        struct page *map_start, *map_end;

        args = (struct memmap_init_callback_data *) arg;
        map_start = vmem_map + (__pa(start) >> PAGE_SHIFT);
        map_end = vmem_map + (__pa(end) >> PAGE_SHIFT);

        if (map_start < args->start)
                map_start = args->start;
        if (map_end > args->end)
                map_end = args->end;

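        /*
         * Also initialize the "out of bounds" struct page elements that
         * share the pages allocated for the in-bounds elements, since they
         * may be looked at later (and found to be "reserved").
         */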
        map_start -= ((unsigned long) map_start & (PAGE_SIZE - 1)) / sizeof(struct page);
        map_end += ((PAGE_ALIGN((unsigned long) map_end) - (unsigned long) map_end)
                    / sizeof(struct page));

        if (map_start < map_end)
                memmap_init_zone((unsigned long)(map_end - map_start),
                                 args->nid, args->zone, page_to_pfn(map_start),
                                 MEMMAP_EARLY);
        return 0;
}

void __meminit
memmap_init (unsigned long size, int nid, unsigned long zone,
             unsigned long start_pfn)
{
        if (!vmem_map)
                memmap_init_zone(size, nid, zone, start_pfn, MEMMAP_EARLY);
        else {
                struct page *start;
                struct memmap_init_callback_data args;

                start = pfn_to_page(start_pfn);
                args.start = start;
                args.end = start + size;
                args.nid = nid;
                args.zone = zone;

                efi_memmap_walk(virtual_memmap_init, &args);
        }
}

int
ia64_pfn_valid (unsigned long pfn)
{
        char byte;
        struct page *pg = pfn_to_page(pfn);

        return (__get_user(byte, (char __user *) pg) == 0)
            && ((((u64)pg & PAGE_MASK) == (((u64)(pg + 1) - 1) & PAGE_MASK))
                || (__get_user(byte, (char __user *) (pg + 1) - 1) == 0));
}
EXPORT_SYMBOL(ia64_pfn_valid);

int __init find_largest_hole(u64 start, u64 end, void *arg)
{
        u64 *max_gap = arg;

        static u64 last_end = PAGE_OFFSET;

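        /* NOTE: this algorithm assumes the EFI memmap is ordered. */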
        if (*max_gap < (start - last_end))
                *max_gap = start - last_end;
        last_end = end;
        return 0;
}

#endif /* CONFIG_VIRTUAL_MEM_MAP */

int __init register_active_ranges(u64 start, u64 len, int nid)
{
        u64 end = start + len;

#ifdef CONFIG_KEXEC
        if (start > crashk_res.start && start < crashk_res.end)
                start = crashk_res.end;
        if (end > crashk_res.start && end < crashk_res.end)
                end = crashk_res.start;
#endif

        if (start < end)
                memblock_add_node(__pa(start), end - start, nid);
        return 0;
}

int
find_max_min_low_pfn (u64 start, u64 end, void *arg)
{
        unsigned long pfn_start, pfn_end;
#ifdef CONFIG_FLATMEM
        pfn_start = (PAGE_ALIGN(__pa(start))) >> PAGE_SHIFT;
        pfn_end = (PAGE_ALIGN(__pa(end - 1))) >> PAGE_SHIFT;
#else
        pfn_start = GRANULEROUNDDOWN(__pa(start)) >> PAGE_SHIFT;
        pfn_end = GRANULEROUNDUP(__pa(end - 1)) >> PAGE_SHIFT;
#endif
        min_low_pfn = min(min_low_pfn, pfn_start);
        max_low_pfn = max(max_low_pfn, pfn_end);
        return 0;
}

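/*
 * The "nolwsys" boot option disables the light-weight system call handlers:
 * every fsyscall then falls back to the normal (heavy-weight) kernel syscall
 * path.  Useful for performance comparisons, not for production use.
 */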
static int nolwsys __initdata;

static int __init
nolwsys_setup (char *s)
{
        nolwsys = 1;
        return 1;
}

__setup("nolwsys", nolwsys_setup);

void __init
mem_init (void)
{
        int i;

        BUG_ON(PTRS_PER_PGD * sizeof(pgd_t) != PAGE_SIZE);
        BUG_ON(PTRS_PER_PMD * sizeof(pmd_t) != PAGE_SIZE);
        BUG_ON(PTRS_PER_PTE * sizeof(pte_t) != PAGE_SIZE);

#ifdef CONFIG_PCI
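        /*
         * This must run after the command line has been parsed but before
         * any driver that may need the PCI DMA interface is initialized and
         * before bootmem is freed.
         */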
        platform_dma_init();
#endif

#ifdef CONFIG_FLATMEM
        BUG_ON(!mem_map);
#endif

        set_max_mapnr(max_low_pfn);
        high_memory = __va(max_low_pfn * PAGE_SIZE);
        free_all_bootmem();
        mem_init_print_info(NULL);

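        /*
         * For fsyscall entrypoints with no light-weight handler, use the
         * ordinary (heavy-weight) handler, but mark it by setting bit 0, so
         * the fsyscall entry code can tell them apart.
         */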
        for (i = 0; i < NR_syscalls; ++i) {
                extern unsigned long fsyscall_table[NR_syscalls];
                extern unsigned long sys_call_table[NR_syscalls];

                if (!fsyscall_table[i] || nolwsys)
                        fsyscall_table[i] = sys_call_table[i] | 1;
        }
        setup_gate();
}

#ifdef CONFIG_MEMORY_HOTPLUG
int arch_add_memory(int nid, u64 start, u64 size, bool want_memblock)
{
        unsigned long start_pfn = start >> PAGE_SHIFT;
        unsigned long nr_pages = size >> PAGE_SHIFT;
        int ret;

        ret = __add_pages(nid, start_pfn, nr_pages, want_memblock);
        if (ret)
                printk("%s: Problem encountered in __add_pages() as ret=%d\n",
                       __func__, ret);

        return ret;
}

#ifdef CONFIG_MEMORY_HOTREMOVE
int arch_remove_memory(u64 start, u64 size)
{
        unsigned long start_pfn = start >> PAGE_SHIFT;
        unsigned long nr_pages = size >> PAGE_SHIFT;
        struct zone *zone;
        int ret;

        zone = page_zone(pfn_to_page(start_pfn));
        ret = __remove_pages(zone, start_pfn, nr_pages);
        if (ret)
                pr_warn("%s: Problem encountered in __remove_pages() as"
                        " ret=%d\n", __func__, ret);

        return ret;
}
#endif
#endif