// SPDX-License-Identifier: GPL-2.0-only
/*
 * Initialize MMU support.
 *
 * Copyright (C) 1998-2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */
#include <linux/kernel.h>
#include <linux/init.h>

#include <linux/dma-map-ops.h>
#include <linux/dmar.h>
#include <linux/efi.h>
#include <linux/elf.h>
#include <linux/memblock.h>
#include <linux/mm.h>
#include <linux/sched/signal.h>
#include <linux/mmzone.h>
#include <linux/module.h>
#include <linux/personality.h>
#include <linux/reboot.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/proc_fs.h>
#include <linux/bitops.h>
#include <linux/kexec.h>
#include <linux/swiotlb.h>

#include <asm/dma.h>
#include <asm/efi.h>
#include <asm/io.h>
#include <asm/numa.h>
#include <asm/patch.h>
#include <asm/pgalloc.h>
#include <asm/sal.h>
#include <asm/sections.h>
#include <asm/tlb.h>
#include <linux/uaccess.h>
#include <asm/unistd.h>
#include <asm/mca.h>

extern void ia64_tlb_init (void);

unsigned long MAX_DMA_ADDRESS = PAGE_OFFSET + 0x100000000UL;

struct page *zero_page_memmap_ptr;
EXPORT_SYMBOL(zero_page_memmap_ptr);

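/*
 * Lazily synchronize i-cache and d-cache for an executable page: PG_arch_1
 * records that the caches are already coherent for this page, so the
 * expensive flush_icache_range() is done at most once per page.
 */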
void
__ia64_sync_icache_dcache (pte_t pte)
{
	unsigned long addr;
	struct page *page;

	page = pte_page(pte);
	addr = (unsigned long) page_address(page);

	if (test_bit(PG_arch_1, &page->flags))
		return;				/* i-cache is already coherent with d-cache */

	flush_icache_range(addr, addr + page_size(page));
	set_bit(PG_arch_1, &page->flags);	/* mark page as clean */
}

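/*
 * DMA on ia64 is i-cache coherent, so any complete pages written via DMA
 * can be marked "clean" (PG_arch_1 set); __ia64_sync_icache_dcache() then
 * skips the i-cache flush when such a page is mapped executable.
 */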
void arch_dma_mark_clean(phys_addr_t paddr, size_t size)
{
	unsigned long pfn = PHYS_PFN(paddr);

	do {
		set_bit(PG_arch_1, &pfn_to_page(pfn)->flags);
	} while (++pfn <= PHYS_PFN(paddr + size - 1));
}

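/*
 * Compute the bottom of the register backing store, which grows upward
 * from below the stack: cap the stack rlimit at MAX_USER_STACK_SIZE and
 * place rbs_bot that far below start_stack, page-aligned.
 */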
inline void
ia64_set_rbs_bot (void)
{
	unsigned long stack_size = rlimit_max(RLIMIT_STACK) & -16;

	if (stack_size > MAX_USER_STACK_SIZE)
		stack_size = MAX_USER_STACK_SIZE;
	current->thread.rbs_bot = PAGE_ALIGN(current->mm->start_stack - stack_size);
}

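/*
 * This performs some platform-dependent address space initialization.
 * On IA-64, we want to set up the VM area for the register backing store
 * (which grows upward) and, unless the personality requests MMAP_PAGE_ZERO,
 * map a NaT page at address zero to speed up speculative dereferencing of
 * NULL pointers.
 */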
void
ia64_init_addr_space (void)
{
	struct vm_area_struct *vma;

	ia64_set_rbs_bot();

	/*
	 * If we're out of memory and vm_area_alloc() returns NULL, we simply
	 * ignore the problem.  When the process attempts to write to the
	 * register backing store for the first time, it will get a SEGFAULT
	 * in this case.
	 */
	vma = vm_area_alloc(current->mm);
	if (vma) {
		vma_set_anonymous(vma);
		vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
		vma->vm_end = vma->vm_start + PAGE_SIZE;
		vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
		mmap_write_lock(current->mm);
		if (insert_vm_struct(current->mm, vma)) {
			mmap_write_unlock(current->mm);
			vm_area_free(vma);
			return;
		}
		mmap_write_unlock(current->mm);
	}

	/* map NaT-page at address zero to speed up speculative dereferencing of NULL: */
	if (!(current->personality & MMAP_PAGE_ZERO)) {
		vma = vm_area_alloc(current->mm);
		if (vma) {
			vma_set_anonymous(vma);
			vma->vm_end = PAGE_SIZE;
			vma->vm_page_prot = __pgprot(pgprot_val(PAGE_READONLY) | _PAGE_MA_NAT);
			vma->vm_flags = VM_READ | VM_MAYREAD | VM_IO |
					VM_DONTEXPAND | VM_DONTDUMP;
			mmap_write_lock(current->mm);
			if (insert_vm_struct(current->mm, vma)) {
				mmap_write_unlock(current->mm);
				vm_area_free(vma);
				return;
			}
			mmap_write_unlock(current->mm);
		}
	}
}

void
free_initmem (void)
{
	free_reserved_area(ia64_imva(__init_begin), ia64_imva(__init_end),
			   -1, "unused kernel");
}

void __init
free_initrd_mem (unsigned long start, unsigned long end)
{
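	/*
	 * EFI uses 4KB pages while the kernel can use 4KB or bigger.  EFI
	 * and the kernel may therefore have different page sizes, so the
	 * initrd can share a kernel-sized page with the end of the kernel
	 * image.  To avoid freeing a page that is still partially in use,
	 * align the start of the initrd up and its end down to kernel page
	 * boundaries and free only the whole pages in between.
	 */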
	start = PAGE_ALIGN(start);
	end = end & PAGE_MASK;

	if (start < end)
		printk(KERN_INFO "Freeing initrd memory: %ldkB freed\n",
		       (end - start) >> 10);

	for (; start < end; start += PAGE_SIZE) {
		if (!virt_addr_valid(start))
			continue;
		free_reserved_page(virt_to_page(start));
	}
}

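/*
 * This installs a clean page in the kernel's page table.
 */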
static struct page * __init
put_kernel_page (struct page *page, unsigned long address, pgprot_t pgprot)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	pgd = pgd_offset_k(address);	/* note: this is NOT pgd_offset()! */

	p4d = p4d_alloc(&init_mm, pgd, address);
	if (!p4d)
		goto out;
	pud = pud_alloc(&init_mm, p4d, address);
	if (!pud)
		goto out;
	pmd = pmd_alloc(&init_mm, pud, address);
	if (!pmd)
		goto out;
	pte = pte_alloc_kernel(pmd, address);
	if (!pte)
		goto out;
	if (!pte_none(*pte))
		goto out;
	set_pte(pte, mk_pte(page, pgprot));
  out:
	/* no need for flush_tlb */
	return page;
}

static void __init
setup_gate (void)
{
	struct page *page;

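	/*
	 * Map the gate page twice: once read-only to export the ELF headers
	 * etc., and once execute-only to enable privilege promotion via
	 * "epc":
	 */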
	page = virt_to_page(ia64_imva(__start_gate_section));
	put_kernel_page(page, GATE_ADDR, PAGE_READONLY);
#ifdef HAVE_BUGGY_SEGREL
	page = virt_to_page(ia64_imva(__start_gate_section + PAGE_SIZE));
	put_kernel_page(page, GATE_ADDR + PAGE_SIZE, PAGE_GATE);
#else
	put_kernel_page(page, GATE_ADDR + PERCPU_PAGE_SIZE, PAGE_GATE);
	/* Fill in the holes (if any) with read-only zero pages: */
	{
		unsigned long addr;

		for (addr = GATE_ADDR + PAGE_SIZE;
		     addr < GATE_ADDR + PERCPU_PAGE_SIZE;
		     addr += PAGE_SIZE)
		{
			put_kernel_page(ZERO_PAGE(0), addr,
					PAGE_READONLY);
			put_kernel_page(ZERO_PAGE(0), addr + PERCPU_PAGE_SIZE,
					PAGE_READONLY);
		}
	}
#endif
	ia64_patch_gate();
}

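/*
 * The gate page sits at a fixed per-process user address
 * (FIXADDR_USER_START); describe it with a static vma that is not linked
 * into any mm so the core VM can still find it via get_gate_vma().
 */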
static struct vm_area_struct gate_vma;

static int __init gate_vma_init(void)
{
	vma_init(&gate_vma, NULL);
	gate_vma.vm_start = FIXADDR_USER_START;
	gate_vma.vm_end = FIXADDR_USER_END;
	gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
	gate_vma.vm_page_prot = __P101;

	return 0;
}
__initcall(gate_vma_init);

struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
{
	return &gate_vma;
}

int in_gate_area_no_mm(unsigned long addr)
{
	if ((addr >= FIXADDR_USER_START) && (addr < FIXADDR_USER_END))
		return 1;
	return 0;
}

int in_gate_area(struct mm_struct *mm, unsigned long addr)
{
	return in_gate_area_no_mm(addr);
}

void ia64_mmu_init(void *my_cpu_data)
{
	unsigned long pta, impl_va_bits;

#ifdef CONFIG_DISABLE_VHPT
#	define VHPT_ENABLE_BIT	0
#else
#	define VHPT_ENABLE_BIT	1
#endif

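	/*
	 * Check whether the virtually mapped linear page table (VMLPT) would
	 * overlap the mapped address space.  The architecture guarantees at
	 * least 50 implemented virtual address bits, but with a large enough
	 * page size (e.g., 64KB) the mapped address space becomes big enough
	 * to collide with the VMLPT; the check below panics in that case.
	 */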
#	define pte_bits			3
#	define mapped_space_bits	(3*(PAGE_SHIFT - pte_bits) + PAGE_SHIFT)
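	/*
	 * The virtual page table has to cover the entire implemented address
	 * space within a region even though not all of this space may be
	 * mappable: the Access-bit and Dirty-bit fault handlers perform
	 * non-speculative accesses to the virtual page table, so its own
	 * address range must itself be covered by the virtual page table.
	 */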
#	define vmlpt_bits		(impl_va_bits - PAGE_SHIFT + pte_bits)
#	define POW2(n)			(1ULL << (n))

	impl_va_bits = ffz(~(local_cpu_data->unimpl_va_mask | (7UL << 61)));

	if (impl_va_bits < 51 || impl_va_bits > 61)
		panic("CPU has bogus IMPL_VA_MSB value of %lu!\n", impl_va_bits - 1);

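	/*
	 * mapped_space_bits - PAGE_SHIFT is the total number of PTEs needed,
	 * which must fit into "vmlpt_bits - pte_bits" table slots, and the
	 * table must also fit below the top of the implemented address space.
	 */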
	if ((mapped_space_bits - PAGE_SHIFT > vmlpt_bits - pte_bits) ||
	    (mapped_space_bits > impl_va_bits - 1))
		panic("Cannot build a big enough virtual-linear page table"
		      " to cover mapped address space.\n"
		      " Try using a smaller page size.\n");

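	/* place the VMLPT at the end of each page-table mapped region: */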
	pta = POW2(61) - POW2(vmlpt_bits);

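	/*
	 * Set the (virtually mapped linear) page table address.  In the PTA
	 * register, bit 8 selects between the short and long VHPT format,
	 * bits 2-7 give the log2 size of the table, and bit 0 enables the
	 * VHPT walker.
	 */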
	ia64_set_pta(pta | (0 << 8) | (vmlpt_bits << 2) | VHPT_ENABLE_BIT);

	ia64_tlb_init();

#ifdef CONFIG_HUGETLB_PAGE
	ia64_set_rr(HPAGE_REGION_BASE, HPAGE_SHIFT << 2);
	ia64_srlz_d();
#endif
}

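/*
 * Memory-map walk callback: register a usable memory range with memblock,
 * carving out the crash kernel's reserved region when CONFIG_KEXEC is set.
 */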
int __init register_active_ranges(u64 start, u64 len, int nid)
{
	u64 end = start + len;

#ifdef CONFIG_KEXEC
	if (start > crashk_res.start && start < crashk_res.end)
		start = crashk_res.end;
	if (end > crashk_res.start && end < crashk_res.end)
		end = crashk_res.start;
#endif

	if (start < end)
		memblock_add_node(__pa(start), end - start, nid);
	return 0;
}

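/*
 * Memory-map walk callback: widen min_low_pfn/max_low_pfn to cover the
 * given range, rounded to page (FLATMEM) or granule boundaries.
 */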
int
find_max_min_low_pfn (u64 start, u64 end, void *arg)
{
	unsigned long pfn_start, pfn_end;
#ifdef CONFIG_FLATMEM
	pfn_start = (PAGE_ALIGN(__pa(start))) >> PAGE_SHIFT;
	pfn_end = (PAGE_ALIGN(__pa(end - 1))) >> PAGE_SHIFT;
#else
	pfn_start = GRANULEROUNDDOWN(__pa(start)) >> PAGE_SHIFT;
	pfn_end = GRANULEROUNDUP(__pa(end - 1)) >> PAGE_SHIFT;
#endif
	min_low_pfn = min(min_low_pfn, pfn_start);
	max_low_pfn = max(max_low_pfn, pfn_end);
	return 0;
}

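/*
 * The "nolwsys" boot option disables the use of any light-weight system
 * call handler: all fsyscalls then bubble down into the kernel and go
 * through the normal (heavy-weight) syscall path.  Useful for performance
 * testing and debugging.
 */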
static int nolwsys __initdata;

static int __init
nolwsys_setup (char *s)
{
	nolwsys = 1;
	return 1;
}

__setup("nolwsys", nolwsys_setup);

void __init
mem_init (void)
{
	int i;

	BUG_ON(PTRS_PER_PGD * sizeof(pgd_t) != PAGE_SIZE);
	BUG_ON(PTRS_PER_PMD * sizeof(pmd_t) != PAGE_SIZE);
	BUG_ON(PTRS_PER_PTE * sizeof(pte_t) != PAGE_SIZE);

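	/*
	 * This needs to be called _after_ the command line has been parsed
	 * but _before_ any drivers that may need the PCI DMA interface are
	 * initialized or bootmem has been freed.
	 */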
	do {
#ifdef CONFIG_INTEL_IOMMU
		detect_intel_iommu();
		if (iommu_detected)
			break;
#endif
#ifdef CONFIG_SWIOTLB
		swiotlb_init(1);
#endif
	} while (0);

#ifdef CONFIG_FLATMEM
	BUG_ON(!mem_map);
#endif

	set_max_mapnr(max_low_pfn);
	high_memory = __va(max_low_pfn * PAGE_SIZE);
	memblock_free_all();

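	/*
	 * For fsyscall entrypoints with no light-weight handler, use the
	 * ordinary (heavy-weight) handler, but mark it by setting bit 0, so
	 * the fsyscall entry code can tell them apart.
	 */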
	for (i = 0; i < NR_syscalls; ++i) {
		extern unsigned long fsyscall_table[NR_syscalls];
		extern unsigned long sys_call_table[NR_syscalls];

		if (!fsyscall_table[i] || nolwsys)
			fsyscall_table[i] = sys_call_table[i] | 1;
	}
	setup_gate();
}

#ifdef CONFIG_MEMORY_HOTPLUG
int arch_add_memory(int nid, u64 start, u64 size,
		    struct mhp_params *params)
{
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;
	int ret;

	if (WARN_ON_ONCE(params->pgprot.pgprot != PAGE_KERNEL.pgprot))
		return -EINVAL;

	ret = __add_pages(nid, start_pfn, nr_pages, params);
	if (ret)
		pr_err("%s: Problem encountered in __add_pages() as ret=%d\n",
		       __func__, ret);

	return ret;
}

void arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap)
{
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;

	__remove_pages(start_pfn, nr_pages, altmap);
}
#endif