/*
 * Initialize MMU support.
 */
#include <linux/kernel.h>
#include <linux/init.h>

#include <linux/bootmem.h>
#include <linux/efi.h>
#include <linux/elf.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/module.h>
#include <linux/personality.h>
#include <linux/reboot.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/proc_fs.h>
#include <linux/bitops.h>
#include <linux/kexec.h>

#include <asm/a.out.h>
#include <asm/dma.h>
#include <asm/ia32.h>
#include <asm/io.h>
#include <asm/machvec.h>
#include <asm/numa.h>
#include <asm/patch.h>
#include <asm/pgalloc.h>
#include <asm/sal.h>
#include <asm/sections.h>
#include <asm/system.h>
#include <asm/tlb.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/mca.h>

DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);

extern void ia64_tlb_init (void);

unsigned long MAX_DMA_ADDRESS = PAGE_OFFSET + 0x100000000UL;

#ifdef CONFIG_VIRTUAL_MEM_MAP
unsigned long vmalloc_end = VMALLOC_END_INIT;
EXPORT_SYMBOL(vmalloc_end);
struct page *vmem_map;
EXPORT_SYMBOL(vmem_map);
#endif

struct page *zero_page_memmap_ptr;	/* map entry for zero page */
EXPORT_SYMBOL(zero_page_memmap_ptr);

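/*
 * Make the i-cache coherent with the d-cache the first time a page is
 * mapped executable.  PG_arch_1 records that the page is already
 * coherent, so the flush below happens at most once per page.
 */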
void
__ia64_sync_icache_dcache (pte_t pte)
{
	unsigned long addr;
	struct page *page;
	unsigned long order;

	page = pte_page(pte);
	addr = (unsigned long) page_address(page);

	if (test_bit(PG_arch_1, &page->flags))
		return;				/* i-cache is already coherent with d-cache */

	if (PageCompound(page)) {
		order = compound_order(page);
		flush_icache_range(addr, addr + (1UL << order << PAGE_SHIFT));
	}
	else
		flush_icache_range(addr, addr + PAGE_SIZE);
	set_bit(PG_arch_1, &page->flags);	/* mark page as clean */
}

/*
 * Since DMA is i-cache coherent, any (complete) pages that were written via
 * DMA can be marked as "clean" so that lazy_mmu_prot_update() doesn't have to
 * flush them when they get mapped into an executable vm-area.
 */
void
dma_mark_clean(void *addr, size_t size)
{
	unsigned long pg_addr, end;

	pg_addr = PAGE_ALIGN((unsigned long) addr);
	end = (unsigned long) addr + size;
	while (pg_addr + PAGE_SIZE <= end) {
		struct page *page = virt_to_page(pg_addr);
		set_bit(PG_arch_1, &page->flags);
		pg_addr += PAGE_SIZE;
	}
}

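/*
 * Place the bottom of the register backing store (which grows upward)
 * an rlimit-sized distance below the top of the user stack, capped at
 * MAX_USER_STACK_SIZE.
 */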
inline void
ia64_set_rbs_bot (void)
{
	unsigned long stack_size = current->signal->rlim[RLIMIT_STACK].rlim_max & -16;

	if (stack_size > MAX_USER_STACK_SIZE)
		stack_size = MAX_USER_STACK_SIZE;
	current->thread.rbs_bot = PAGE_ALIGN(current->mm->start_stack - stack_size);
}

/*
 * This performs some platform-dependent address space initialization.
 * On IA-64, we want to setup the VM area for the register backing
 * store (which grows upwards) and install the gateway page which is
 * used for signal trampolines, etc.
 */
void
ia64_init_addr_space (void)
{
	struct vm_area_struct *vma;

	ia64_set_rbs_bot();

	/*
	 * If we're out of memory and kmem_cache_zalloc() returns NULL, we simply ignore
	 * the problem.  When the process attempts to write to the register backing store
	 * for the first time, it will get a SEGFAULT in this case.
	 */
	vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
	if (vma) {
		vma->vm_mm = current->mm;
		vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
		vma->vm_end = vma->vm_start + PAGE_SIZE;
		vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
		down_write(&current->mm->mmap_sem);
		if (insert_vm_struct(current->mm, vma)) {
			up_write(&current->mm->mmap_sem);
			kmem_cache_free(vm_area_cachep, vma);
			return;
		}
		up_write(&current->mm->mmap_sem);
	}

	/* map NaT-page at address zero to speed up speculative dereferencing of NULL: */
	if (!(current->personality & MMAP_PAGE_ZERO)) {
		vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
		if (vma) {
			vma->vm_mm = current->mm;
			vma->vm_end = PAGE_SIZE;
			vma->vm_page_prot = __pgprot(pgprot_val(PAGE_READONLY) | _PAGE_MA_NAT);
			vma->vm_flags = VM_READ | VM_MAYREAD | VM_IO | VM_RESERVED;
			down_write(&current->mm->mmap_sem);
			if (insert_vm_struct(current->mm, vma)) {
				up_write(&current->mm->mmap_sem);
				kmem_cache_free(vm_area_cachep, vma);
				return;
			}
			up_write(&current->mm->mmap_sem);
		}
	}
}

void
free_initmem (void)
{
	unsigned long addr, eaddr;

	addr = (unsigned long) ia64_imva(__init_begin);
	eaddr = (unsigned long) ia64_imva(__init_end);
	while (addr < eaddr) {
		ClearPageReserved(virt_to_page(addr));
		init_page_count(virt_to_page(addr));
		free_page(addr);
		++totalram_pages;
		addr += PAGE_SIZE;
	}
	printk(KERN_INFO "Freeing unused kernel memory: %ldkB freed\n",
	       (__init_end - __init_begin) >> 10);
}

void __init
free_initrd_mem (unsigned long start, unsigned long end)
{
	struct page *page;
	/*
	 * EFI uses 4KB pages while the kernel can use 4KB or bigger.
	 * Thus EFI and the kernel may have different page sizes. It is
	 * therefore possible to have the initrd share the same page as
	 * the end of the kernel (given current setup).
	 *
	 * To avoid freeing/using the wrong page (kernel sized) we:
	 *	- align up the beginning of initrd
	 *	- align down the end of initrd
	 *
	 * The EFI memory map may also mark pages within the initrd range
	 * as in use, so each page is additionally checked with
	 * virt_addr_valid() before being freed.
	 */
	start = PAGE_ALIGN(start);
	end = end & PAGE_MASK;

	if (start < end)
		printk(KERN_INFO "Freeing initrd memory: %ldkB freed\n", (end - start) >> 10);

	for (; start < end; start += PAGE_SIZE) {
		if (!virt_addr_valid(start))
			continue;
		page = virt_to_page(start);
		ClearPageReserved(page);
		init_page_count(page);
		free_page(start);
		++totalram_pages;
	}
}

/*
 * This installs a clean page in the kernel's page table.
 */
static struct page * __init
put_kernel_page (struct page *page, unsigned long address, pgprot_t pgprot)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	if (!PageReserved(page))
		printk(KERN_ERR "put_kernel_page: page at 0x%p not in reserved memory\n",
		       page_address(page));

	pgd = pgd_offset_k(address);		/* note: this is NOT pgd_offset()! */

	{
		pud = pud_alloc(&init_mm, pgd, address);
		if (!pud)
			goto out;
		pmd = pmd_alloc(&init_mm, pud, address);
		if (!pmd)
			goto out;
		pte = pte_alloc_kernel(pmd, address);
		if (!pte)
			goto out;
		if (!pte_none(*pte))
			goto out;
		set_pte(pte, mk_pte(page, pgprot));
	}
  out:
	/* no need for flush_tlb */
	return page;
}

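/*
 * Set up the page tables for the gate area (the ia64 equivalent of a
 * vDSO: signal trampolines, light-weight syscall entry, etc.) and
 * apply any run-time patches to it.
 */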
static void __init
setup_gate (void)
{
	struct page *page;

	/*
	 * Map the gate page twice: once read-only to export the ELF
	 * headers etc. and once execute-only page to enable
	 * privilege-promotion via "epc":
	 */
	page = virt_to_page(ia64_imva(__start_gate_section));
	put_kernel_page(page, GATE_ADDR, PAGE_READONLY);
#ifdef HAVE_BUGGY_SEGREL
	page = virt_to_page(ia64_imva(__start_gate_section + PAGE_SIZE));
	put_kernel_page(page, GATE_ADDR + PAGE_SIZE, PAGE_GATE);
#else
	put_kernel_page(page, GATE_ADDR + PERCPU_PAGE_SIZE, PAGE_GATE);
	/* Fill in the holes (if any) with read-only zero pages: */
	{
		unsigned long addr;

		for (addr = GATE_ADDR + PAGE_SIZE;
		     addr < GATE_ADDR + PERCPU_PAGE_SIZE;
		     addr += PAGE_SIZE)
		{
			put_kernel_page(ZERO_PAGE(0), addr,
					PAGE_READONLY);
			put_kernel_page(ZERO_PAGE(0), addr + PERCPU_PAGE_SIZE,
					PAGE_READONLY);
		}
	}
#endif
	ia64_patch_gate();
}

void __devinit
ia64_mmu_init (void *my_cpu_data)
{
	unsigned long pta, impl_va_bits;
	extern void __devinit tlb_init (void);

#ifdef CONFIG_DISABLE_VHPT
#	define VHPT_ENABLE_BIT	0
#else
#	define VHPT_ENABLE_BIT	1
#endif

	/*
	 * Check if the virtually mapped linear page table (VMLPT) overlaps with a mapped
	 * address space.  The IA-64 architecture guarantees that at least 50 bits of
	 * virtual address space are implemented but if we pick a large enough page size
	 * (e.g., 64KB), the mapped address space is big enough that it will overlap with
	 * VMLPT.  I assume that once we run on machines big enough to afford 64KB pages,
	 * IMPL_VA_MSB will be significantly bigger, so this is unlikely to become a
	 * problem in practice.  Alternatively, we could truncate the top of the mapped
	 * address space to not permit mappings that would overlap with the VMLPT.
	 * --davidm 00/12/06
	 */
#	define pte_bits			3
#	define mapped_space_bits	(3*(PAGE_SHIFT - pte_bits) + PAGE_SHIFT)
	/*
	 * The virtual page table has to cover the entire implemented address space within
	 * a region even though not all of this space may be mappable.  The reason for
	 * this is that the Access bit and Dirty bit fault handlers perform
	 * non-speculative accesses to the virtual page table, so the address range of the
	 * virtual page table itself needs to be covered by virtual page table.
	 */
#	define vmlpt_bits		(impl_va_bits - PAGE_SHIFT + pte_bits)
#	define POW2(n)			(1ULL << (n))

	impl_va_bits = ffz(~(local_cpu_data->unimpl_va_mask | (7UL << 61)));

	if (impl_va_bits < 51 || impl_va_bits > 61)
		panic("CPU has bogus IMPL_VA_MSB value of %lu!\n", impl_va_bits - 1);

	/*
	 * mapped_space_bits - PAGE_SHIFT is the total number of PTEs we need,
	 * which must fit into "vmlpt_bits - pte_bits" slots; the second half
	 * of the test ensures the VMLPT itself still fits within the region's
	 * implemented address space.
	 */
	if ((mapped_space_bits - PAGE_SHIFT > vmlpt_bits - pte_bits) ||
	    (mapped_space_bits > impl_va_bits - 1))
		panic("Cannot build a big enough virtual-linear page table"
		      " to cover mapped address space.\n"
		      " Try using a smaller page size.\n");

	/* place the VMLPT at the end of each page-table mapped region: */
	pta = POW2(61) - POW2(vmlpt_bits);

	/*
	 * Set the (virtually mapped linear) page table address.  Bit
	 * 8 selects between the short and long format, bits 2-7 the
	 * size of the table, and bit 0 whether the VHPT base is
	 * registered.
	 */
	ia64_set_pta(pta | (0 << 8) | (vmlpt_bits << 2) | VHPT_ENABLE_BIT);

	ia64_tlb_init();

#ifdef CONFIG_HUGETLB_PAGE
	ia64_set_rr(HPAGE_REGION_BASE, HPAGE_SHIFT << 2);
	ia64_srlz_d();
#endif
}

#ifdef CONFIG_VIRTUAL_MEM_MAP
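/*
 * Find the first vmem_map entry at or after node_start_pfn + i that is
 * actually backed by a mapped page, skipping unmapped PGD/PUD/PMD-sized
 * holes in whole steps.  Returns the offset of that entry from
 * node_start_pfn.
 */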
int vmemmap_find_next_valid_pfn(int node, int i)
{
	unsigned long end_address, hole_next_pfn;
	unsigned long stop_address;
	pg_data_t *pgdat = NODE_DATA(node);

	end_address = (unsigned long) &vmem_map[pgdat->node_start_pfn + i];
	end_address = PAGE_ALIGN(end_address);

	stop_address = (unsigned long) &vmem_map[
		pgdat->node_start_pfn + pgdat->node_spanned_pages];

	do {
		pgd_t *pgd;
		pud_t *pud;
		pmd_t *pmd;
		pte_t *pte;

		pgd = pgd_offset_k(end_address);
		if (pgd_none(*pgd)) {
			end_address += PGDIR_SIZE;
			continue;
		}

		pud = pud_offset(pgd, end_address);
		if (pud_none(*pud)) {
			end_address += PUD_SIZE;
			continue;
		}

		pmd = pmd_offset(pud, end_address);
		if (pmd_none(*pmd)) {
			end_address += PMD_SIZE;
			continue;
		}

		pte = pte_offset_kernel(pmd, end_address);
retry_pte:
		if (pte_none(*pte)) {
			end_address += PAGE_SIZE;
			pte++;
			if ((end_address < stop_address) &&
			    (end_address != ALIGN(end_address, 1UL << PMD_SHIFT)))
				goto retry_pte;
			continue;
		}
		/* Found next valid vmem_map page */
		break;
	} while (end_address < stop_address);

	end_address = min(end_address, stop_address);
	end_address = end_address - (unsigned long) vmem_map + sizeof(struct page) - 1;
	hole_next_pfn = end_address / sizeof(struct page);
	return hole_next_pfn - pgdat->node_start_pfn;
}

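/*
 * Populate kernel page-table entries, allocated from node-local bootmem,
 * so that the portion of vmem_map covering the physical range
 * [start, end) is backed by real pages.
 */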
int __init
create_mem_map_page_table (u64 start, u64 end, void *arg)
{
	unsigned long address, start_page, end_page;
	struct page *map_start, *map_end;
	int node;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	map_start = vmem_map + (__pa(start) >> PAGE_SHIFT);
	map_end   = vmem_map + (__pa(end) >> PAGE_SHIFT);

	start_page = (unsigned long) map_start & PAGE_MASK;
	end_page = PAGE_ALIGN((unsigned long) map_end);
	node = paddr_to_nid(__pa(start));

	for (address = start_page; address < end_page; address += PAGE_SIZE) {
		pgd = pgd_offset_k(address);
		if (pgd_none(*pgd))
			pgd_populate(&init_mm, pgd, alloc_bootmem_pages_node(NODE_DATA(node), PAGE_SIZE));
		pud = pud_offset(pgd, address);

		if (pud_none(*pud))
			pud_populate(&init_mm, pud, alloc_bootmem_pages_node(NODE_DATA(node), PAGE_SIZE));
		pmd = pmd_offset(pud, address);

		if (pmd_none(*pmd))
			pmd_populate_kernel(&init_mm, pmd, alloc_bootmem_pages_node(NODE_DATA(node), PAGE_SIZE));
		pte = pte_offset_kernel(pmd, address);

		if (pte_none(*pte))
			set_pte(pte, pfn_pte(__pa(alloc_bootmem_pages_node(NODE_DATA(node), PAGE_SIZE)) >> PAGE_SHIFT,
					     PAGE_KERNEL));
	}
	return 0;
}

struct memmap_init_callback_data {
	struct page *start;
	struct page *end;
	int nid;
	unsigned long zone;
};

static int __meminit
virtual_memmap_init (u64 start, u64 end, void *arg)
{
	struct memmap_init_callback_data *args;
	struct page *map_start, *map_end;

	args = (struct memmap_init_callback_data *) arg;
	map_start = vmem_map + (__pa(start) >> PAGE_SHIFT);
	map_end   = vmem_map + (__pa(end) >> PAGE_SHIFT);

	if (map_start < args->start)
		map_start = args->start;
	if (map_end > args->end)
		map_end = args->end;

	/*
	 * We have to initialize "out of bounds" struct page elements that fit completely
	 * on the same pages that were allocated for the "in bounds" elements because they
	 * may be accessed later (and found to be "reserved").
	 */
	map_start -= ((unsigned long) map_start & (PAGE_SIZE - 1)) / sizeof(struct page);
	map_end += ((PAGE_ALIGN((unsigned long) map_end) - (unsigned long) map_end)
		    / sizeof(struct page));

	if (map_start < map_end)
		memmap_init_zone((unsigned long)(map_end - map_start),
				 args->nid, args->zone, page_to_pfn(map_start),
				 MEMMAP_EARLY);
	return 0;
}

void __meminit
memmap_init (unsigned long size, int nid, unsigned long zone,
	     unsigned long start_pfn)
{
	if (!vmem_map)
		memmap_init_zone(size, nid, zone, start_pfn, MEMMAP_EARLY);
	else {
		struct page *start;
		struct memmap_init_callback_data args;

		start = pfn_to_page(start_pfn);
		args.start = start;
		args.end = start + size;
		args.nid = nid;
		args.zone = zone;

		efi_memmap_walk(virtual_memmap_init, &args);
	}
}

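/*
 * With a virtual mem_map, the array itself may have holes, so probe the
 * struct page with __get_user() (which fails gracefully on an unmapped
 * address) rather than dereferencing it.  If the entry straddles a page
 * boundary, its last byte is probed as well.
 */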
int
ia64_pfn_valid (unsigned long pfn)
{
	char byte;
	struct page *pg = pfn_to_page(pfn);

	return     (__get_user(byte, (char __user *) pg) == 0)
		&& ((((u64)pg & PAGE_MASK) == (((u64)(pg + 1) - 1) & PAGE_MASK))
			|| (__get_user(byte, (char __user *) (pg + 1) - 1) == 0));
}
EXPORT_SYMBOL(ia64_pfn_valid);

int __init
find_largest_hole (u64 start, u64 end, void *arg)
{
	u64 *max_gap = arg;

	static u64 last_end = PAGE_OFFSET;

	/* NOTE: this algorithm assumes efi memmap table is ordered */

	if (*max_gap < (start - last_end))
		*max_gap = start - last_end;
	last_end = end;
	return 0;
}

#endif /* CONFIG_VIRTUAL_MEM_MAP */

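/*
 * Register a physical memory range with the boot allocator's active
 * range bookkeeping, first clipping out any pages reserved for the
 * kexec crash kernel.
 */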
int __init
register_active_ranges(u64 start, u64 end, void *arg)
{
	int nid = paddr_to_nid(__pa(start));

	if (nid < 0)
		nid = 0;
#ifdef CONFIG_KEXEC
	if (start > crashk_res.start && start < crashk_res.end)
		start = crashk_res.end;
	if (end > crashk_res.start && end < crashk_res.end)
		end = crashk_res.start;
#endif

	if (start < end)
		add_active_range(nid, __pa(start) >> PAGE_SHIFT,
			__pa(end) >> PAGE_SHIFT);
	return 0;
}

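/*
 * efi_memmap_walk() callback: count the PageReserved pages in a memory
 * range and accumulate the total in *arg, for the boot-time memory
 * statistics printed by mem_init().
 */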
static int __init
count_reserved_pages (u64 start, u64 end, void *arg)
{
	unsigned long num_reserved = 0;
	unsigned long *count = arg;

	for (; start < end; start += PAGE_SIZE)
		if (PageReserved(virt_to_page(start)))
			++num_reserved;
	*count += num_reserved;
	return 0;
}

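/*
 * Widen the global min_low_pfn/max_low_pfn bounds to include this
 * memory range.  With FLATMEM the bounds are page-aligned; otherwise
 * they are rounded out to whole granules, since the discontig code
 * manages memory in granule units.
 */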
int
find_max_min_low_pfn (unsigned long start, unsigned long end, void *arg)
{
	unsigned long pfn_start, pfn_end;
#ifdef CONFIG_FLATMEM
	pfn_start = (PAGE_ALIGN(__pa(start))) >> PAGE_SHIFT;
	pfn_end = (PAGE_ALIGN(__pa(end - 1))) >> PAGE_SHIFT;
#else
	pfn_start = GRANULEROUNDDOWN(__pa(start)) >> PAGE_SHIFT;
	pfn_end = GRANULEROUNDUP(__pa(end - 1)) >> PAGE_SHIFT;
#endif
	min_low_pfn = min(min_low_pfn, pfn_start);
	max_low_pfn = max(max_low_pfn, pfn_end);
	return 0;
}

/*
 * Boot command-line option "nolwsys" can be used to disable the use of any light-weight
 * system call handler.  When this option is in effect, all fsyscalls will end up bubbling
 * down into the kernel and calling the normal (heavy-weight) syscall handler.  This is
 * useful for performance testing, but conceivably could also come in handy for debugging
 * purposes.
 */
static int nolwsys __initdata;

static int __init
nolwsys_setup (char *s)
{
	nolwsys = 1;
	return 1;
}

__setup("nolwsys", nolwsys_setup);

void __init
mem_init (void)
{
	long reserved_pages, codesize, datasize, initsize;
	pg_data_t *pgdat;
	int i;
	static struct kcore_list kcore_mem, kcore_vmem, kcore_kernel;

	BUG_ON(PTRS_PER_PGD * sizeof(pgd_t) != PAGE_SIZE);
	BUG_ON(PTRS_PER_PMD * sizeof(pmd_t) != PAGE_SIZE);
	BUG_ON(PTRS_PER_PTE * sizeof(pte_t) != PAGE_SIZE);

#ifdef CONFIG_PCI
	/*
	 * This needs to be called _after_ the command line has been parsed but _before_
	 * any drivers that may need the PCI DMA interface are initialized or bootmem has
	 * been freed.
	 */
	platform_dma_init();
#endif

#ifdef CONFIG_FLATMEM
	if (!mem_map)
		BUG();
	max_mapnr = max_low_pfn;
#endif

	high_memory = __va(max_low_pfn * PAGE_SIZE);

	kclist_add(&kcore_mem, __va(0), max_low_pfn * PAGE_SIZE);
	kclist_add(&kcore_vmem, (void *)VMALLOC_START, VMALLOC_END-VMALLOC_START);
	kclist_add(&kcore_kernel, _stext, _end - _stext);

	for_each_online_pgdat(pgdat)
		if (pgdat->bdata->node_bootmem_map)
			totalram_pages += free_all_bootmem_node(pgdat);

	reserved_pages = 0;
	efi_memmap_walk(count_reserved_pages, &reserved_pages);

	codesize = (unsigned long) _etext - (unsigned long) _stext;
	datasize = (unsigned long) _edata - (unsigned long) _etext;
	initsize = (unsigned long) __init_end - (unsigned long) __init_begin;

	printk(KERN_INFO "Memory: %luk/%luk available (%luk code, %luk reserved, "
	       "%luk data, %luk init)\n", (unsigned long) nr_free_pages() << (PAGE_SHIFT - 10),
	       num_physpages << (PAGE_SHIFT - 10), codesize >> 10,
	       reserved_pages << (PAGE_SHIFT - 10), datasize >> 10, initsize >> 10);

	/*
	 * For fsyscall entrypoints with no light-weight handler, use the ordinary
	 * (heavy-weight) handler, but mark it by setting bit 0, so the fsyscall entry
	 * code can tell them apart.
	 */
	for (i = 0; i < NR_syscalls; ++i) {
		extern unsigned long fsyscall_table[NR_syscalls];
		extern unsigned long sys_call_table[NR_syscalls];

		if (!fsyscall_table[i] || nolwsys)
			fsyscall_table[i] = sys_call_table[i] | 1;
	}
	setup_gate();

#ifdef CONFIG_IA32_SUPPORT
	ia32_mem_init();
#endif
}

#ifdef CONFIG_MEMORY_HOTPLUG
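/*
 * Hand a newly onlined page to the page allocator: clear its Reserved
 * flag, reset its reference count, and free it into the buddy lists.
 */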
void online_page(struct page *page)
{
	ClearPageReserved(page);
	init_page_count(page);
	__free_page(page);
	totalram_pages++;
	num_physpages++;
}

int arch_add_memory(int nid, u64 start, u64 size)
{
	pg_data_t *pgdat;
	struct zone *zone;
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;
	int ret;

	pgdat = NODE_DATA(nid);

	zone = pgdat->node_zones + ZONE_NORMAL;
	ret = __add_pages(zone, start_pfn, nr_pages);

	if (ret)
		printk(KERN_ERR "%s: Problem encountered in __add_pages() as ret=%d\n",
		       __FUNCTION__, ret);

	return ret;
}
#ifdef CONFIG_MEMORY_HOTREMOVE
int remove_memory(u64 start, u64 size)
{
	unsigned long start_pfn, end_pfn;
	unsigned long timeout = 120 * HZ;
	int ret;

	start_pfn = start >> PAGE_SHIFT;
	end_pfn = start_pfn + (size >> PAGE_SHIFT);
	ret = offline_pages(start_pfn, end_pfn, timeout);
	if (ret)
		goto out;
	/* we can free mem_map at this point */
out:
	return ret;
}
EXPORT_SYMBOL_GPL(remove_memory);
#endif /* CONFIG_MEMORY_HOTREMOVE */
#endif /* CONFIG_MEMORY_HOTPLUG */