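/*
 * Page-fault handling for the Tile architecture, derived from the
 * i386 fault-handling code: covers synchronous ITLB/DTLB misses and
 * access violations, faults taken while in the interrupt critical
 * section (ICS), asynchronous DMA TLB faults, and synchronization of
 * kernel (vmalloc) mappings into process page tables.
 */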

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/tty.h>
#include <linux/vt_kern.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/kprobes.h>
#include <linux/hugetlb.h>
#include <linux/syscalls.h>
#include <linux/uaccess.h>
#include <linux/kdebug.h>
#include <linux/context_tracking.h>

#include <asm/pgalloc.h>
#include <asm/sections.h>
#include <asm/traps.h>
#include <asm/syscalls.h>

#include <arch/interrupts.h>
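
/*
 * Deliver the requested signal for a fault, panicking instead if the
 * victim is the idle task or init, since killing either of those
 * would take down the whole system.
 */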
static noinline void force_sig_info_fault(const char *type, int si_signo,
					  int si_code, unsigned long address,
					  int fault_num,
					  struct task_struct *tsk,
					  struct pt_regs *regs)
{
	siginfo_t info;

	if (unlikely(tsk->pid < 2)) {
		panic("Signal %d (code %d) at %#lx sent to %s!",
		      si_signo, si_code & 0xffff, address,
		      is_idle_task(tsk) ? "the idle task" : "init");
	}

	info.si_signo = si_signo;
	info.si_errno = 0;
	info.si_code = si_code;
	info.si_addr = (void __user *)address;
	info.si_trapno = fault_num;
	trace_unhandled_signal(type, regs, address, si_signo);
	force_sig_info(si_signo, &info, tsk);
}

#ifndef __tilegx__

/*
 * Synthesize the fault a PL0 process would get by doing a word-load of
 * an unaligned address or a high kernel address.
 */
SYSCALL_DEFINE1(cmpxchg_badaddr, unsigned long, address)
{
	struct pt_regs *regs = current_pt_regs();

	if (address >= PAGE_OFFSET)
		force_sig_info_fault("atomic segfault", SIGSEGV, SEGV_MAPERR,
				     address, INT_DTLB_MISS, current, regs);
	else
		force_sig_info_fault("atomic alignment fault", SIGBUS,
				     BUS_ADRALN, address,
				     INT_UNALIGN_DATA, current, regs);

	/*
	 * Adjust pc to point at the actual instruction, which is unusual
	 * for syscalls normally, but is appropriate when we are claiming
	 * that a syscall swint1 caused a page fault or bus error.
	 */
	regs->pc -= 8;

	/*
	 * Mark this as a caller-save interrupt, like a normal page fault,
	 * so that when we go through the signal handler path we will
	 * properly restore r0, r1, and r2 for the signal handler arguments.
	 */
	regs->flags |= PT_FLAGS_CALLER_SAVES;

	return 0;
}
#endif
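
/*
 * Copy the kernel's pmd entry for a vmalloc address from the init_mm
 * reference page table into the given pgd, returning the reference
 * pmd, or NULL if the kernel mapping is not present.  A pre-existing
 * pmd in the target page table must already match the reference.
 */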
static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address)
{
	unsigned index = pgd_index(address);
	pgd_t *pgd_k;
	pud_t *pud, *pud_k;
	pmd_t *pmd, *pmd_k;

	pgd += index;
	pgd_k = init_mm.pgd + index;

	if (!pgd_present(*pgd_k))
		return NULL;

	pud = pud_offset(pgd, address);
	pud_k = pud_offset(pgd_k, address);
	if (!pud_present(*pud_k))
		return NULL;

	pmd = pmd_offset(pud, address);
	pmd_k = pmd_offset(pud_k, address);
	if (!pmd_present(*pmd_k))
		return NULL;
	if (!pmd_present(*pmd))
		set_pmd(pmd, *pmd_k);
	else
		BUG_ON(pmd_ptfn(*pmd) != pmd_ptfn(*pmd_k));
	return pmd_k;
}
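
/*
 * Handle a fault on the vmalloc area.
 */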
static inline int vmalloc_fault(pgd_t *pgd, unsigned long address)
{
	pmd_t *pmd_k;
	pte_t *pte_k;

	/* Make sure we are in vmalloc area */
	if (!(address >= VMALLOC_START && address < VMALLOC_END))
		return -1;

	/*
	 * Synchronize this task's top level page-table
	 * with the 'reference' page table.
	 */
	pmd_k = vmalloc_sync_one(pgd, address);
	if (!pmd_k)
		return -1;
	pte_k = pte_offset_kernel(pmd_k, address);
	if (!pte_present(*pte_k))
		return -1;
	return 0;
}
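
/* Wait until this PTE has completed migration. */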
static void wait_for_migration(pte_t *pte)
{
	if (pte_migrating(*pte)) {
		/*
		 * Wait until the migrater fixes up this pte.
		 * We scale the loop count by the clock rate so we'll wait for
		 * a few seconds here.
		 */
		int retries = 0;
		int bound = get_clock_rate();
		while (pte_migrating(*pte)) {
			barrier();
			if (++retries > bound)
				panic("Hit migrating PTE (%#llx) and page PFN %#lx still migrating",
				      pte->val, pte_pfn(*pte));
		}
	}
}
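
/*
 * It's not generally safe to use "current" to get the page table
 * pointer, since we might be running an interrupt in the middle of a
 * task switch, so ask the hypervisor for the currently-installed
 * page table instead.
 */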
static pgd_t *get_current_pgd(void)
{
	HV_Context ctx = hv_inquire_context();
	unsigned long pgd_pfn = ctx.page_table >> PAGE_SHIFT;
	struct page *pgd_page = pfn_to_page(pgd_pfn);
	BUG_ON(PageHighMem(pgd_page));
	return (pgd_t *) __va(ctx.page_table);
}
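
/*
 * We can receive a page fault from a migrating PTE at any time.
 * Handle it by just waiting until the fault resolves.
 *
 * It's also possible to get a migrating kernel PTE that resolves
 * itself during the downcall from hypervisor to Linux.  We just check
 * here to see if the PTE seems valid, and if so we retry it.
 *
 * NOTE! We MUST NOT take any locks for this case.  We may be in an
 * interrupt or a critical region, and must do as little as possible.
 * Similarly, we can't use atomic ops here, since we may be handling a
 * fault caused by an atomic op access.
 *
 * If we find a migrating PTE while we're in an NMI context, and we're
 * at a PC that has a registered exception handler, we don't wait,
 * since this thread may (e.g.) have been interrupted while migrating
 * its own stack, which would then cause us to self-deadlock.
 */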
static int handle_migrating_pte(pgd_t *pgd, int fault_num,
				unsigned long address, unsigned long pc,
				int is_kernel_mode, int write)
{
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	pte_t pteval;

	if (pgd_addr_invalid(address))
		return 0;

	pgd += pgd_index(address);
	pud = pud_offset(pgd, address);
	if (!pud || !pud_present(*pud))
		return 0;
	pmd = pmd_offset(pud, address);
	if (!pmd || !pmd_present(*pmd))
		return 0;
	pte = pmd_huge_page(*pmd) ? ((pte_t *)pmd) :
		pte_offset_kernel(pmd, address);
	pteval = *pte;
	if (pte_migrating(pteval)) {
		if (in_nmi() && search_exception_tables(pc))
			return 0;
		wait_for_migration(pte);
		return 1;
	}

	if (!is_kernel_mode || !pte_present(pteval))
		return 0;
	if (fault_num == INT_ITLB_MISS) {
		if (pte_exec(pteval))
			return 1;
	} else if (write) {
		if (pte_write(pteval))
			return 1;
	} else {
		if (pte_read(pteval))
			return 1;
	}

	return 0;
}
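
/*
 * This routine is responsible for faulting in user pages.
 * It passes the work off to one of the appropriate routines.
 * It returns true if the fault was successfully handled.
 */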
static int handle_page_fault(struct pt_regs *regs,
			     int fault_num,
			     int is_page_fault,
			     unsigned long address,
			     int write)
{
	struct task_struct *tsk;
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	unsigned long stack_offset;
	int fault;
	int si_code;
	int is_kernel_mode;
	pgd_t *pgd;
	unsigned int flags;

	/* on TILE, protection faults are always writes */
	if (!is_page_fault)
		write = 1;

	flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;

	is_kernel_mode = !user_mode(regs);

	tsk = validate_current();

	/*
	 * Check to see if we might be overwriting the stack, and bail
	 * out if so.  The page fault code is a relatively likely
	 * place to get trapped in an infinite regress, and once we
	 * overwrite the whole stack, it becomes very hard to recover.
	 */
	stack_offset = stack_pointer & (THREAD_SIZE-1);
	if (stack_offset < THREAD_SIZE / 8) {
		pr_alert("Potential stack overrun: sp %#lx\n", stack_pointer);
		show_regs(regs);
		pr_alert("Killing current process %d/%s\n",
			 tsk->pid, tsk->comm);
		do_group_exit(SIGKILL);
	}

	/*
	 * Early on, we need to check for migrating PTE entries;
	 * see homecache.c.  If we find a migrating PTE, we wait until
	 * the backing page migrates, then re-issue the fault.  We
	 * (deliberately) skip the rest of the fault-handling logic in
	 * that case, since re-issuing the access is all that's needed.
	 */
	pgd = get_current_pgd();
	if (handle_migrating_pte(pgd, fault_num, address, regs->pc,
				 is_kernel_mode, write))
		return 1;

	si_code = SEGV_MAPERR;

	/*
	 * We fault-in kernel-space virtual memory on-demand. The
	 * 'reference' page table is init_mm.pgd.
	 *
	 * NOTE! We MUST NOT take any locks for this case. We may
	 * be in an interrupt or a critical region, and should
	 * only copy the information from the master page table,
	 * nothing more.
	 *
	 * This verifies that the fault happens in kernel space
	 * and that the fault was not a protection fault.
	 */
	if (unlikely(address >= TASK_SIZE &&
		     !is_arch_mappable_range(address, 0))) {
		if (is_kernel_mode && is_page_fault &&
		    vmalloc_fault(pgd, address) >= 0)
			return 1;
		/*
		 * Don't take the mm semaphore here. If we fixup a prefetch
		 * fault we could otherwise deadlock.
		 */
		mm = NULL;	/* happy compiler */
		vma = NULL;
		goto bad_area_nosemaphore;
	}

	/*
	 * If we're trying to touch user-space addresses, we must
	 * be either at PL0, or else with interrupts enabled in the
	 * kernel, so either way we can re-enable interrupts here
	 * unless we are doing atomic access to user space with
	 * interrupts disabled.
	 */
	if (!(regs->flags & PT_FLAGS_DISABLE_IRQ))
		local_irq_enable();

	mm = tsk->mm;

	/*
	 * If we're in an interrupt, have no user context or are running
	 * in an atomic region then we must not take the fault.
	 */
	if (pagefault_disabled() || !mm) {
		vma = NULL;	/* happy compiler */
		goto bad_area_nosemaphore;
	}

	if (!is_kernel_mode)
		flags |= FAULT_FLAG_USER;

	/*
	 * When running in the kernel we expect faults to occur only to
	 * addresses in user space.  All other faults represent errors in the
	 * kernel and should generate an OOPS.  Unfortunately, in the case of
	 * an erroneous fault occurring in a code path which already holds
	 * mmap_sem we will deadlock attempting to validate the fault against
	 * the address space.  Luckily the kernel only validly references user
	 * space from well defined areas of code, which are listed in the
	 * exceptions table.
	 *
	 * As the vast majority of faults will be valid we will only perform
	 * the source reference check when there is a possibility of a
	 * deadlock. Attempt to lock the address space, if we cannot we then
	 * validate the source. If this is invalid we can skip the address
	 * space check, thus avoiding the deadlock.
	 */
	if (!down_read_trylock(&mm->mmap_sem)) {
		if (is_kernel_mode &&
		    !search_exception_tables(regs->pc)) {
			vma = NULL;	/* happy compiler */
			goto bad_area_nosemaphore;
		}

retry:
		down_read(&mm->mmap_sem);
	}

	vma = find_vma(mm, address);
	if (!vma)
		goto bad_area;
	if (vma->vm_start <= address)
		goto good_area;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
	if (regs->sp < PAGE_OFFSET) {
		/*
		 * accessing the stack below sp is always a bug.
		 */
		if (address < regs->sp)
			goto bad_area;
	}
	if (expand_stack(vma, address))
		goto bad_area;

/*
 * Ok, we have a good vm_area for this memory access, so
 * we can handle it..
 */
good_area:
	si_code = SEGV_ACCERR;
	if (fault_num == INT_ITLB_MISS) {
		if (!(vma->vm_flags & VM_EXEC))
			goto bad_area;
	} else if (write) {
#ifdef TEST_VERIFY_AREA
		if (!is_page_fault && regs->cs == KERNEL_CS)
			pr_err("WP fault at " REGFMT "\n", regs->eip);
#endif
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
		flags |= FAULT_FLAG_WRITE;
	} else {
		if (!is_page_fault || !(vma->vm_flags & VM_READ))
			goto bad_area;
	}

	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	fault = handle_mm_fault(mm, vma, address, flags);

	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
		return 0;

	if (unlikely(fault & VM_FAULT_ERROR)) {
		if (fault & VM_FAULT_OOM)
			goto out_of_memory;
		else if (fault & VM_FAULT_SIGSEGV)
			goto bad_area;
		else if (fault & VM_FAULT_SIGBUS)
			goto do_sigbus;
		BUG();
	}
	if (flags & FAULT_FLAG_ALLOW_RETRY) {
		if (fault & VM_FAULT_MAJOR)
			tsk->maj_flt++;
		else
			tsk->min_flt++;
		if (fault & VM_FAULT_RETRY) {
			flags &= ~FAULT_FLAG_ALLOW_RETRY;
			flags |= FAULT_FLAG_TRIED;

			/*
			 * No need to up_read(&mm->mmap_sem) as we would
			 * have already released it in __lock_page_or_retry
			 * in mm/filemap.c.
			 */
			goto retry;
		}
	}

#if CHIP_HAS_TILE_DMA()
	/* If this was a DMA TLB fault, restart the DMA engine. */
	switch (fault_num) {
	case INT_DMATLB_MISS:
	case INT_DMATLB_MISS_DWNCL:
	case INT_DMATLB_ACCESS:
	case INT_DMATLB_ACCESS_DWNCL:
		__insn_mtspr(SPR_DMA_CTR, SPR_DMA_CTR__REQUEST_MASK);
		break;
	}
#endif

	up_read(&mm->mmap_sem);
	return 1;

/*
 * Something tried to access memory that isn't in our memory map..
 * Fix it, but check if it's kernel or user first..
 */
bad_area:
	up_read(&mm->mmap_sem);

bad_area_nosemaphore:
	/* User mode accesses just cause a SIGSEGV */
	if (!is_kernel_mode) {
		/*
		 * It's possible to have interrupts off here.
		 */
		local_irq_enable();

		force_sig_info_fault("segfault", SIGSEGV, si_code, address,
				     fault_num, tsk, regs);
		return 0;
	}

no_context:
	/* Are we prepared to handle this kernel fault? */
	if (fixup_exception(regs))
		return 0;

	/*
	 * Oops. The kernel tried to access some bad page. We'll have to
	 * terminate things with extreme prejudice.
	 */
	bust_spinlocks(1);

	/* FIXME: no lookup_address() yet */
#ifdef SUPPORT_LOOKUP_ADDRESS
	if (fault_num == INT_ITLB_MISS) {
		pte_t *pte = lookup_address(address);

		if (pte && pte_present(*pte) && !pte_exec_kernel(*pte))
			pr_crit("kernel tried to execute non-executable page - exploit attempt? (uid: %d)\n",
				current->uid);
	}
#endif
	if (address < PAGE_SIZE)
		pr_alert("Unable to handle kernel NULL pointer dereference\n");
	else
		pr_alert("Unable to handle kernel paging request\n");
	pr_alert(" at virtual address " REGFMT ", pc " REGFMT "\n",
		 address, regs->pc);

	show_regs(regs);

	if (unlikely(tsk->pid < 2)) {
		panic("Kernel page fault running %s!",
		      is_idle_task(tsk) ? "the idle task" : "init");
	}

	/*
	 * More FIXME: we should probably copy the i386 here and
	 * implement a generic die() routine.  Not today.
	 */
#ifdef SUPPORT_DIE
	die("Oops", regs);
#endif
	bust_spinlocks(0);

	do_group_exit(SIGKILL);

/*
 * We ran out of memory, or some other thing happened to us that made
 * us unable to handle the page fault gracefully.
 */
out_of_memory:
	up_read(&mm->mmap_sem);
	if (is_kernel_mode)
		goto no_context;
	pagefault_out_of_memory();
	return 0;

do_sigbus:
	up_read(&mm->mmap_sem);

	/* Kernel mode? Handle exceptions or die */
	if (is_kernel_mode)
		goto no_context;

	force_sig_info_fault("bus error", SIGBUS, BUS_ADRERR, address,
			     fault_num, tsk, regs);
	return 0;
}

#ifndef __tilegx__

/* We must release ICS before panicking or we won't get anywhere. */
#define ics_panic(fmt, ...)					\
do {								\
	__insn_mtspr(SPR_INTERRUPT_CRITICAL_SECTION, 0);	\
	panic(fmt, ##__VA_ARGS__);				\
} while (0)
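
/*
 * When we take an ITLB or DTLB fault or access violation in the
 * supervisor while the critical section bit is set, the hypervisor is
 * reluctant to write new values into the EX_CONTEXT_K_x registers,
 * since that might indicate we have just overwritten those registers
 * when they were written by the hypervisor.  Instead it delivers the
 * fault to us with the PC pointing at the faulting instruction, and
 * we handle it here: we either resolve the fault directly (vmalloc
 * and migrating-PTE faults), patch up the atomic fast paths so the
 * fault can fail cleanly, or fall back to normal fault handling.
 */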
struct intvec_state do_page_fault_ics(struct pt_regs *regs, int fault_num,
				      unsigned long address,
				      unsigned long info)
{
	unsigned long pc = info & ~1;
	int write = info & 1;
	pgd_t *pgd = get_current_pgd();

	/* Retval is 1 at first since we will handle the fault fully. */
	struct intvec_state state = {
		do_page_fault, fault_num, address, write, 1
	};

	/* Validate that we are plausibly in the right routine. */
	if ((pc & 0x7) != 0 || pc < PAGE_OFFSET ||
	    (fault_num != INT_DTLB_MISS &&
	     fault_num != INT_DTLB_ACCESS)) {
		unsigned long old_pc = regs->pc;
		regs->pc = pc;
		ics_panic("Bad ICS page fault args: old PC %#lx, fault %d/%d at %#lx",
			  old_pc, fault_num, write, address);
	}

	/* For a TLB miss, check whether syncing the vmalloc area resolves it. */
	if (fault_num != INT_DTLB_ACCESS && vmalloc_fault(pgd, address) >= 0)
		return state;

	/*
	 * If we faulted in the PL0 sys_cmpxchg fast path, the entry code
	 * stashed the user's stack pointer in r27, so restore "sp" from
	 * there before taking the fault.  If we had already grabbed the
	 * per-cpu atomic lock, release it first so that other cpus can
	 * make progress while the fault is handled.
	 */
	if (pc >= (unsigned long) sys_cmpxchg &&
	    pc < (unsigned long) __sys_cmpxchg_end) {
#ifdef CONFIG_SMP
		/* Don't unlock before we could have locked. */
		if (pc >= (unsigned long)__sys_cmpxchg_grab_lock) {
			int *lock_ptr = (int *)(regs->regs[ATOMIC_LOCK_REG]);
			__atomic_fault_unlock(lock_ptr);
		}
#endif
		regs->sp = regs->regs[27];
	}

	/*
	 * If we faulted in the kernel atomic assembly routines, we hold
	 * the atomic lock, so release it and then transfer control to
	 * the fixup address listed in the exception table, clearing ICS
	 * so that the fixup code runs with normal interrupt semantics.
	 */
	else if (pc >= (unsigned long) __start_atomic_asm_code &&
		 pc < (unsigned long) __end_atomic_asm_code) {
		const struct exception_table_entry *fixup;
#ifdef CONFIG_SMP
		/* Unlock the atomic lock. */
		int *lock_ptr = (int *)(regs->regs[ATOMIC_LOCK_REG]);
		__atomic_fault_unlock(lock_ptr);
#endif
		fixup = search_exception_tables(pc);
		if (!fixup)
			ics_panic("ICS atomic fault not in table: PC %#lx, fault %d",
				  pc, fault_num);
		regs->pc = fixup->fixup;
		regs->ex1 = PL_ICS_EX1(KERNEL_PL, 0);
	}

	/*
	 * Now that we have released the atomic lock (if necessary),
	 * it's safe to spin if the PTE that caused the fault was migrating.
	 */
	if (fault_num == INT_DTLB_ACCESS)
		write = 1;
	if (handle_migrating_pte(pgd, fault_num, address, pc, 1, write))
		return state;

	/* Return zero so that we continue on with normal fault handling. */
	state.retval = 0;
	return state;
}

#endif /* !__tilegx__ */
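
/*
 * This routine handles page faults.  It determines the address and the
 * problem, and then passes it off to handle_page_fault() for normal
 * DTLB and ITLB issues, and for DMA faults taken in user space.  For
 * DMA faults taken in kernel mode, we just save the fault state away
 * and return immediately, since we can't handle page faults for user
 * memory while in the kernel; see do_async_page_fault().
 */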
static inline void __do_page_fault(struct pt_regs *regs, int fault_num,
				   unsigned long address, unsigned long write)
{
	int is_page_fault;

#ifdef CONFIG_KPROBES
	/*
	 * This is to notify the fault handler of the kprobes.  The
	 * exception code is redundant as we are also sending the
	 * signr.
	 */
	if (notify_die(DIE_PAGE_FAULT, "page fault", regs, -1,
		       regs->faultnum, SIGSEGV) == NOTIFY_STOP)
		return;
#endif

#ifdef __tilegx__
	/*
	 * We don't need early do_page_fault_ics() support, since unlike
	 * Pro we don't need to worry about unlocking the atomic locks.
	 * There is only one current case in GX where we touch any memory
	 * under ICS other than our own kernel stack, and we handle that
	 * here.  (If we crash due to trying to touch our own stack,
	 * we're in too much trouble for C code to help out anyway.)
	 */
	if (write & ~1) {
		unsigned long pc = write & ~1;
		if (pc >= (unsigned long) __start_unalign_asm_code &&
		    pc < (unsigned long) __end_unalign_asm_code) {
			struct thread_info *ti = current_thread_info();
			/*
			 * Our EX_CONTEXT is still what it was from the
			 * initial unalign exception, but now we've faulted
			 * on the JIT page.  We would like to complete the
			 * page fault however is appropriate, and then retry
			 * the instruction that caused the unalign exception.
			 * Our state has been "corrupted" by setting the low
			 * bit in "sp", and stashing r0..r3 in the
			 * thread_info area, so we revert all of that, then
			 * continue as if this were a normal page fault.
			 */
			regs->sp &= ~1UL;
			regs->regs[0] = ti->unalign_jit_tmp[0];
			regs->regs[1] = ti->unalign_jit_tmp[1];
			regs->regs[2] = ti->unalign_jit_tmp[2];
			regs->regs[3] = ti->unalign_jit_tmp[3];
			write &= 1;
		} else {
			pr_alert("%s/%d: ICS set at page fault at %#lx: %#lx\n",
				 current->comm, current->pid, pc, address);
			show_regs(regs);
			do_group_exit(SIGKILL);
		}
	}
#else
	/* On TILEPro, the "write" argument is always just the write flag. */
	BUG_ON(write & ~1);
#endif

#if CHIP_HAS_TILE_DMA()
	/*
	 * If it's a DMA fault, suspend the transfer while we're
	 * handling the miss; we'll restart after it's handled.  If we
	 * don't suspend, it's possible that this process could swap
	 * out and back in, and restart the engine since the DMA is
	 * still 'running'.
	 */
	if (fault_num == INT_DMATLB_MISS ||
	    fault_num == INT_DMATLB_ACCESS ||
	    fault_num == INT_DMATLB_MISS_DWNCL ||
	    fault_num == INT_DMATLB_ACCESS_DWNCL) {
		__insn_mtspr(SPR_DMA_CTR, SPR_DMA_CTR__SUSPEND_MASK);
		while (__insn_mfspr(SPR_DMA_USER_STATUS) &
		       SPR_DMA_STATUS__BUSY_MASK)
			;
	}
#endif

	/* Validate fault num and decide if this is a first-time page fault. */
	switch (fault_num) {
	case INT_ITLB_MISS:
	case INT_DTLB_MISS:
#if CHIP_HAS_TILE_DMA()
	case INT_DMATLB_MISS:
	case INT_DMATLB_MISS_DWNCL:
#endif
		is_page_fault = 1;
		break;

	case INT_DTLB_ACCESS:
#if CHIP_HAS_TILE_DMA()
	case INT_DMATLB_ACCESS:
	case INT_DMATLB_ACCESS_DWNCL:
#endif
		is_page_fault = 0;
		break;

	default:
		panic("Bad fault number %d in do_page_fault", fault_num);
	}

#if CHIP_HAS_TILE_DMA()
	if (!user_mode(regs)) {
		struct async_tlb *async;
		switch (fault_num) {
#if CHIP_HAS_TILE_DMA()
		case INT_DMATLB_MISS:
		case INT_DMATLB_ACCESS:
		case INT_DMATLB_MISS_DWNCL:
		case INT_DMATLB_ACCESS_DWNCL:
			async = &current->thread.dma_async_tlb;
			break;
#endif
		default:
			async = NULL;
		}
		if (async) {
			/*
			 * No vmalloc check required, so we can allow
			 * interrupts immediately at this point.
			 */
			local_irq_enable();

			set_thread_flag(TIF_ASYNC_TLB);
			if (async->fault_num != 0) {
				panic("Second async fault %d; old fault was %d (%#lx/%ld)",
				      fault_num, async->fault_num,
				      address, write);
			}
			BUG_ON(fault_num == 0);
			async->fault_num = fault_num;
			async->is_fault = is_page_fault;
			async->is_write = write;
			async->address = address;
			return;
		}
	}
#endif

	handle_page_fault(regs, fault_num, is_page_fault, address, write);
}
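
/*
 * Entry point for page faults: wraps __do_page_fault() in the
 * exception_enter()/exception_exit() context-tracking hooks.
 */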
void do_page_fault(struct pt_regs *regs, int fault_num,
		   unsigned long address, unsigned long write)
{
	enum ctx_state prev_state = exception_enter();
	__do_page_fault(regs, fault_num, address, write);
	exception_exit(prev_state);
}

#if CHIP_HAS_TILE_DMA()
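/*
 * This routine effectively re-issues asynchronous page faults
 * when we are returning to user space.
 */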
void do_async_page_fault(struct pt_regs *regs)
{
	struct async_tlb *async = &current->thread.dma_async_tlb;

	/*
	 * Clear thread flag early.  If we re-interrupt while processing
	 * code here, we will reset it and recall this routine before
	 * returning to user space.
	 */
	clear_thread_flag(TIF_ASYNC_TLB);

	if (async->fault_num) {
		/*
		 * Clear async->fault_num before calling the page-fault
		 * handler so that if we re-interrupt before returning from
		 * the function we have somewhere to put the new fault.
		 */
		int fault_num = async->fault_num;
		async->fault_num = 0;
		handle_page_fault(regs, fault_num, async->is_fault,
				  async->address, async->is_write);
	}
}
#endif /* CHIP_HAS_TILE_DMA() */
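
/*
 * Make sure every pgd in the system has kernel entries synced from
 * the reference page table for the whole vmalloc range.  On tilegx
 * this is a no-op, since the relevant kernel pmds are shared.
 */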
void vmalloc_sync_all(void)
{
#ifdef __tilegx__
	/* Currently all L1 kernel pmd's are static and shared. */
	BUILD_BUG_ON(pgd_index(VMALLOC_END - PAGE_SIZE) !=
		     pgd_index(VMALLOC_START));
#else
	/*
	 * Note that races in the updates of insync and start aren't
	 * problematic: insync can only get set bits added, and updates to
	 * start are only improving performance (without affecting correctness
	 * if undone).
	 */
	static DECLARE_BITMAP(insync, PTRS_PER_PGD);
	static unsigned long start = PAGE_OFFSET;
	unsigned long address;

	BUILD_BUG_ON(PAGE_OFFSET & ~PGDIR_MASK);
	for (address = start; address >= PAGE_OFFSET; address += PGDIR_SIZE) {
		if (!test_bit(pgd_index(address), insync)) {
			unsigned long flags;
			struct list_head *pos;

			spin_lock_irqsave(&pgd_lock, flags);
			list_for_each(pos, &pgd_list)
				if (!vmalloc_sync_one(list_to_pgd(pos),
						      address)) {
					/* Must be at first entry in list. */
					BUG_ON(pos != pgd_list.next);
					break;
				}
			spin_unlock_irqrestore(&pgd_lock, flags);
			if (pos != pgd_list.next)
				set_bit(pgd_index(address), insync);
		}
		if (address == start && test_bit(pgd_index(address), insync))
			start = address + PGDIR_SIZE;
	}
#endif
}