/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 *   This program is free software; you can redistribute it and/or
 *   modify it under the terms of the GNU General Public License
 *   as published by the Free Software Foundation, version 2.
 *
 *   This program is distributed in the hope that it will be useful, but
 *   WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, NON INFRINGEMENT
 *   OR NON-INFRINGEMENT.  See the GNU General Public License for more
 *   details.
 *
 * From i386 code copyright (C) 1995  Linus Torvalds
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/tty.h>
#include <linux/vt_kern.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/kprobes.h>
#include <linux/hugetlb.h>
#include <linux/syscalls.h>
#include <linux/uaccess.h>
#include <linux/kdebug.h>

#include <asm/pgalloc.h>
#include <asm/sections.h>
#include <asm/traps.h>
#include <asm/syscalls.h>

#include <arch/interrupts.h>

static noinline void force_sig_info_fault(const char *type, int si_signo,
                                          int si_code, unsigned long address,
                                          int fault_num,
                                          struct task_struct *tsk,
                                          struct pt_regs *regs)
{
        siginfo_t info;

        if (unlikely(tsk->pid < 2)) {
                panic("Signal %d (code %d) at %#lx sent to %s!",
                      si_signo, si_code & 0xffff, address,
                      is_idle_task(tsk) ? "the idle task" : "init");
        }

        info.si_signo = si_signo;
        info.si_errno = 0;
        info.si_code = si_code;
        info.si_addr = (void __user *)address;
        info.si_trapno = fault_num;
        trace_unhandled_signal(type, regs, address, si_signo);
        force_sig_info(si_signo, &info, tsk);
}

#ifndef __tilegx__
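/*
 * Synthesize the fault a PL0 process would get by doing a word-load of
 * an unaligned address or a high kernel address.
 */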
SYSCALL_DEFINE1(cmpxchg_badaddr, unsigned long, address)
{
        struct pt_regs *regs = current_pt_regs();

        if (address >= PAGE_OFFSET)
                force_sig_info_fault("atomic segfault", SIGSEGV, SEGV_MAPERR,
                                     address, INT_DTLB_MISS, current, regs);
        else
                force_sig_info_fault("atomic alignment fault", SIGBUS,
                                     BUS_ADRALN, address,
                                     INT_UNALIGN_DATA, current, regs);

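        /*
         * Adjust pc to point at the actual instruction, which is unusual
         * for syscalls normally, but is appropriate when we are claiming
         * that a syscall swint1 caused a page fault or bus error.
         */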
        regs->pc -= 8;

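        /*
         * Mark this as a caller-save interrupt, like a normal page fault,
         * so that when we go through the signal handler path we will
         * properly restore r0, r1, and r2 for the signal handler arguments.
         */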
        regs->flags |= PT_FLAGS_CALLER_SAVES;

        return 0;
}
#endif

static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address)
{
        unsigned index = pgd_index(address);
        pgd_t *pgd_k;
        pud_t *pud, *pud_k;
        pmd_t *pmd, *pmd_k;

        pgd += index;
        pgd_k = init_mm.pgd + index;

        if (!pgd_present(*pgd_k))
                return NULL;

        pud = pud_offset(pgd, address);
        pud_k = pud_offset(pgd_k, address);
        if (!pud_present(*pud_k))
                return NULL;

        pmd = pmd_offset(pud, address);
        pmd_k = pmd_offset(pud_k, address);
        if (!pmd_present(*pmd_k))
                return NULL;
        if (!pmd_present(*pmd))
                set_pmd(pmd, *pmd_k);
        else
                BUG_ON(pmd_ptfn(*pmd) != pmd_ptfn(*pmd_k));
        return pmd_k;
}

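/*
 * Handle a fault on the vmalloc area: copy the missing top-level
 * entry from the init_mm "reference" page table into the current one.
 */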
static inline int vmalloc_fault(pgd_t *pgd, unsigned long address)
{
        pmd_t *pmd_k;
        pte_t *pte_k;

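        /* Make sure we are in vmalloc area. */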
        if (!(address >= VMALLOC_START && address < VMALLOC_END))
                return -1;

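        /*
         * Synchronize this task's top level page-table
         * with the 'reference' page table.
         */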
        pmd_k = vmalloc_sync_one(pgd, address);
        if (!pmd_k)
                return -1;
        pte_k = pte_offset_kernel(pmd_k, address);
        if (!pte_present(*pte_k))
                return -1;
        return 0;
}

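/* Wait until this PTE has completed migration. */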
static void wait_for_migration(pte_t *pte)
{
        if (pte_migrating(*pte)) {
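                /*
                 * Wait until the migrater fixes up this pte.
                 * We scale the loop count by the clock rate so we'll
                 * wait for a few seconds here.
                 */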
                int retries = 0;
                int bound = get_clock_rate();
                while (pte_migrating(*pte)) {
                        barrier();
                        if (++retries > bound)
                                panic("Hit migrating PTE (%#llx) and"
                                      " page PFN %#lx still migrating",
                                      pte->val, pte_pfn(*pte));
                }
        }
}

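/*
 * It's not generally safe to use "current" to get the page table pointer,
 * since we might be running an oprofile interrupt in the middle of a
 * task switch.
 */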
static pgd_t *get_current_pgd(void)
{
        HV_Context ctx = hv_inquire_context();
        unsigned long pgd_pfn = ctx.page_table >> PAGE_SHIFT;
        struct page *pgd_page = pfn_to_page(pgd_pfn);
        BUG_ON(PageHighMem(pgd_page));
        return (pgd_t *) __va(ctx.page_table);
}

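/*
 * We can receive a page fault from a migrating PTE at any time.
 * Handle it by just waiting until the fault resolves.
 *
 * It's also possible to get a migrating kernel PTE that resolves
 * itself during the downcall from hypervisor to Linux.  We just check
 * here to see if the PTE seems valid, and if so we retry it.
 *
 * NOTE! We MUST NOT take any locks for this case.  We may be in an
 * interrupt or a critical region, and must do as little as possible.
 * Similarly, we can't use atomic ops here, since we may be handling a
 * fault caused by an atomic op access.
 *
 * If we find a migrating PTE while we're in an NMI context, and we're
 * at a PC that has a registered exception handler, we don't wait,
 * since this thread may (e.g.) have been interrupted while migrating
 * its own stack, which would then cause us to self-deadlock.
 */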
static int handle_migrating_pte(pgd_t *pgd, int fault_num,
                                unsigned long address, unsigned long pc,
                                int is_kernel_mode, int write)
{
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;
        pte_t pteval;

        if (pgd_addr_invalid(address))
                return 0;

        pgd += pgd_index(address);
        pud = pud_offset(pgd, address);
        if (!pud || !pud_present(*pud))
                return 0;
        pmd = pmd_offset(pud, address);
        if (!pmd || !pmd_present(*pmd))
                return 0;
        pte = pmd_huge_page(*pmd) ? ((pte_t *)pmd) :
                pte_offset_kernel(pmd, address);
        pteval = *pte;
        if (pte_migrating(pteval)) {
                if (in_nmi() && search_exception_tables(pc))
                        return 0;
                wait_for_migration(pte);
                return 1;
        }

        if (!is_kernel_mode || !pte_present(pteval))
                return 0;
        if (fault_num == INT_ITLB_MISS) {
                if (pte_exec(pteval))
                        return 1;
        } else if (write) {
                if (pte_write(pteval))
                        return 1;
        } else {
                if (pte_read(pteval))
                        return 1;
        }

        return 0;
}

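/*
 * This routine handles page faults.  It determines the address,
 * and the problem, and then passes it off to one of the appropriate
 * routines.
 */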
static int handle_page_fault(struct pt_regs *regs,
                             int fault_num,
                             int is_page_fault,
                             unsigned long address,
                             int write)
{
        struct task_struct *tsk;
        struct mm_struct *mm;
        struct vm_area_struct *vma;
        unsigned long stack_offset;
        int fault;
        int si_code;
        int is_kernel_mode;
        pgd_t *pgd;
        unsigned int flags;

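        /* On TILE, protection faults are always writes. */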
        if (!is_page_fault)
                write = 1;

        flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;

        is_kernel_mode = !user_mode(regs);

        tsk = validate_current();

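        /*
         * Check to see if we might be overwriting the stack, and bail
         * out if so.  The page fault code is a relatively likely
         * place to get trapped in an infinite regress, and once we
         * overwrite the whole stack, it becomes very hard to recover.
         */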
        stack_offset = stack_pointer & (THREAD_SIZE-1);
        if (stack_offset < THREAD_SIZE / 8) {
                pr_alert("Potential stack overrun: sp %#lx\n",
                         stack_pointer);
                show_regs(regs);
                pr_alert("Killing current process %d/%s\n",
                         tsk->pid, tsk->comm);
                do_group_exit(SIGKILL);
        }

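        /*
         * Early on, check for migrating PTE entries; if we find one,
         * we wait until the backing page reaches its new home, then
         * re-issue the fault.  See homecache.c.
         */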
        pgd = get_current_pgd();
        if (handle_migrating_pte(pgd, fault_num, address, regs->pc,
                                 is_kernel_mode, write))
                return 1;

        si_code = SEGV_MAPERR;

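        /*
         * We fault-in kernel-space virtual memory on-demand.  The
         * 'reference' page table is init_mm.pgd.
         *
         * NOTE! We MUST NOT take any locks for this case.  We may
         * be in an interrupt or a critical region, and should
         * only copy the information from the master page table,
         * nothing more.
         *
         * This verifies that the fault happens in kernel space
         * and that the fault was not a protection fault.
         */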
        if (unlikely(address >= TASK_SIZE &&
                     !is_arch_mappable_range(address, 0))) {
                if (is_kernel_mode && is_page_fault &&
                    vmalloc_fault(pgd, address) >= 0)
                        return 1;
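                /*
                 * Don't take the mm semaphore here.  If we fixup a
                 * prefetch fault we could otherwise deadlock.
                 */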
                mm = NULL;
                vma = NULL;
                goto bad_area_nosemaphore;
        }

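        /*
         * If we're trying to touch user-space addresses, we must
         * be either at PL0, or else with interrupts enabled in the
         * kernel, so either way we can re-enable interrupts here
         * unless we are doing atomic access to user space with
         * interrupts disabled.
         */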
        if (!(regs->flags & PT_FLAGS_DISABLE_IRQ))
                local_irq_enable();

        mm = tsk->mm;

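        /*
         * If we're in an interrupt, have no user context or are running
         * in an atomic region then we must not take the fault.
         */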
        if (in_atomic() || !mm) {
                vma = NULL;
                goto bad_area_nosemaphore;
        }

        if (!is_kernel_mode)
                flags |= FAULT_FLAG_USER;

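        /*
         * When running in the kernel we expect faults to occur only to
         * addresses in user space.  All other faults represent errors in
         * the kernel and should generate an OOPS.  Unfortunately, in the
         * case of an erroneous fault occurring in a code path which already
         * holds mmap_sem we will deadlock attempting to validate the fault
         * against the address space.  Luckily the kernel only validly
         * references user space from well defined areas of code, which are
         * listed in the exceptions table.
         *
         * As the vast majority of faults will be valid we will only perform
         * the source reference check when there is a possibility of a
         * deadlock.  Attempt to lock the address space; if we cannot, then
         * validate the source.  If this is invalid we can skip the address
         * space check, thus avoiding the deadlock.
         */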
        if (!down_read_trylock(&mm->mmap_sem)) {
                if (is_kernel_mode &&
                    !search_exception_tables(regs->pc)) {
                        vma = NULL;
                        goto bad_area_nosemaphore;
                }

retry:
                down_read(&mm->mmap_sem);
        }

        vma = find_vma(mm, address);
        if (!vma)
                goto bad_area;
        if (vma->vm_start <= address)
                goto good_area;
        if (!(vma->vm_flags & VM_GROWSDOWN))
                goto bad_area;
        if (regs->sp < PAGE_OFFSET) {
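                /*
                 * Accessing the stack below sp is always a bug.
                 */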
                if (address < regs->sp)
                        goto bad_area;
        }
        if (expand_stack(vma, address))
                goto bad_area;

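/*
 * Ok, we have a good vm_area for this memory access, so
 * we can handle it..
 */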
good_area:
        si_code = SEGV_ACCERR;
        if (fault_num == INT_ITLB_MISS) {
                if (!(vma->vm_flags & VM_EXEC))
                        goto bad_area;
        } else if (write) {
#ifdef TEST_VERIFY_AREA
                if (!is_page_fault && regs->cs == KERNEL_CS)
                        pr_err("WP fault at "REGFMT"\n", regs->eip);
#endif
                if (!(vma->vm_flags & VM_WRITE))
                        goto bad_area;
                flags |= FAULT_FLAG_WRITE;
        } else {
                if (!is_page_fault || !(vma->vm_flags & VM_READ))
                        goto bad_area;
        }

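        /*
         * If for any reason at all we couldn't handle the fault,
         * make sure we exit gracefully rather than endlessly redo
         * the fault.
         */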
        fault = handle_mm_fault(mm, vma, address, flags);

        if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
                return 0;

        if (unlikely(fault & VM_FAULT_ERROR)) {
                if (fault & VM_FAULT_OOM)
                        goto out_of_memory;
                else if (fault & VM_FAULT_SIGBUS)
                        goto do_sigbus;
                BUG();
        }
        if (flags & FAULT_FLAG_ALLOW_RETRY) {
                if (fault & VM_FAULT_MAJOR)
                        tsk->maj_flt++;
                else
                        tsk->min_flt++;
                if (fault & VM_FAULT_RETRY) {
                        flags &= ~FAULT_FLAG_ALLOW_RETRY;
                        flags |= FAULT_FLAG_TRIED;

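                        /*
                         * No need to up_read(&mm->mmap_sem) as we would
                         * have already released it in __lock_page_or_retry
                         * in mm/filemap.c.
                         */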
                        goto retry;
                }
        }

#if CHIP_HAS_TILE_DMA()
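        /* If this was a DMA TLB fault, restart the DMA engine. */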
        switch (fault_num) {
        case INT_DMATLB_MISS:
        case INT_DMATLB_MISS_DWNCL:
        case INT_DMATLB_ACCESS:
        case INT_DMATLB_ACCESS_DWNCL:
                __insn_mtspr(SPR_DMA_CTR, SPR_DMA_CTR__REQUEST_MASK);
                break;
        }
#endif

        up_read(&mm->mmap_sem);
        return 1;

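/*
 * Something tried to access memory that isn't in our memory map..
 * Fix it, but check if it's kernel or user first..
 */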
bad_area:
        up_read(&mm->mmap_sem);

bad_area_nosemaphore:
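        /* User mode accesses just cause a SIGSEGV. */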
        if (!is_kernel_mode) {
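                /*
                 * It's possible to get here with interrupts still
                 * disabled (e.g. for a fault taken during an atomic
                 * user-space access), so enable them before sending
                 * the signal.
                 */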
                local_irq_enable();

                force_sig_info_fault("segfault", SIGSEGV, si_code, address,
                                     fault_num, tsk, regs);
                return 0;
        }

no_context:
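        /* Are we prepared to handle this kernel fault? */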
        if (fixup_exception(regs))
                return 0;

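        /*
         * Oops.  The kernel tried to access some bad page.  We'll have to
         * terminate things with extreme prejudice.
         */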
        bust_spinlocks(1);

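        /* FIXME: no lookup_address() yet */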
#ifdef SUPPORT_LOOKUP_ADDRESS
        if (fault_num == INT_ITLB_MISS) {
                pte_t *pte = lookup_address(address);

                if (pte && pte_present(*pte) && !pte_exec_kernel(*pte))
                        pr_crit("kernel tried to execute"
                                " non-executable page - exploit attempt?"
                                " (uid: %d)\n", current->uid);
        }
#endif
        if (address < PAGE_SIZE)
                pr_alert("Unable to handle kernel NULL pointer dereference\n");
        else
                pr_alert("Unable to handle kernel paging request\n");
        pr_alert(" at virtual address "REGFMT", pc "REGFMT"\n",
                 address, regs->pc);

        show_regs(regs);

        if (unlikely(tsk->pid < 2)) {
                panic("Kernel page fault running %s!",
                      is_idle_task(tsk) ? "the idle task" : "init");
        }

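        /*
         * Report the oops via die() when it's available; either way,
         * finish by killing the whole process group.
         */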
#ifdef SUPPORT_DIE
        die("Oops", regs);
#endif
        bust_spinlocks(0);

        do_group_exit(SIGKILL);

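/*
 * We ran out of memory, or some other thing happened to us that made
 * us unable to handle the page fault gracefully.
 */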
out_of_memory:
        up_read(&mm->mmap_sem);
        if (is_kernel_mode)
                goto no_context;
        pagefault_out_of_memory();
        return 0;

do_sigbus:
        up_read(&mm->mmap_sem);

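        /* Kernel mode?  Handle exceptions or die. */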
        if (is_kernel_mode)
                goto no_context;

        force_sig_info_fault("bus error", SIGBUS, BUS_ADRERR, address,
                             fault_num, tsk, regs);
        return 0;
}

#ifndef __tilegx__

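/* We must release ICS before panicking or we won't get anywhere. */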
#define ics_panic(fmt, ...) do { \
        __insn_mtspr(SPR_INTERRUPT_CRITICAL_SECTION, 0); \
        panic(fmt, __VA_ARGS__); \
} while (0)

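/*
 * When we take an ITLB or DTLB fault or access violation in the
 * supervisor while the critical section bit is set, the hypervisor is
 * reluctant to write new values into the EX_CONTEXT_K_x registers,
 * since that might indicate we have not yet squirreled the SPR
 * contents away and can thus safely take a recursive interrupt.
 * Accordingly, the hypervisor passes us the PC via SYSTEM_SAVE_K_2.
 *
 * Note that this routine is called before homecache_tlb_defer_enter(),
 * which means that we can properly unlock any atomics that might
 * be used there (good), but also means we must be very sensitive
 * to not touch any data structures that might be located in memory
 * that could migrate, as we could be entering the kernel on a dataplane
 * cpu that has been deferring kernel TLB updates.  This means, for
 * example, that we can't migrate init_mm or its pgd.
 */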
struct intvec_state do_page_fault_ics(struct pt_regs *regs, int fault_num,
                                      unsigned long address,
                                      unsigned long info)
{
        unsigned long pc = info & ~1;
        int write = info & 1;
        pgd_t *pgd = get_current_pgd();

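        /* Retval is 1 at first since we will handle the fault fully. */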
        struct intvec_state state = {
                do_page_fault, fault_num, address, write, 1
        };

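        /* Validate that we are plausibly in the right routine. */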
        if ((pc & 0x7) != 0 || pc < PAGE_OFFSET ||
            (fault_num != INT_DTLB_MISS &&
             fault_num != INT_DTLB_ACCESS)) {
                unsigned long old_pc = regs->pc;
                regs->pc = pc;
                ics_panic("Bad ICS page fault args:"
                          " old PC %#lx, fault %d/%d at %#lx\n",
                          old_pc, fault_num, write, address);
        }

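        /* We might be faulting on a vmalloc page, so check that first. */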
        if (fault_num != INT_DTLB_ACCESS && vmalloc_fault(pgd, address) >= 0)
                return state;

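        /*
         * If we faulted with ICS set in the sys_cmpxchg fast path, we
         * can't simply return and retry: first drop back to the stack
         * pointer the fast path saved in r27 and, on SMP, release the
         * atomic lock if the PC shows we could already have grabbed it;
         * then let the fault be handled normally.
         */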
        if (pc >= (unsigned long) sys_cmpxchg &&
            pc < (unsigned long) __sys_cmpxchg_end) {
#ifdef CONFIG_SMP
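                /* Don't unlock before we could have locked. */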
                if (pc >= (unsigned long)__sys_cmpxchg_grab_lock) {
                        int *lock_ptr = (int *)(regs->regs[ATOMIC_LOCK_REG]);
                        __atomic_fault_unlock(lock_ptr);
                }
#endif
                regs->sp = regs->regs[27];
        }
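        /*
         * We can also fault in the atomic assembly, in which
         * case we use the exception table to do the first-level fixup.
         */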
        else if (pc >= (unsigned long) __start_atomic_asm_code &&
                 pc < (unsigned long) __end_atomic_asm_code) {
                const struct exception_table_entry *fixup;
#ifdef CONFIG_SMP
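                /* Unlock the atomic lock. */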
                int *lock_ptr = (int *)(regs->regs[ATOMIC_LOCK_REG]);
                __atomic_fault_unlock(lock_ptr);
#endif
                fixup = search_exception_tables(pc);
                if (!fixup)
                        ics_panic("ICS atomic fault not in table:"
                                  " PC %#lx, fault %d", pc, fault_num);
                regs->pc = fixup->fixup;
                regs->ex1 = PL_ICS_EX1(KERNEL_PL, 0);
        }

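        /*
         * Now that we have released the atomic lock (if necessary),
         * it's safe to spin if the PTE that faulted was migrating.
         */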
        if (fault_num == INT_DTLB_ACCESS)
                write = 1;
        if (handle_migrating_pte(pgd, fault_num, address, pc, 1, write))
                return state;

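        /* Return zero so that we continue on with normal fault handling. */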
        state.retval = 0;
        return state;
}

#endif /* !__tilegx__ */

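/*
 * This routine handles page faults.  It determines the address, and the
 * problem, and then passes it handle_page_fault() for normal DTLB and
 * ITLB issues, and for DMA TLB faults when we are in user mode.  For the
 * latter, if we're in kernel mode, we just save the interrupt away
 * appropriately and return immediately.  We can't do page faults for
 * user code while in kernel mode.
 */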
void do_page_fault(struct pt_regs *regs, int fault_num,
                   unsigned long address, unsigned long write)
{
        int is_page_fault;

#ifdef CONFIG_KPROBES
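        /*
         * This is to notify the fault handler of the kprobes.  The
         * exception code is redundant as we pass -1 to notify_die().
         */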
        if (notify_die(DIE_PAGE_FAULT, "page fault", regs, -1,
                       regs->faultnum, SIGSEGV) == NOTIFY_STOP)
                return;
#endif

#ifdef __tilegx__
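        /*
         * We don't need early do_page_fault_ics() support, since unlike
         * Pro we don't need to worry about unlocking the atomic locks.
         * There is only one current case in GX where we touch any memory
         * under ICS other than our own kernel stack, and we handle that
         * here.  (If we crash due to trying to touch our own stack,
         * we're in too much trouble for C code to help out anyway.)
         */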
        if (write & ~1) {
                unsigned long pc = write & ~1;
                if (pc >= (unsigned long) __start_unalign_asm_code &&
                    pc < (unsigned long) __end_unalign_asm_code) {
                        struct thread_info *ti = current_thread_info();
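                        /*
                         * The unalign JIT code saves r0-r3 in the
                         * thread_info (unalign_jit_tmp) and flags that
                         * state in the low bit of sp; restore the
                         * registers and clear the bit before handling
                         * the fault normally.
                         */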
                        regs->sp &= ~1UL;
                        regs->regs[0] = ti->unalign_jit_tmp[0];
                        regs->regs[1] = ti->unalign_jit_tmp[1];
                        regs->regs[2] = ti->unalign_jit_tmp[2];
                        regs->regs[3] = ti->unalign_jit_tmp[3];
                        write &= 1;
                } else {
                        pr_alert("%s/%d: ICS set at page fault at %#lx: %#lx\n",
                                 current->comm, current->pid, pc, address);
                        show_regs(regs);
                        do_group_exit(SIGKILL);
                        return;
                }
        }
#else
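        /* On tilepro the ICS case is handled earlier, in do_page_fault_ics(). */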
        BUG_ON(write & ~1);
#endif

#if CHIP_HAS_TILE_DMA()
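        /*
         * If it's a DMA fault, suspend the transfer while we're
         * handling the miss; we'll restart after it's handled.  If we
         * don't suspend, it's possible that this process could swap
         * out and back in, and restart the engine since the DMA is
         * still 'running'.
         */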
        if (fault_num == INT_DMATLB_MISS ||
            fault_num == INT_DMATLB_ACCESS ||
            fault_num == INT_DMATLB_MISS_DWNCL ||
            fault_num == INT_DMATLB_ACCESS_DWNCL) {
                __insn_mtspr(SPR_DMA_CTR, SPR_DMA_CTR__SUSPEND_MASK);
                while (__insn_mfspr(SPR_DMA_USER_STATUS) &
                       SPR_DMA_STATUS__BUSY_MASK)
                        ;
        }
#endif

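        /* Validate fault num and decide if this is a first-time page fault. */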
        switch (fault_num) {
        case INT_ITLB_MISS:
        case INT_DTLB_MISS:
#if CHIP_HAS_TILE_DMA()
        case INT_DMATLB_MISS:
        case INT_DMATLB_MISS_DWNCL:
#endif
                is_page_fault = 1;
                break;

        case INT_DTLB_ACCESS:
#if CHIP_HAS_TILE_DMA()
        case INT_DMATLB_ACCESS:
        case INT_DMATLB_ACCESS_DWNCL:
#endif
                is_page_fault = 0;
                break;

        default:
                panic("Bad fault number %d in do_page_fault", fault_num);
        }

#if CHIP_HAS_TILE_DMA()
        if (!user_mode(regs)) {
                struct async_tlb *async;
                switch (fault_num) {
#if CHIP_HAS_TILE_DMA()
                case INT_DMATLB_MISS:
                case INT_DMATLB_ACCESS:
                case INT_DMATLB_MISS_DWNCL:
                case INT_DMATLB_ACCESS_DWNCL:
                        async = &current->thread.dma_async_tlb;
                        break;
#endif
                default:
                        async = NULL;
                }
                if (async) {
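                        /*
                         * No vmalloc check required, so we can allow
                         * interrupts immediately at this point.
                         */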
                        local_irq_enable();

                        set_thread_flag(TIF_ASYNC_TLB);
                        if (async->fault_num != 0) {
                                panic("Second async fault %d;"
                                      " old fault was %d (%#lx/%ld)",
                                      fault_num, async->fault_num,
                                      address, write);
                        }
                        BUG_ON(fault_num == 0);
                        async->fault_num = fault_num;
                        async->is_fault = is_page_fault;
                        async->is_write = write;
                        async->address = address;
                        return;
                }
        }
#endif

        handle_page_fault(regs, fault_num, is_page_fault, address, write);
}

#if CHIP_HAS_TILE_DMA()
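/*
 * This routine effectively re-issues asynchronous page faults
 * when we are returning to user space.
 */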
void do_async_page_fault(struct pt_regs *regs)
{
        struct async_tlb *async = &current->thread.dma_async_tlb;

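        /*
         * Clear thread flag early.  If we re-interrupt while processing
         * code here, we will reset it and recall this routine before
         * returning to user space.
         */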
        clear_thread_flag(TIF_ASYNC_TLB);

        if (async->fault_num) {
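                /*
                 * Clear async->fault_num before calling the page-fault
                 * handler so that if we re-interrupt before returning
                 * from the function we have somewhere to put the
                 * information from the new interrupt.
                 */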
                int fault_num = async->fault_num;
                async->fault_num = 0;
                handle_page_fault(regs, fault_num, async->is_fault,
                                  async->address, async->is_write);
        }
}
#endif

void vmalloc_sync_all(void)
{
#ifdef __tilegx__
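        /* Currently all L1 kernel pmd's are static and shared. */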
        BUILD_BUG_ON(pgd_index(VMALLOC_END - PAGE_SIZE) !=
                     pgd_index(VMALLOC_START));
#else
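        /*
         * Note that races in the updates of insync and start aren't
         * problematic: insync can only get set bits added, and updates
         * to start are only improving performance (without affecting
         * correctness if undone).
         */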
        static DECLARE_BITMAP(insync, PTRS_PER_PGD);
        static unsigned long start = PAGE_OFFSET;
        unsigned long address;

        BUILD_BUG_ON(PAGE_OFFSET & ~PGDIR_MASK);
        for (address = start; address >= PAGE_OFFSET; address += PGDIR_SIZE) {
                if (!test_bit(pgd_index(address), insync)) {
                        unsigned long flags;
                        struct list_head *pos;

                        spin_lock_irqsave(&pgd_lock, flags);
                        list_for_each(pos, &pgd_list)
                                if (!vmalloc_sync_one(list_to_pgd(pos),
                                                      address)) {
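                                        /* Must be at first entry in list. */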
                                        BUG_ON(pos != pgd_list.next);
                                        break;
                                }
                        spin_unlock_irqrestore(&pgd_lock, flags);
                        if (pos != pgd_list.next)
                                set_bit(pgd_index(address), insync);
                }
                if (address == start && test_bit(pgd_index(address), insync))
                        start = address + PGDIR_SIZE;
        }
#endif
}