/*
 *  S390 page fault handling.
 *
 *  Low-level handlers for DAT and protection exceptions and for the
 *  z/VM pfault (pseudo page fault) mechanism.
 */
#include <linux/kernel_stat.h>
#include <linux/perf_event.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/compat.h>
#include <linux/smp.h>
#include <linux/kdebug.h>
#include <linux/init.h>
#include <linux/console.h>
#include <linux/extable.h>
#include <linux/hardirq.h>
#include <linux/kprobes.h>
#include <linux/uaccess.h>
#include <linux/hugetlb.h>
#include <asm/asm-offsets.h>
#include <asm/diag.h>
#include <asm/pgtable.h>
#include <asm/gmap.h>
#include <asm/irq.h>
#include <asm/mmu_context.h>
#include <asm/facility.h>
#include "../kernel/entry.h"

#define __FAIL_ADDR_MASK -4096L
#define __SUBCODE_MASK 0x0600
#define __PF_RES_FIELD 0x8000000000000000ULL

/* s390-specific fault codes, outside the range of the generic VM_FAULT_* flags. */
#define VM_FAULT_BADCONTEXT	0x010000
#define VM_FAULT_BADMAP		0x020000
#define VM_FAULT_BADACCESS	0x040000
#define VM_FAULT_SIGNAL		0x080000
#define VM_FAULT_PFAULT		0x100000
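/*
 * store_indication is a mask for the fetch/store-indication bits in the
 * translation exception identification. It is only set if facility 75
 * (access-exception fetch/store indication) is installed; do_exception()
 * uses it to detect store accesses and set FAULT_FLAG_WRITE.
 */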
static unsigned long store_indication __read_mostly;

static int __init fault_init(void)
{
	if (test_facility(75))
		store_indication = 0xc00;
	return 0;
}
early_initcall(fault_init);
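/*
 * Give kprobes a chance to handle the fault. Returns 1 if a kprobe
 * fault handler took care of it, 0 otherwise.
 */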
static inline int notify_page_fault(struct pt_regs *regs)
{
	int ret = 0;

	/* kprobe_running() needs smp_processor_id() */
	if (kprobes_built_in() && !user_mode(regs)) {
		preempt_disable();
		if (kprobe_running() && kprobe_fault_handler(regs, 14))
			ret = 1;
		preempt_enable();
	}
	return ret;
}
/*
 * Unlock any spinlocks which will prevent us from getting the
 * message out.
 */
void bust_spinlocks(int yes)
{
	if (yes) {
		oops_in_progress = 1;
	} else {
		int loglevel_save = console_loglevel;
		console_unblank();
		oops_in_progress = 0;
		/*
		 * OK, the message is on the console.  Now we call printk()
		 * without oops_in_progress set so that printk will give klogd
		 * a poke.  Hold onto your hats...
		 */
		console_loglevel = 15;
		printk(" ");
		console_loglevel = loglevel_save;
	}
}
/*
 * Find out which address space caused the exception.
 * Returns nonzero for a fault in the user address space,
 * zero for a fault in the kernel address space.
 */
static inline int user_space_fault(struct pt_regs *regs)
{
	unsigned long trans_exc_code;

	/*
	 * The lowest two bits of the translation exception
	 * identification indicate which paging table was used.
	 */
	trans_exc_code = regs->int_parm_long & 3;
	if (trans_exc_code == 3)
		return 0;
	if (user_mode(regs))
		return 1;
	if (trans_exc_code == 2)
		return current->thread.mm_segment.ar4;
	if (current->flags & PF_VCPU)
		return 1;
	return 0;
}
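/*
 * Check whether a kernel pointer can be dereferenced safely.
 * Returns non-zero if the address cannot be read.
 */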
static int bad_address(void *p)
{
	unsigned long dummy;

	return probe_kernel_address((unsigned long *)p, dummy);
}
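/*
 * Walk the page table for the given address space control element (ASCE)
 * and print the table entries that lead to the faulting address. The
 * switch falls through from the top-level region table down to the page
 * table, stopping at invalid or large entries.
 */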
static void dump_pagetable(unsigned long asce, unsigned long address)
{
	unsigned long *table = __va(asce & PAGE_MASK);

	pr_alert("AS:%016lx ", asce);
	switch (asce & _ASCE_TYPE_MASK) {
	case _ASCE_TYPE_REGION1:
		table = table + ((address >> 53) & 0x7ff);
		if (bad_address(table))
			goto bad;
		pr_cont("R1:%016lx ", *table);
		if (*table & _REGION_ENTRY_INVALID)
			goto out;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		/* fallthrough */
	case _ASCE_TYPE_REGION2:
		table = table + ((address >> 42) & 0x7ff);
		if (bad_address(table))
			goto bad;
		pr_cont("R2:%016lx ", *table);
		if (*table & _REGION_ENTRY_INVALID)
			goto out;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		/* fallthrough */
	case _ASCE_TYPE_REGION3:
		table = table + ((address >> 31) & 0x7ff);
		if (bad_address(table))
			goto bad;
		pr_cont("R3:%016lx ", *table);
		if (*table & (_REGION_ENTRY_INVALID | _REGION3_ENTRY_LARGE))
			goto out;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		/* fallthrough */
	case _ASCE_TYPE_SEGMENT:
		table = table + ((address >> 20) & 0x7ff);
		if (bad_address(table))
			goto bad;
		pr_cont("S:%016lx ", *table);
		if (*table & (_SEGMENT_ENTRY_INVALID | _SEGMENT_ENTRY_LARGE))
			goto out;
		table = (unsigned long *)(*table & _SEGMENT_ENTRY_ORIGIN);
	}
	table = table + ((address >> 12) & 0xff);
	if (bad_address(table))
		goto bad;
	pr_cont("P:%016lx ", *table);
out:
	pr_cont("\n");
	return;
bad:
	pr_cont("BAD\n");
}
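/*
 * Print the failing address, the translation exception identification
 * (TEID), the address space the fault happened in and the page table
 * entries for the faulting address.
 */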
static void dump_fault_info(struct pt_regs *regs)
{
	unsigned long asce;

	pr_alert("Failing address: %016lx TEID: %016lx\n",
		 regs->int_parm_long & __FAIL_ADDR_MASK, regs->int_parm_long);
	pr_alert("Fault in ");
	switch (regs->int_parm_long & 3) {
	case 3:
		pr_cont("home space ");
		break;
	case 2:
		pr_cont("secondary space ");
		break;
	case 1:
		pr_cont("access register ");
		break;
	case 0:
		pr_cont("primary space ");
		break;
	}
	pr_cont("mode while using ");
	if (!user_space_fault(regs)) {
		asce = S390_lowcore.kernel_asce;
		pr_cont("kernel ");
	}
#ifdef CONFIG_PGSTE
	else if ((current->flags & PF_VCPU) && S390_lowcore.gmap) {
		struct gmap *gmap = (struct gmap *)S390_lowcore.gmap;
		asce = gmap->asce;
		pr_cont("gmap ");
	}
#endif
	else {
		asce = S390_lowcore.user_asce;
		pr_cont("user ");
	}
	pr_cont("ASCE.\n");
	dump_pagetable(asce, regs->int_parm_long & __FAIL_ADDR_MASK);
}

int show_unhandled_signals = 1;
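/*
 * Print a rate-limited message about an unhandled fault in a user
 * process, including the interruption code, the faulting code location
 * and, for memory management faults, the fault info and registers.
 */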
void report_user_fault(struct pt_regs *regs, long signr, int is_mm_fault)
{
	if ((task_pid_nr(current) > 1) && !show_unhandled_signals)
		return;
	if (!unhandled_signal(current, signr))
		return;
	if (!printk_ratelimit())
		return;
	printk(KERN_ALERT "User process fault: interruption code %04x ilc:%d ",
	       regs->int_code & 0xffff, regs->int_code >> 17);
	print_vma_addr(KERN_CONT "in ", regs->psw.addr);
	printk(KERN_CONT "\n");
	if (is_mm_fault)
		dump_fault_info(regs);
	show_regs(regs);
}

/*
 * Send SIGSEGV to task.  This is an external routine
 * to keep the stack usage of do_page_fault small.
 */
static noinline void do_sigsegv(struct pt_regs *regs, int si_code)
{
	struct siginfo si;

	report_user_fault(regs, SIGSEGV, 1);
	si.si_signo = SIGSEGV;
	si.si_errno = 0;
	si.si_code = si_code;
	si.si_addr = (void __user *)(regs->int_parm_long & __FAIL_ADDR_MASK);
	force_sig_info(SIGSEGV, &si, current);
}
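/*
 * Handle a fault in kernel context: use an exception table fixup if one
 * exists for the faulting instruction, otherwise print the fault info
 * and oops.
 */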
static noinline void do_no_context(struct pt_regs *regs)
{
	const struct exception_table_entry *fixup;

	/* Are we prepared to handle this kernel fault? */
	fixup = search_exception_tables(regs->psw.addr);
	if (fixup) {
		regs->psw.addr = extable_fixup(fixup);
		return;
	}

	/*
	 * Oops. The kernel tried to access some bad page. We'll have to
	 * terminate things with extreme prejudice.
	 */
	if (!user_space_fault(regs))
		printk(KERN_ALERT "Unable to handle kernel pointer dereference"
		       " in virtual kernel address space\n");
	else
		printk(KERN_ALERT "Unable to handle kernel paging request"
		       " in virtual user address space\n");
	dump_fault_info(regs);
	die(regs, "Oops");
	do_exit(SIGKILL);
}

static noinline void do_low_address(struct pt_regs *regs)
{
	/*
	 * Low-address protection hit in kernel mode means
	 * NULL pointer write access in kernel mode.
	 */
	if (regs->psw.mask & PSW_MASK_PSTATE) {
		/* Low-address protection hit in user mode 'cannot happen'. */
		die(regs, "Low-address protection");
		do_exit(SIGKILL);
	}

	do_no_context(regs);
}
static noinline void do_sigbus(struct pt_regs *regs)
{
	struct task_struct *tsk = current;
	struct siginfo si;

	/*
	 * Send a sigbus, regardless of whether we were in kernel
	 * or user mode.
	 */
	si.si_signo = SIGBUS;
	si.si_errno = 0;
	si.si_code = BUS_ADRERR;
	si.si_addr = (void __user *)(regs->int_parm_long & __FAIL_ADDR_MASK);
	force_sig_info(SIGBUS, &si, tsk);
}
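/*
 * Translate the fault code returned by do_exception() into a signal for
 * user space or into a kernel exception fixup/oops via do_no_context().
 */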
static noinline void do_fault_error(struct pt_regs *regs, int fault)
{
	int si_code;

	switch (fault) {
	case VM_FAULT_BADACCESS:
	case VM_FAULT_BADMAP:
		/* Bad memory access. Check if it is kernel or user space. */
		if (user_mode(regs)) {
			/* User mode accesses just cause a SIGSEGV */
			si_code = (fault == VM_FAULT_BADMAP) ?
				SEGV_MAPERR : SEGV_ACCERR;
			do_sigsegv(regs, si_code);
			return;
		}
		/* fallthrough */
	case VM_FAULT_BADCONTEXT:
	case VM_FAULT_PFAULT:
		do_no_context(regs);
		break;
	case VM_FAULT_SIGNAL:
		if (!user_mode(regs))
			do_no_context(regs);
		break;
	default:
		if (fault & VM_FAULT_OOM) {
			if (!user_mode(regs))
				do_no_context(regs);
			else
				pagefault_out_of_memory();
		} else if (fault & VM_FAULT_SIGSEGV) {
			/* Kernel mode? Handle exceptions or die */
			if (!user_mode(regs))
				do_no_context(regs);
			else
				do_sigsegv(regs, SEGV_MAPERR);
		} else if (fault & VM_FAULT_SIGBUS) {
			/* Kernel mode? Handle exceptions or die */
			if (!user_mode(regs))
				do_no_context(regs);
			else
				do_sigbus(regs);
		} else
			BUG();
		break;
	}
}

/*
 * This routine handles page faults.  It determines the address,
 * and the problem, and then passes it off to one of the appropriate
 * routines.
 *
 * interruption code (int_code):
 *   04       Protection           ->  Write-Protection  (suppression)
 *   10       Segment translation  ->  Not present       (nullification)
 *   11       Page translation     ->  Not present       (nullification)
 *   3b       Region third trans.  ->  Not present       (nullification)
 */
static inline int do_exception(struct pt_regs *regs, int access)
{
#ifdef CONFIG_PGSTE
	struct gmap *gmap;
#endif
	struct task_struct *tsk;
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	unsigned long trans_exc_code;
	unsigned long address;
	unsigned int flags;
	int fault;

	tsk = current;
	/*
	 * The instruction that caused the program check has
	 * been nullified. Don't signal single step via SIGTRAP.
	 */
	clear_pt_regs_flag(regs, PIF_PER_TRAP);

	if (notify_page_fault(regs))
		return 0;

	mm = tsk->mm;
	trans_exc_code = regs->int_parm_long;

	/*
	 * Verify that the fault happened in user space, that
	 * we are not in an interrupt and that there is a
	 * user context.
	 */
	fault = VM_FAULT_BADCONTEXT;
	if (unlikely(!user_space_fault(regs) || faulthandler_disabled() || !mm))
		goto out;

	address = trans_exc_code & __FAIL_ADDR_MASK;
	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
	flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
	if (user_mode(regs))
		flags |= FAULT_FLAG_USER;
	if (access == VM_WRITE || (trans_exc_code & store_indication) == 0x400)
		flags |= FAULT_FLAG_WRITE;
	down_read(&mm->mmap_sem);

#ifdef CONFIG_PGSTE
	gmap = (current->flags & PF_VCPU) ?
		(struct gmap *) S390_lowcore.gmap : NULL;
	if (gmap) {
		current->thread.gmap_addr = address;
		current->thread.gmap_write_flag = !!(flags & FAULT_FLAG_WRITE);
		current->thread.gmap_int_code = regs->int_code & 0xffff;
		address = __gmap_translate(gmap, address);
		if (address == -EFAULT) {
			fault = VM_FAULT_BADMAP;
			goto out_up;
		}
		if (gmap->pfault_enabled)
			flags |= FAULT_FLAG_RETRY_NOWAIT;
	}
#endif

retry:
	fault = VM_FAULT_BADMAP;
	vma = find_vma(mm, address);
	if (!vma)
		goto out_up;

	if (unlikely(vma->vm_start > address)) {
		if (!(vma->vm_flags & VM_GROWSDOWN))
			goto out_up;
		if (expand_stack(vma, address))
			goto out_up;
	}

	/*
	 * Ok, we have a good vm_area for this memory access, so
	 * we can handle it..
	 */
	fault = VM_FAULT_BADACCESS;
	if (unlikely(!(vma->vm_flags & access)))
		goto out_up;

	if (is_vm_hugetlb_page(vma))
		address &= HPAGE_MASK;
	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	fault = handle_mm_fault(vma, address, flags);
	/* No reason to continue if interrupted by SIGKILL. */
	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)) {
		fault = VM_FAULT_SIGNAL;
		goto out;
	}
	if (unlikely(fault & VM_FAULT_ERROR))
		goto out_up;

	/*
	 * Major/minor page fault accounting is only done on the
	 * initial attempt. If we go through a retry, it is extremely
	 * likely that the page will be found in page cache at that point.
	 */
	if (flags & FAULT_FLAG_ALLOW_RETRY) {
		if (fault & VM_FAULT_MAJOR) {
			tsk->maj_flt++;
			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
				      regs, address);
		} else {
			tsk->min_flt++;
			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
				      regs, address);
		}
		if (fault & VM_FAULT_RETRY) {
#ifdef CONFIG_PGSTE
			if (gmap && (flags & FAULT_FLAG_RETRY_NOWAIT)) {
				/*
				 * FAULT_FLAG_RETRY_NOWAIT has been set,
				 * mmap_sem has not been released
				 */
				current->thread.gmap_pfault = 1;
				fault = VM_FAULT_PFAULT;
				goto out_up;
			}
#endif
			/*
			 * Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk
			 * of starvation.
			 */
			flags &= ~(FAULT_FLAG_ALLOW_RETRY |
				   FAULT_FLAG_RETRY_NOWAIT);
			flags |= FAULT_FLAG_TRIED;
			down_read(&mm->mmap_sem);
			goto retry;
		}
	}
#ifdef CONFIG_PGSTE
	if (gmap) {
		address = __gmap_link(gmap, current->thread.gmap_addr,
				      address);
		if (address == -EFAULT) {
			fault = VM_FAULT_BADMAP;
			goto out_up;
		}
		if (address == -ENOMEM) {
			fault = VM_FAULT_OOM;
			goto out_up;
		}
	}
#endif
	fault = 0;
out_up:
	up_read(&mm->mmap_sem);
out:
	return fault;
}
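/*
 * Entry point for protection exceptions (write protection and
 * low-address protection).
 */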
void do_protection_exception(struct pt_regs *regs)
{
	unsigned long trans_exc_code;
	int fault;

	trans_exc_code = regs->int_parm_long;
	/*
	 * Protection exceptions are suppressing, decrement psw address.
	 * The exception to this rule are aborted transactions, for these
	 * the PSW already points to the correct location.
	 */
	if (!(regs->int_code & 0x200))
		regs->psw.addr = __rewind_psw(regs->psw, regs->int_code >> 16);
	/*
	 * Check for low-address protection.  This needs to be treated
	 * as a special case because the translation exception code
	 * field is not guaranteed to contain valid data in this case.
	 */
	if (unlikely(!(trans_exc_code & 4))) {
		do_low_address(regs);
		return;
	}
	fault = do_exception(regs, VM_WRITE);
	if (unlikely(fault))
		do_fault_error(regs, fault);
}
NOKPROBE_SYMBOL(do_protection_exception);
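/*
 * Entry point for DAT exceptions (segment, page and region translation
 * exceptions): any access type may have caused the fault.
 */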
void do_dat_exception(struct pt_regs *regs)
{
	int access, fault;

	access = VM_READ | VM_EXEC | VM_WRITE;
	fault = do_exception(regs, access);
	if (unlikely(fault))
		do_fault_error(regs, fault);
}
NOKPROBE_SYMBOL(do_dat_exception);

#ifdef CONFIG_PFAULT
/*
 * 'pfault' pseudo page faults routines.
 */
static int pfault_disable;

static int __init nopfault(char *str)
{
	pfault_disable = 1;
	return 1;
}
__setup("nopfault", nopfault);
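/*
 * Parameter block for the DIAGNOSE 0x258 ("pfault") calls below. It
 * describes the pseudo-page-fault handshake with the hypervisor:
 * function code 0 enables the token mechanism, function code 1 cancels
 * it (see pfault_init() and pfault_fini()).
 */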
struct pfault_refbk {
	u16 refdiagc;
	u16 reffcode;
	u16 refdwlen;
	u16 refversn;
	u64 refgaddr;
	u64 refselmk;
	u64 refcmpmk;
	u64 reserved;
} __attribute__ ((packed, aligned(8)));
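/*
 * Enable the pseudo page fault mechanism: issue DIAGNOSE 0x258 with
 * function code 0. If the diagnose itself fails, the exception table
 * fixup sets rc to 8, so a non-zero return value means pfault could
 * not be enabled.
 */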
int pfault_init(void)
{
	struct pfault_refbk refbk = {
		.refdiagc = 0x258,
		.reffcode = 0,
		.refdwlen = 5,
		.refversn = 2,
		.refgaddr = __LC_LPP,
		.refselmk = 1ULL << 48,
		.refcmpmk = 1ULL << 48,
		.reserved = __PF_RES_FIELD };
	int rc;

	if (pfault_disable)
		return -1;
	diag_stat_inc(DIAG_STAT_X258);
	asm volatile(
		"	diag	%1,%0,0x258\n"
		"0:	j	2f\n"
		"1:	la	%0,8\n"
		"2:\n"
		EX_TABLE(0b,1b)
		: "=d" (rc) : "a" (&refbk), "m" (refbk) : "cc");
	return rc;
}
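/*
 * Disable the pseudo page fault mechanism again: DIAGNOSE 0x258 with
 * function code 1 cancels the token established by pfault_init().
 */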
void pfault_fini(void)
{
	struct pfault_refbk refbk = {
		.refdiagc = 0x258,
		.reffcode = 1,
		.refdwlen = 5,
		.refversn = 2,
	};

	if (pfault_disable)
		return;
	diag_stat_inc(DIAG_STAT_X258);
	asm volatile(
		"	diag	%0,0,0x258\n"
		"0:	nopr	%%r7\n"
		EX_TABLE(0b,0b)
		: : "a" (&refbk), "m" (refbk) : "cc");
}

static DEFINE_SPINLOCK(pfault_lock);
static LIST_HEAD(pfault_list);

#define PF_COMPLETE	0x0080
/*
 * The mechanism of our pfault code: if Linux is running as a guest,
 * runs a user space process and the user space process accesses a page
 * that the host has paged out, we get a pfault interrupt.
 *
 * This allows us, within the guest, to schedule a different process.
 * Without this mechanism the host would have to suspend the whole
 * virtual cpu until the page has been paged in.
 *
 * So when we get such an interrupt we set the state of the current task
 * to uninterruptible and also set the need_resched flag. Both happens
 * within interrupt context(!). If we later on want to return to user
 * space we recognize the need_resched flag and then call schedule().
 *
 * Of course we have a lot of additional fun with the completion
 * interrupt (-> host signals that a page of a process has been paged in
 * and the process can continue to run). This interrupt can arrive on
 * any cpu and, since we have virtual cpus, actually appear before the
 * interrupt that signals that a page is missing.
 */
static void pfault_interrupt(struct ext_code ext_code,
			     unsigned int param32, unsigned long param64)
{
	struct task_struct *tsk;
	__u16 subcode;
	pid_t pid;

	/*
	 * Get the external interruption subcode & pfault initial/completion
	 * signal bit. VM stores this in the 'cpu address' field associated
	 * with the external interrupt.
	 */
	subcode = ext_code.subcode;
	if ((subcode & 0xff00) != __SUBCODE_MASK)
		return;
	inc_irq_stat(IRQEXT_PFL);
	/* The interrupt parameter carries the pid of the affected task. */
	pid = param64 & LPP_PFAULT_PID_MASK;
	rcu_read_lock();
	tsk = find_task_by_pid_ns(pid, &init_pid_ns);
	if (tsk)
		get_task_struct(tsk);
	rcu_read_unlock();
	if (!tsk)
		return;
	spin_lock(&pfault_lock);
	if (subcode & PF_COMPLETE) {
		/* signal bit is set -> a page has been swapped in by VM */
		if (tsk->thread.pfault_wait == 1) {
			/*
			 * Initial interrupt was faster than the completion
			 * interrupt. pfault_wait is valid. Set pfault_wait
			 * back to zero and wake up the process. This can
			 * safely be done because the task is still sleeping
			 * and can't produce new pfaults.
			 */
			tsk->thread.pfault_wait = 0;
			list_del(&tsk->thread.list);
			wake_up_process(tsk);
			put_task_struct(tsk);
		} else {
			/*
			 * Completion interrupt was faster than initial
			 * interrupt. Set pfault_wait to -1 so the initial
			 * interrupt doesn't put the task to sleep.
			 * If the task is not running, ignore the completion
			 * interrupt since it must be a leftover of a PFAULT
			 * CANCEL operation which didn't remove all pending
			 * completion interrupts.
			 */
			if (tsk->state == TASK_RUNNING)
				tsk->thread.pfault_wait = -1;
		}
	} else {
		/* signal bit not set -> a real page is missing. */
		if (WARN_ON_ONCE(tsk != current))
			goto out;
		if (tsk->thread.pfault_wait == 1) {
			/* Already on the list with a reference: put to sleep */
			goto block;
		} else if (tsk->thread.pfault_wait == -1) {
			/*
			 * Completion interrupt was faster than the initial
			 * interrupt (pfault_wait == -1). Set pfault_wait
			 * back to zero and exit.
			 */
			tsk->thread.pfault_wait = 0;
		} else {
			/*
			 * Initial interrupt arrived before completion
			 * interrupt. Let the task sleep.
			 * An extra task reference is needed since a different
			 * cpu may set the task state to TASK_RUNNING again
			 * before the scheduler is reached.
			 */
			get_task_struct(tsk);
			tsk->thread.pfault_wait = 1;
			list_add(&tsk->thread.list, &pfault_list);
block:
			/*
			 * Since this must be a userspace fault, there
			 * is no kernel task state to trample. Rely on the
			 * return to userspace schedule() to block.
			 */
			__set_current_state(TASK_UNINTERRUPTIBLE);
			set_tsk_need_resched(tsk);
		}
	}
out:
	spin_unlock(&pfault_lock);
	put_task_struct(tsk);
}
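/*
 * CPU hotplug "dead" callback: wake up and release every task that is
 * still waiting on the pfault list.
 */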
static int pfault_cpu_dead(unsigned int cpu)
{
	struct thread_struct *thread, *next;
	struct task_struct *tsk;

	spin_lock_irq(&pfault_lock);
	list_for_each_entry_safe(thread, next, &pfault_list, list) {
		thread->pfault_wait = 0;
		list_del(&thread->list);
		tsk = container_of(thread, struct task_struct, thread);
		wake_up_process(tsk);
		put_task_struct(tsk);
	}
	spin_unlock_irq(&pfault_lock);
	return 0;
}
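/*
 * Register the external interrupt handler for pfault, enable the
 * mechanism via pfault_init() and set up the cpu hotplug callback.
 * If anything fails, pfault is disabled for the lifetime of the system.
 */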
static int __init pfault_irq_init(void)
{
	int rc;

	rc = register_external_irq(EXT_IRQ_CP_SERVICE, pfault_interrupt);
	if (rc)
		goto out_extint;
	rc = pfault_init() == 0 ? 0 : -EOPNOTSUPP;
	if (rc)
		goto out_pfault;
	irq_subclass_register(IRQ_SUBCLASS_SERVICE_SIGNAL);
	cpuhp_setup_state_nocalls(CPUHP_S390_PFAULT_DEAD, "s390/pfault:dead",
				  NULL, pfault_cpu_dead);
	return 0;

out_pfault:
	unregister_external_irq(EXT_IRQ_CP_SERVICE, pfault_interrupt);
out_extint:
	pfault_disable = 1;
	return rc;
}
early_initcall(pfault_irq_init);

#endif