/*
 *  linux/arch/arm/mm/fault.c
 *
 *  Copyright (C) 1995  Linus Torvalds
 *  Modifications for ARM processor (c) 1995-2004 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/extable.h>
#include <linux/signal.h>
#include <linux/mm.h>
#include <linux/hardirq.h>
#include <linux/init.h>
#include <linux/kprobes.h>
#include <linux/uaccess.h>
#include <linux/page-flags.h>
#include <linux/sched/signal.h>
#include <linux/sched/debug.h>
#include <linux/highmem.h>
#include <linux/perf_event.h>

#include <asm/pgtable.h>
#include <asm/system_misc.h>
#include <asm/system_info.h>
#include <asm/tlbflush.h>

#include "fault.h"

#ifdef CONFIG_MMU

#ifdef CONFIG_KPROBES
static inline int notify_page_fault(struct pt_regs *regs, unsigned int fsr)
{
	int ret = 0;

	if (!user_mode(regs)) {
		/* kprobe_running() needs smp_processor_id() */
		preempt_disable();
		if (kprobe_running() && kprobe_fault_handler(regs, fsr))
			ret = 1;
		preempt_enable();
	}

	return ret;
}
#else
static inline int notify_page_fault(struct pt_regs *regs, unsigned int fsr)
{
	return 0;
}
#endif

/*
 * This is useful to dump out the page tables associated with
 * 'addr' in mm 'mm'.
 */
void show_pte(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;

	if (!mm)
		mm = &init_mm;

	pr_alert("pgd = %p\n", mm->pgd);
	pgd = pgd_offset(mm, addr);
	pr_alert("[%08lx] *pgd=%08llx",
			addr, (long long)pgd_val(*pgd));

	do {
		pud_t *pud;
		pmd_t *pmd;
		pte_t *pte;

		if (pgd_none(*pgd))
			break;

		if (pgd_bad(*pgd)) {
			pr_cont("(bad)");
			break;
		}

		pud = pud_offset(pgd, addr);
		if (PTRS_PER_PUD != 1)
			pr_cont(", *pud=%08llx", (long long)pud_val(*pud));

		if (pud_none(*pud))
			break;

		if (pud_bad(*pud)) {
			pr_cont("(bad)");
			break;
		}

		pmd = pmd_offset(pud, addr);
		if (PTRS_PER_PMD != 1)
			pr_cont(", *pmd=%08llx", (long long)pmd_val(*pmd));

		if (pmd_none(*pmd))
			break;

		if (pmd_bad(*pmd)) {
			pr_cont("(bad)");
			break;
		}

		/* We must not map this if we have highmem enabled */
		if (PageHighMem(pfn_to_page(pmd_val(*pmd) >> PAGE_SHIFT)))
			break;

		pte = pte_offset_map(pmd, addr);
		pr_cont(", *pte=%08llx", (long long)pte_val(*pte));
#ifndef CONFIG_ARM_LPAE
		pr_cont(", *ppte=%08llx",
			(long long)pte_val(pte[PTE_HWTABLE_PTRS]));
#endif
		pte_unmap(pte);
	} while(0);

	pr_cont("\n");
}
#else					/* CONFIG_MMU */
void show_pte(struct mm_struct *mm, unsigned long addr)
{ }
#endif					/* CONFIG_MMU */

/*
 * Oops.  The kernel tried to access some page that wasn't present.
 */
static void
__do_kernel_fault(struct mm_struct *mm, unsigned long addr, unsigned int fsr,
		  struct pt_regs *regs)
{
	/*
	 * Are we prepared to handle this kernel fault?
	 */
	if (fixup_exception(regs))
		return;

	/*
	 * No handler, we'll have to terminate things with extreme prejudice.
	 */
	bust_spinlocks(1);
	pr_alert("Unable to handle kernel %s at virtual address %08lx\n",
		 (addr < PAGE_SIZE) ? "NULL pointer dereference" :
		 "paging request", addr);

	show_pte(mm, addr);
	die("Oops", regs, fsr);
	bust_spinlocks(0);
	do_exit(SIGKILL);
}

/*
 * Something tried to access memory that isn't in our memory map..
 * User mode accesses just cause a SIGSEGV
 */
static void
__do_user_fault(struct task_struct *tsk, unsigned long addr,
		unsigned int fsr, unsigned int sig, int code,
		struct pt_regs *regs)
{
	if (addr > TASK_SIZE)
		harden_branch_predictor();

#ifdef CONFIG_DEBUG_USER
	if (((user_debug & UDBG_SEGV) && (sig == SIGSEGV)) ||
	    ((user_debug & UDBG_BUS) && (sig == SIGBUS))) {
		printk(KERN_DEBUG "%s: unhandled page fault (%d) at 0x%08lx, code 0x%03x\n",
		       tsk->comm, sig, addr, fsr);
		show_pte(tsk->mm, addr);
		show_regs(regs);
	}
#endif

	tsk->thread.address = addr;
	tsk->thread.error_code = fsr;
	tsk->thread.trap_no = 14;
	force_sig_fault(sig, code, (void __user *)addr, tsk);
}

void do_bad_area(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
{
	struct task_struct *tsk = current;
	struct mm_struct *mm = tsk->active_mm;

	/*
	 * If we are in kernel mode at this point, we
	 * have no context to handle this fault with.
	 */
	if (user_mode(regs))
		__do_user_fault(tsk, addr, fsr, SIGSEGV, SEGV_MAPERR, regs);
	else
		__do_kernel_fault(mm, addr, fsr, regs);
}

#ifdef CONFIG_MMU
#define VM_FAULT_BADMAP		0x010000
#define VM_FAULT_BADACCESS	0x020000
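
/*
 * These two values are private to this file: they sit above the
 * generic VM_FAULT_* bits and are only used to tell do_page_fault()
 * why __do_page_fault() gave up, so they must never leak out to
 * generic code.
 */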

/*
 * Check that the permissions on the VMA allow for the fault which
 * occurred.
 */
static inline bool access_error(unsigned int fsr, struct vm_area_struct *vma)
{
	unsigned int mask = VM_READ | VM_WRITE | VM_EXEC;

	if (fsr & FSR_WRITE)
		mask = VM_WRITE;
	if (fsr & FSR_LNX_PF)
		mask = VM_EXEC;

	return vma->vm_flags & mask ? false : true;
}
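
/*
 * Worked example: a store to a VMA mapped read-only arrives with
 * FSR_WRITE set, so the mask collapses to VM_WRITE. The VMA lacks
 * VM_WRITE, access_error() returns true, __do_page_fault() reports
 * VM_FAULT_BADACCESS, and the task gets SIGSEGV with SEGV_ACCERR.
 */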

static vm_fault_t __kprobes
__do_page_fault(struct mm_struct *mm, unsigned long addr, unsigned int fsr,
		unsigned int flags, struct task_struct *tsk)
{
	struct vm_area_struct *vma;
	vm_fault_t fault;

	vma = find_vma(mm, addr);
	fault = VM_FAULT_BADMAP;
	if (unlikely(!vma))
		goto out;
	if (unlikely(vma->vm_start > addr))
		goto check_stack;

	/*
	 * Ok, we have a good vm_area for this
	 * memory access, so we can handle it
	 */
good_area:
	if (access_error(fsr, vma)) {
		fault = VM_FAULT_BADACCESS;
		goto out;
	}

	return handle_mm_fault(vma, addr & PAGE_MASK, flags);

check_stack:
	/* Don't allow expansion below FIRST_USER_ADDRESS */
	if (vma->vm_flags & VM_GROWSDOWN &&
	    addr >= FIRST_USER_ADDRESS && !expand_stack(vma, addr))
		goto good_area;
out:
	return fault;
}

static int __kprobes
do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
{
	struct task_struct *tsk;
	struct mm_struct *mm;
	int sig, code;
	vm_fault_t fault;
	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;

	if (notify_page_fault(regs, fsr))
		return 0;

	tsk = current;
	mm = tsk->mm;

	/* Enable interrupts if they were enabled in the parent context. */
	if (interrupts_enabled(regs))
		local_irq_enable();

	/*
	 * If we're in an interrupt or have no user
	 * context, we must not take the fault..
	 */
	if (faulthandler_disabled() || !mm)
		goto no_context;

	if (user_mode(regs))
		flags |= FAULT_FLAG_USER;
	if (fsr & FSR_WRITE)
		flags |= FAULT_FLAG_WRITE;

	/*
	 * As per x86, we may deadlock here.  However, since the kernel only
	 * validly references user space from well defined areas of the code,
	 * we can bug out early if this is from code which shouldn't.
	 */
	if (!down_read_trylock(&mm->mmap_sem)) {
		if (!user_mode(regs) && !search_exception_tables(regs->ARM_pc))
			goto no_context;
retry:
		down_read(&mm->mmap_sem);
	} else {
		/*
		 * The above down_read_trylock() might have succeeded in
		 * which case, we'll have missed the might_sleep() from
		 * down_read()
		 */
		might_sleep();
#ifdef CONFIG_DEBUG_VM
		if (!user_mode(regs) &&
		    !search_exception_tables(regs->ARM_pc))
			goto no_context;
#endif
	}

	fault = __do_page_fault(mm, addr, fsr, flags, tsk);

	/* If we need to retry but a fatal signal is pending, handle the
	 * signal first. We do not need to release the mmap_sem because
	 * it would already be released in __lock_page_or_retry in
	 * mm/filemap.c. */
	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)) {
		if (!user_mode(regs))
			goto no_context;
		return 0;
	}

	/*
	 * Major/minor page fault accounting is only done on the
	 * initial attempt. If we go through a retry, it is extremely
	 * likely that the page will be found in page cache at that point.
	 */

	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, addr);
	if (!(fault & VM_FAULT_ERROR) && flags & FAULT_FLAG_ALLOW_RETRY) {
		if (fault & VM_FAULT_MAJOR) {
			tsk->maj_flt++;
			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
					regs, addr);
		} else {
			tsk->min_flt++;
			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
					regs, addr);
		}
		if (fault & VM_FAULT_RETRY) {
			/* Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk
			 * of starvation. */
			flags &= ~FAULT_FLAG_ALLOW_RETRY;
			flags |= FAULT_FLAG_TRIED;
			goto retry;
		}
	}
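
	/*
	 * Retry protocol: the first attempt runs with
	 * FAULT_FLAG_ALLOW_RETRY set. When handle_mm_fault() returns
	 * VM_FAULT_RETRY it has already dropped mmap_sem for us, so the
	 * "goto retry" above re-takes the lock and tries exactly once
	 * more, with FAULT_FLAG_TRIED instead.
	 */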

	up_read(&mm->mmap_sem);

	/*
	 * Handle the "normal" case first - VM_FAULT_MAJOR
	 */
	if (likely(!(fault & (VM_FAULT_ERROR | VM_FAULT_BADMAP | VM_FAULT_BADACCESS))))
		return 0;

	/*
	 * If we are in kernel mode at this point, we
	 * have no context to handle this fault with.
	 */
	if (!user_mode(regs))
		goto no_context;

	if (fault & VM_FAULT_OOM) {
		/*
		 * We ran out of memory, call the OOM killer, and return to
		 * userspace (which will retry the fault, or kill us if we
		 * got oom-killed)
		 */
		pagefault_out_of_memory();
		return 0;
	}

	if (fault & VM_FAULT_SIGBUS) {
		/*
		 * We had some memory, but were unable to
		 * successfully fix up this page fault.
		 */
		sig = SIGBUS;
		code = BUS_ADRERR;
	} else {
		/*
		 * Something tried to access memory that
		 * isn't in our memory map..
		 */
		sig = SIGSEGV;
		code = fault == VM_FAULT_BADACCESS ?
			SEGV_ACCERR : SEGV_MAPERR;
	}

	__do_user_fault(tsk, addr, fsr, sig, code, regs);
	return 0;

no_context:
	__do_kernel_fault(mm, addr, fsr, regs);
	return 0;
}
#else					/* CONFIG_MMU */
static int
do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
{
	return 0;
}
#endif					/* CONFIG_MMU */

/*
 * First Level Translation Fault Handler
 *
 * We enter here because the first level page table doesn't contain
 * a valid entry for the address.
 *
 * If the address is in kernel space (>= TASK_SIZE), then we are
 * probably faulting in the vmalloc() area.
 *
 * If the init_task's first level page table contains the relevant
 * entry, we copy it to this task.  If not, we send the process
 * a signal, fix up the exception, and continue the task.
 *
 * NOTE! We MUST NOT take any locks for this case. We may
 * be in an interrupt or a critical region, and should
 * only copy the information from the master page table,
 * nothing more.
 */
#ifdef CONFIG_MMU
static int __kprobes
do_translation_fault(unsigned long addr, unsigned int fsr,
		     struct pt_regs *regs)
{
	unsigned int index;
	pgd_t *pgd, *pgd_k;
	pud_t *pud, *pud_k;
	pmd_t *pmd, *pmd_k;

	if (addr < TASK_SIZE)
		return do_page_fault(addr, fsr, regs);

	if (user_mode(regs))
		goto bad_area;

	index = pgd_index(addr);

	pgd = cpu_get_pgd() + index;
	pgd_k = init_mm.pgd + index;

	if (pgd_none(*pgd_k))
		goto bad_area;
	if (!pgd_present(*pgd))
		set_pgd(pgd, *pgd_k);

	pud = pud_offset(pgd, addr);
	pud_k = pud_offset(pgd_k, addr);

	if (pud_none(*pud_k))
		goto bad_area;
	if (!pud_present(*pud))
		set_pud(pud, *pud_k);

	pmd = pmd_offset(pud, addr);
	pmd_k = pmd_offset(pud_k, addr);

#ifdef CONFIG_ARM_LPAE
	/*
	 * Only one hardware entry per PMD with LPAE.
	 */
	index = 0;
#else
	/*
	 * On ARM one Linux PGD entry contains two hardware entries (see page
	 * tables layout in pgtable.h). We normally guarantee that we always
	 * fill both L1 entries. But create_mapping() doesn't follow the rule.
	 * It can create individual L1 entries, so here we have to check
	 * pmd_none() for the correct entry index.
	 */
	index = (addr >> SECTION_SHIFT) & 1;
#endif
	if (pmd_none(pmd_k[index]))
		goto bad_area;

	copy_pmd(pmd, pmd_k);
	return 0;

bad_area:
	do_bad_area(addr, fsr, regs);
	return 0;
}
#else					/* CONFIG_MMU */
static int
do_translation_fault(unsigned long addr, unsigned int fsr,
		     struct pt_regs *regs)
{
	return 0;
}
#endif					/* CONFIG_MMU */

/*
 * Some section permission faults need to be handled gracefully.
 * They can happen due to a __{get,put}_user during an oops.
 */
#ifndef CONFIG_ARM_LPAE
static int
do_sect_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
{
	do_bad_area(addr, fsr, regs);
	return 0;
}
#endif /* CONFIG_ARM_LPAE */

/*
 * This abort handler always returns "fault".
 */
static int
do_bad(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
{
	return 1;
}

struct fsr_info {
	int	(*fn)(unsigned long addr, unsigned int fsr, struct pt_regs *regs);
	int	sig;
	int	code;
	const char *name;
};

/* FSR definition */
#ifdef CONFIG_ARM_LPAE
#include "fsr-3level.c"
#else
#include "fsr-2level.c"
#endif

void __init
hook_fault_code(int nr, int (*fn)(unsigned long, unsigned int, struct pt_regs *),
		int sig, int code, const char *name)
{
	if (nr < 0 || nr >= ARRAY_SIZE(fsr_info))
		BUG();

	fsr_info[nr].fn   = fn;
	fsr_info[nr].sig  = sig;
	fsr_info[nr].code = code;
	fsr_info[nr].name = name;
}
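
/*
 * Usage sketch (hypothetical handler name, for illustration only):
 * platform code can claim an implementation-defined fault status
 * before faults can be taken, e.g.
 *
 *	static int my_ext_abort(unsigned long addr, unsigned int fsr,
 *				struct pt_regs *regs)
 *	{
 *		return 0;	(return 0 once the abort is handled)
 *	}
 *
 *	hook_fault_code(16 + 6, my_ext_abort, SIGBUS, BUS_OBJERR,
 *			"imprecise external abort");
 *
 * The status number and the signal/code pair must match what the
 * hardware actually reports for that fault.
 */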

/*
 * Dispatch a data abort to the relevant handler.
 */
asmlinkage void
do_DataAbort(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
{
	const struct fsr_info *inf = fsr_info + fsr_fs(fsr);

	if (!inf->fn(addr, fsr & ~FSR_LNX_PF, regs))
		return;

	pr_alert("Unhandled fault: %s (0x%03x) at 0x%08lx\n",
		inf->name, fsr, addr);
	show_pte(current->mm, addr);

	arm_notify_die("", regs, inf->sig, inf->code, (void __user *)addr,
		       fsr, 0);
}

void __init
hook_ifault_code(int nr, int (*fn)(unsigned long, unsigned int, struct pt_regs *),
		 int sig, int code, const char *name)
{
	if (nr < 0 || nr >= ARRAY_SIZE(ifsr_info))
		BUG();

	ifsr_info[nr].fn   = fn;
	ifsr_info[nr].sig  = sig;
	ifsr_info[nr].code = code;
	ifsr_info[nr].name = name;
}

asmlinkage void
do_PrefetchAbort(unsigned long addr, unsigned int ifsr, struct pt_regs *regs)
{
	const struct fsr_info *inf = ifsr_info + fsr_fs(ifsr);

	if (!inf->fn(addr, ifsr | FSR_LNX_PF, regs))
		return;

	pr_alert("Unhandled prefetch abort: %s (0x%03x) at 0x%08lx\n",
		inf->name, ifsr, addr);

	arm_notify_die("", regs, inf->sig, inf->code, (void __user *)addr,
		       ifsr, 0);
}
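
/*
 * FSR_LNX_PF is not a hardware bit: it is a Linux-defined flag (see
 * fault.h) that do_PrefetchAbort() ORs into the status so that shared
 * handlers such as do_page_fault() can tell instruction fetches from
 * data accesses (access_error() turns it into a VM_EXEC check), while
 * do_DataAbort() masks it off before dispatching.
 */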

/*
 * Abort handler to be used only during first unmasking of asynchronous aborts
 * on the boot CPU. This makes sure that the machine will not die if the
 * firmware/bootloader left an imprecise abort pending for us to trip over.
 */
static int __init early_abort_handler(unsigned long addr, unsigned int fsr,
				      struct pt_regs *regs)
{
	pr_warn("Hit pending asynchronous external abort (FSR=0x%08x) during "
		"first unmask, this is most likely caused by a "
		"firmware/bootloader bug.\n", fsr);

	return 0;
}

void __init early_abt_enable(void)
{
	fsr_info[FSR_FS_AEA].fn = early_abort_handler;
	local_abt_enable();
	fsr_info[FSR_FS_AEA].fn = do_bad;
}
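
/*
 * The benign handler above is installed only across the
 * local_abt_enable() call: any abort left pending by firmware fires at
 * that point and is merely logged, after which the entry reverts to
 * do_bad so later asynchronous external aborts are treated as real
 * faults again.
 */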

#ifndef CONFIG_ARM_LPAE
static int __init exceptions_init(void)
{
	if (cpu_architecture() >= CPU_ARCH_ARMv6) {
		hook_fault_code(4, do_translation_fault, SIGSEGV, SEGV_MAPERR,
				"I-cache maintenance fault");
	}

	if (cpu_architecture() >= CPU_ARCH_ARMv7) {
		/*
		 * TODO: Access flag faults introduced in ARMv6K.
		 * Runtime check for 'K' extension is needed.
		 */
		hook_fault_code(3, do_bad, SIGSEGV, SEGV_MAPERR,
				"section access flag fault");
		hook_fault_code(6, do_bad, SIGSEGV, SEGV_MAPERR,
				"section access flag fault");
	}

	return 0;
}

arch_initcall(exceptions_init);
#endif