1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18#include <linux/signal.h>
19#include <linux/sched.h>
20#include <linux/kernel.h>
21#include <linux/errno.h>
22#include <linux/string.h>
23#include <linux/types.h>
24#include <linux/ptrace.h>
25#include <linux/mman.h>
26#include <linux/mm.h>
27#include <linux/interrupt.h>
28#include <linux/highmem.h>
29#include <linux/module.h>
30#include <linux/kprobes.h>
31#include <linux/kdebug.h>
32#include <linux/perf_event.h>
33#include <linux/ratelimit.h>
34#include <linux/context_tracking.h>
35#include <linux/hugetlb.h>
36#include <linux/uaccess.h>
37
38#include <asm/firmware.h>
39#include <asm/page.h>
40#include <asm/pgtable.h>
41#include <asm/mmu.h>
42#include <asm/mmu_context.h>
43#include <asm/tlbflush.h>
44#include <asm/siginfo.h>
45#include <asm/debug.h>
46
47#include "icswx.h"
48
#ifdef CONFIG_KPROBES
/*
 * Give kprobes a chance to claim this fault.  Returns 1 if a kprobe
 * fault handler consumed it, 0 otherwise.
 */
static inline int notify_page_fault(struct pt_regs *regs)
{
	int handled = 0;

	/* kprobes only traps kernel-mode faults */
	if (user_mode(regs))
		return 0;

	/* keep preemption off while consulting the per-CPU kprobe state */
	preempt_disable();
	if (kprobe_running() && kprobe_fault_handler(regs, 11))
		handled = 1;
	preempt_enable();

	return handled;
}
#else
static inline int notify_page_fault(struct pt_regs *regs)
{
	return 0;
}
#endif
70
71
72
73
74
75static int store_updates_sp(struct pt_regs *regs)
76{
77 unsigned int inst;
78
79 if (get_user(inst, (unsigned int __user *)regs->nip))
80 return 0;
81
82 if (((inst >> 16) & 0x1f) != 1)
83 return 0;
84
85 switch (inst >> 26) {
86 case 37:
87 case 39:
88 case 45:
89 case 53:
90 case 55:
91 return 1;
92 case 62:
93 return (inst & 3) == 1;
94 case 31:
95
96 switch ((inst >> 1) & 0x3ff) {
97 case 181:
98 case 183:
99 case 247:
100 case 439:
101 case 695:
102 case 759:
103 return 1;
104 }
105 }
106 return 0;
107}
108
/*
 * do_page_fault error handling:
 *  - MM_FAULT_RETURN   : fault processing is finished (caller bails)
 *  - MM_FAULT_CONTINUE : fault not fatal, keep going in do_page_fault()
 *  - MM_FAULT_ERR(sig) : fatal in kernel mode, deliver/raise `sig`
 */
#define MM_FAULT_RETURN 0
#define MM_FAULT_CONTINUE -1
#define MM_FAULT_ERR(sig) (sig)
115
116static int do_sigbus(struct pt_regs *regs, unsigned long address,
117 unsigned int fault)
118{
119 siginfo_t info;
120 unsigned int lsb = 0;
121
122 up_read(¤t->mm->mmap_sem);
123
124 if (!user_mode(regs))
125 return MM_FAULT_ERR(SIGBUS);
126
127 current->thread.trap_nr = BUS_ADRERR;
128 info.si_signo = SIGBUS;
129 info.si_errno = 0;
130 info.si_code = BUS_ADRERR;
131 info.si_addr = (void __user *)address;
132#ifdef CONFIG_MEMORY_FAILURE
133 if (fault & (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) {
134 pr_err("MCE: Killing %s:%d due to hardware memory corruption fault at %lx\n",
135 current->comm, current->pid, address);
136 info.si_code = BUS_MCEERR_AR;
137 }
138
139 if (fault & VM_FAULT_HWPOISON_LARGE)
140 lsb = hstate_index_to_shift(VM_FAULT_GET_HINDEX(fault));
141 if (fault & VM_FAULT_HWPOISON)
142 lsb = PAGE_SHIFT;
143#endif
144 info.si_addr_lsb = lsb;
145 force_sig_info(SIGBUS, &info, current);
146 return MM_FAULT_RETURN;
147}
148
149static int mm_fault_error(struct pt_regs *regs, unsigned long addr, int fault)
150{
151
152
153
154
155 if (fatal_signal_pending(current)) {
156
157
158
159
160
161 if (!(fault & VM_FAULT_RETRY))
162 up_read(¤t->mm->mmap_sem);
163
164 if (user_mode(regs))
165 return MM_FAULT_RETURN;
166 return MM_FAULT_ERR(SIGKILL);
167 }
168
169
170 if (!(fault & VM_FAULT_ERROR))
171 return MM_FAULT_CONTINUE;
172
173
174 if (fault & VM_FAULT_OOM) {
175 up_read(¤t->mm->mmap_sem);
176
177
178
179
180
181 if (!user_mode(regs))
182 return MM_FAULT_ERR(SIGKILL);
183 pagefault_out_of_memory();
184 return MM_FAULT_RETURN;
185 }
186
187 if (fault & (VM_FAULT_SIGBUS|VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE))
188 return do_sigbus(regs, addr, fault);
189
190
191 BUG();
192 return MM_FAULT_CONTINUE;
193}
194
/*
 * The error_code parameter is
 *  - DSISR for a data fault,
 *  - SRR1 for an instruction fault,
 * on 4xx/BookE it is ESR for a data fault, 0 for an instruction fault.
 *
 * The return value is 0 if the fault was handled, or the signal
 * number if this is a kernel fault that can't be handled here.
 */
int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
			    unsigned long error_code)
{
	enum ctx_state prev_state = exception_enter();
	struct vm_area_struct * vma;
	struct mm_struct *mm = current->mm;
	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
	int code = SEGV_MAPERR;
	int is_write = 0;
	int trap = TRAP(regs);
	/* 0x400 is the instruction storage interrupt vector */
	int is_exec = trap == 0x400;
	int fault;
	int rc = 0, store_update_sp = 0;

#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
	/*
	 * Fortunately the bit assignments in SRR1 for an instruction
	 * fault and DSISR for a data fault are mostly the same for the
	 * bits we are interested in.  But there are some bits which
	 * indicate errors in DSISR but can validly be set in SRR1.
	 */
	if (trap == 0x400)
		error_code &= 0x48200000;
	else
		is_write = error_code & DSISR_ISSTORE;
#else
	is_write = error_code & ESR_DST;
#endif /* CONFIG_4xx || CONFIG_BOOKE */

#ifdef CONFIG_PPC_ICSWX
	/*
	 * we need to do this early because this "data storage
	 * interrupt" does not update the DAR/DEAR so we don't want to
	 * look at it
	 */
	if (error_code & ICSWX_DSI_UCT) {
		rc = acop_handle_fault(regs, address, error_code);
		if (rc)
			goto bail;
	}
#endif /* CONFIG_PPC_ICSWX */

	if (notify_page_fault(regs))
		goto bail;

	if (unlikely(debugger_fault_handler(regs)))
		goto bail;

	/* On a kernel SLB miss we can only check for a valid exception entry */
	if (!user_mode(regs) && (address >= TASK_SIZE)) {
		rc = SIGSEGV;
		goto bail;
	}

#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE) || \
	defined(CONFIG_PPC_BOOK3S_64))
	if (error_code & DSISR_DABRMATCH) {
		/* breakpoint match */
		do_break(regs, address, error_code);
		goto bail;
	}
#endif

	/* We restore the interrupt state now */
	if (!arch_irq_disabled_regs(regs))
		local_irq_enable();

	if (faulthandler_disabled() || mm == NULL) {
		if (!user_mode(regs)) {
			rc = SIGSEGV;
			goto bail;
		}
		/* faulthandler_disabled() in user mode is really bad,
		   as is current->mm == NULL. */
		printk(KERN_EMERG "Page fault in user mode with "
		"faulthandler_disabled() = %d mm = %p\n",
		faulthandler_disabled(), mm);
		printk(KERN_EMERG "NIP = %lx MSR = %lx\n",
		regs->nip, regs->msr);
		die("Weird page fault", regs, SIGSEGV);
	}

	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);

	/*
	 * We want to do this outside mmap_sem, because reading code around nip
	 * can result in fault, which will cause a deadlock when called with
	 * mmap_sem held
	 */
	if (user_mode(regs))
		store_update_sp = store_updates_sp(regs);

	if (user_mode(regs))
		flags |= FAULT_FLAG_USER;

	/* When running in the kernel we expect faults to occur only to
	 * addresses in user space.  All other faults represent errors in the
	 * kernel and should generate an OOPS.  Unfortunately, in the case of an
	 * erroneous fault occurring in a code path which already holds mmap_sem
	 * we will deadlock attempting to validate the fault against the
	 * address space.  Luckily the kernel only validly references user
	 * space from well defined areas of code, which are listed in the
	 * exceptions table.
	 *
	 * As the vast majority of faults will be valid we will only perform
	 * the source reference check when there is a possibility of a deadlock.
	 * Attempt to lock the address space, if we cannot we then validate the
	 * source.  If this is invalid we can skip the address space check,
	 * thus avoiding the deadlock.
	 */
	if (!down_read_trylock(&mm->mmap_sem)) {
		if (!user_mode(regs) && !search_exception_tables(regs->nip))
			goto bad_area_nosemaphore;

retry:
		down_read(&mm->mmap_sem);
	} else {
		/*
		 * The above down_read_trylock() might have succeeded in
		 * which case we'll have missed the might_sleep() from
		 * down_read():
		 */
		might_sleep();
	}

	vma = find_vma(mm, address);
	if (!vma)
		goto bad_area;
	if (vma->vm_start <= address)
		goto good_area;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;

	/*
	 * N.B. The POWER/Open ABI allows programs to access up to
	 * 288 bytes below the stack pointer.
	 * The kernel signal delivery code writes up to about 1.5kB
	 * below the stack pointer (r1) before decrementing it.
	 * The exec code can write slightly over 640kB to the stack
	 * before setting the user r1.  Thus we allow the stack to
	 * expand to 1MB without further checks.
	 */
	if (address + 0x100000 < vma->vm_end) {

		/* get user regs even if this fault is in kernel mode */
		struct pt_regs *uregs = current->thread.regs;
		if (uregs == NULL)
			goto bad_area;

		/*
		 * A user-mode access to an address a long way below
		 * the stack pointer is only valid if the instruction
		 * is one which would update the stack pointer to the
		 * address accessed if the instruction completed,
		 * i.e. either stwu rs,n(r1) or stwux rs,r1,rb
		 * (or the byte, halfword, float or double forms).
		 *
		 * If we don't check this then any write to the area
		 * between the last mapped region and the stack will
		 * expand the stack rather than segfaulting.
		 */
		if (address + 2048 < uregs->gpr[1] && !store_update_sp)
			goto bad_area;
	}
	if (expand_stack(vma, address))
		goto bad_area;

good_area:
	code = SEGV_ACCERR;
#if defined(CONFIG_6xx)
	if (error_code & 0x95700000)
		/* an error such as lwarx to I/O controller space,
		   address matching DABR, eciwx, etc. */
		goto bad_area;
#endif /* CONFIG_6xx */
#if defined(CONFIG_8xx)
	/* 8xx sets a number of "undefined" bits in the DSISR; of those
	 * that can be set, only this one seems to indicate a real
	 * access error.  NOTE(review): magic mask inherited as-is.
	 */
	if (error_code & 0x10000000)
		/* Guarded storage error. */
		goto bad_area;
#endif /* CONFIG_8xx */

	if (is_exec) {
		/*
		 * Allow execution from readable areas if the MMU does not
		 * provide separate controls over reading and executing.
		 */
		if (!(vma->vm_flags & VM_EXEC) &&
		(cpu_has_feature(CPU_FTR_NOEXECUTE) ||
		!(vma->vm_flags & (VM_READ | VM_WRITE))))
			goto bad_area;
#ifdef CONFIG_PPC_STD_MMU
		/*
		 * protection fault should result in SIGSEGV, we
		 * don't let the fault be handled here
		 */
		WARN_ON_ONCE(error_code & DSISR_PROTFAULT);
#endif /* CONFIG_PPC_STD_MMU */
	/* a write */
	} else if (is_write) {
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
		flags |= FAULT_FLAG_WRITE;
	/* a read */
	} else {
		if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))
			goto bad_area;
		WARN_ON_ONCE(error_code & DSISR_PROTFAULT);
	}

	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	fault = handle_mm_fault(mm, vma, address, flags);
	if (unlikely(fault & (VM_FAULT_RETRY|VM_FAULT_ERROR))) {
		if (fault & VM_FAULT_SIGSEGV)
			goto bad_area;
		rc = mm_fault_error(regs, address, fault);
		if (rc >= MM_FAULT_RETURN)
			goto bail;
		else
			rc = 0;
	}

	/*
	 * Major/minor page fault accounting is only done on the
	 * initial attempt. If we go through a retry, it is extremely
	 * likely that the page will be found in page cache at that point.
	 */
	if (flags & FAULT_FLAG_ALLOW_RETRY) {
		if (fault & VM_FAULT_MAJOR) {
			current->maj_flt++;
			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
			regs, address);
#ifdef CONFIG_PPC_SMLPAR
			if (firmware_has_feature(FW_FEATURE_CMO)) {
				u32 page_ins;

				preempt_disable();
				page_ins = be32_to_cpu(get_lppaca()->page_ins);
				page_ins += 1 << PAGE_FACTOR;
				get_lppaca()->page_ins = cpu_to_be32(page_ins);
				preempt_enable();
			}
#endif /* CONFIG_PPC_SMLPAR */
		} else {
			current->min_flt++;
			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
			regs, address);
		}
		if (fault & VM_FAULT_RETRY) {
			/* Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk
			 * of starvation. */
			flags &= ~FAULT_FLAG_ALLOW_RETRY;
			flags |= FAULT_FLAG_TRIED;
			goto retry;
		}
	}

	up_read(&mm->mmap_sem);
	goto bail;

bad_area:
	up_read(&mm->mmap_sem);

bad_area_nosemaphore:
	/* User mode accesses cause a SIGSEGV */
	if (user_mode(regs)) {
		_exception(SIGSEGV, regs, code, address);
		goto bail;
	}

	if (is_exec && (error_code & DSISR_PROTFAULT))
		printk_ratelimited(KERN_CRIT "kernel tried to execute NX-protected"
		" page (%lx) - exploit attempt? (uid: %d)\n",
		address, from_kuid(&init_user_ns, current_uid()));

	rc = SIGSEGV;

bail:
	exception_exit(prev_state);
	return rc;

}
503
504
505
506
507
508
509void bad_page_fault(struct pt_regs *regs, unsigned long address, int sig)
510{
511 const struct exception_table_entry *entry;
512
513
514 if ((entry = search_exception_tables(regs->nip)) != NULL) {
515 regs->nip = entry->fixup;
516 return;
517 }
518
519
520
521 switch (regs->trap) {
522 case 0x300:
523 case 0x380:
524 printk(KERN_ALERT "Unable to handle kernel paging request for "
525 "data at address 0x%08lx\n", regs->dar);
526 break;
527 case 0x400:
528 case 0x480:
529 printk(KERN_ALERT "Unable to handle kernel paging request for "
530 "instruction fetch\n");
531 break;
532 case 0x600:
533 printk(KERN_ALERT "Unable to handle kernel paging request for "
534 "unaligned access at address 0x%08lx\n", regs->dar);
535 break;
536 default:
537 printk(KERN_ALERT "Unable to handle kernel paging request for "
538 "unknown fault\n");
539 break;
540 }
541 printk(KERN_ALERT "Faulting instruction address: 0x%08lx\n",
542 regs->nip);
543
544 if (task_stack_end_corrupted(current))
545 printk(KERN_ALERT "Thread overran stack, or stack corrupted\n");
546
547 die("Kernel access of bad area", regs, sig);
548}
549