1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18#include <linux/signal.h>
19#include <linux/sched.h>
20#include <linux/kernel.h>
21#include <linux/errno.h>
22#include <linux/string.h>
23#include <linux/types.h>
24#include <linux/ptrace.h>
25#include <linux/mman.h>
26#include <linux/mm.h>
27#include <linux/interrupt.h>
28#include <linux/highmem.h>
29#include <linux/module.h>
30#include <linux/kprobes.h>
31#include <linux/kdebug.h>
32#include <linux/perf_event.h>
33#include <linux/magic.h>
34#include <linux/ratelimit.h>
35
36#include <asm/firmware.h>
37#include <asm/page.h>
38#include <asm/pgtable.h>
39#include <asm/mmu.h>
40#include <asm/mmu_context.h>
41#include <asm/system.h>
42#include <asm/uaccess.h>
43#include <asm/tlbflush.h>
44#include <asm/siginfo.h>
45#include <mm/mmu_decl.h>
46
47#include "icswx.h"
48
#ifdef CONFIG_KPROBES
/*
 * Give kprobes first shot at a kernel-mode fault.  Returns 1 when a
 * running kprobe's fault handler consumed the fault, 0 otherwise.
 */
static inline int notify_page_fault(struct pt_regs *regs)
{
	int handled = 0;

	/* kprobes never handles user-mode faults */
	if (user_mode(regs))
		return 0;

	/* kprobe_running() needs preemption disabled to be meaningful */
	preempt_disable();
	if (kprobe_running() && kprobe_fault_handler(regs, 11))
		handled = 1;
	preempt_enable();

	return handled;
}
#else
static inline int notify_page_fault(struct pt_regs *regs)
{
	return 0;
}
#endif
70
71
72
73
74
75static int store_updates_sp(struct pt_regs *regs)
76{
77 unsigned int inst;
78
79 if (get_user(inst, (unsigned int __user *)regs->nip))
80 return 0;
81
82 if (((inst >> 16) & 0x1f) != 1)
83 return 0;
84
85 switch (inst >> 26) {
86 case 37:
87 case 39:
88 case 45:
89 case 53:
90 case 55:
91 return 1;
92 case 62:
93 return (inst & 3) == 1;
94 case 31:
95
96 switch ((inst >> 1) & 0x3ff) {
97 case 181:
98 case 183:
99 case 247:
100 case 439:
101 case 695:
102 case 759:
103 return 1;
104 }
105 }
106 return 0;
107}
108
109
110
111
112
113
114
115
116
117
118
119
120
121
/*
 * The main page-fault entry point, called from low-level exception
 * assembly.  @address is the faulting address, @error_code the fault
 * status register contents (DSISR/SRR1 on classic hash-MMU parts,
 * ESR on 4xx/BookE -- see the #ifdef blocks below).
 *
 * Returns 0 when the fault was resolved (or a signal was queued for a
 * user process); otherwise returns a signal number for the assembly
 * caller to handle against the kernel context (via bad_page_fault).
 */
int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
			    unsigned long error_code)
{
	struct vm_area_struct * vma;
	struct mm_struct *mm = current->mm;
	siginfo_t info;
	int code = SEGV_MAPERR;
	int is_write = 0, ret;
	int trap = TRAP(regs);
	/* 0x400 is the instruction-fetch fault vector (cf. bad_page_fault) */
	int is_exec = trap == 0x400;

#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
	/*
	 * Classic (non-4xx/BookE) CPUs report fault status differently
	 * for instruction vs. data faults:
	 *  - instruction faults: keep only the fault-describing SRR1
	 *    bits (mask 0x48200000 -- NOTE(review): exact bit meanings
	 *    per the SRR1 layout, confirm against the CPU manual);
	 *    instruction faults are never stores.
	 *  - data faults: DSISR_ISSTORE says whether this was a write.
	 */
	if (trap == 0x400)
		error_code &= 0x48200000;
	else
		is_write = error_code & DSISR_ISSTORE;
#else
	/* 4xx/BookE: ESR_DST is set for data-side store faults */
	is_write = error_code & ESR_DST;
#endif

#ifdef CONFIG_PPC_ICSWX
	/*
	 * Faults flagged ICSWX_DSI_UCT come from the icswx coprocessor
	 * path; give the ACOP layer first chance to resolve them.  A
	 * non-zero result is propagated back to the caller unchanged.
	 */
	if (error_code & ICSWX_DSI_UCT) {
		int ret;

		ret = acop_handle_fault(regs, address, error_code);
		if (ret)
			return ret;
	}
#endif

	/* Let kprobes claim the fault before we do any real work */
	if (notify_page_fault(regs))
		return 0;

	/* ... then the debugger, if one is attached */
	if (unlikely(debugger_fault_handler(regs)))
		return 0;

	/* Kernel-mode access beyond TASK_SIZE can never be a user mapping */
	if (!user_mode(regs) && (address >= TASK_SIZE))
		return SIGSEGV;

#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE) || \
			     defined(CONFIG_PPC_BOOK3S_64))
	if (error_code & DSISR_DABRMATCH) {
		/* DABR (data address breakpoint) match, not a real fault */
		do_dabr(regs, address, error_code);
		return 0;
	}
#endif

	/*
	 * We can't take mmap_sem (and hence can't fault) from atomic
	 * context, or before the mm is set up.  A kernel fault here
	 * returns SIGSEGV so the exception-fixup path can run; a user
	 * fault in this state is a kernel bug -- report and die.
	 */
	if (in_atomic() || mm == NULL) {
		if (!user_mode(regs))
			return SIGSEGV;
		/* in_atomic() in user mode (or mm == NULL) is a hard bug */
		printk(KERN_EMERG "Page fault in user mode with "
		       "in_atomic() = %d mm = %p\n", in_atomic(), mm);
		printk(KERN_EMERG "NIP = %lx MSR = %lx\n",
		       regs->nip, regs->msr);
		die("Weird page fault", regs, SIGSEGV);
	}

	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);

	/*
	 * Acquire mmap_sem.  Try a non-blocking acquire first: if a
	 * kernel-mode fault has no exception-table fixup for this NIP,
	 * sleeping on mmap_sem could deadlock (the kernel may already
	 * hold it), so treat that case as a bad area instead of
	 * blocking.
	 */
	if (!down_read_trylock(&mm->mmap_sem)) {
		if (!user_mode(regs) && !search_exception_tables(regs->nip))
			goto bad_area_nosemaphore;

		down_read(&mm->mmap_sem);
	}

	vma = find_vma(mm, address);
	if (!vma)
		goto bad_area;
	if (vma->vm_start <= address)
		goto good_area;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;

	/*
	 * The address falls below a VM_GROWSDOWN (stack) vma.  Decide
	 * whether this looks like a legitimate stack extension before
	 * calling expand_stack().  Accesses within 1MB (0x100000) of
	 * the vma end are accepted without further checks.
	 */
	if (address + 0x100000 < vma->vm_end) {
		/* fetch the *user* regs, even if the fault is kernel-mode */
		struct pt_regs *uregs = current->thread.regs;
		if (uregs == NULL)
			goto bad_area;

		/*
		 * Below the user stack pointer, only a small window is
		 * legitimate: allow up to 2048 bytes of slack under r1
		 * (presumably covering the ABI red zone -- NOTE(review):
		 * confirm the intended margin), or a user-mode
		 * store-with-update on r1 that is itself building a new
		 * stack frame (see store_updates_sp).
		 */
		if (address + 2048 < uregs->gpr[1]
		    && (!user_mode(regs) || !store_updates_sp(regs)))
			goto bad_area;
	}
	if (expand_stack(vma, address))
		goto bad_area;

good_area:
	/* mapping exists; from here on, failures are access errors */
	code = SEGV_ACCERR;
#if defined(CONFIG_6xx)
	/*
	 * 6xx: reject faults with any of these DSISR error bits set.
	 * NOTE(review): mask 0x95700000 -- confirm which DSISR error
	 * conditions these bits encode on 6xx.
	 */
	if (error_code & 0x95700000)
		goto bad_area;
#endif
#if defined(CONFIG_8xx)
	/*
	 * 8xx: bit 0x40000000 indicates a stale TLB entry; invalidate
	 * it so the updated PTE is reloaded after the fault is fixed.
	 * NOTE(review): confirm bit semantics against the MPC8xx
	 * reference manual.
	 */
	if (error_code & 0x40000000)
		_tlbil_va(address, 0, 0, 0);

	/* 8xx: bit 0x10000000 flags a protection violation */
	if (error_code & 0x10000000)
		goto bad_area;
#endif

	if (is_exec) {
#ifdef CONFIG_PPC_STD_MMU
		/*
		 * Hash-MMU: an exec-permission protection fault cannot
		 * be cured by handle_mm_fault, so fail it right away.
		 */
		if (error_code & DSISR_PROTFAULT)
			goto bad_area;
#endif
		/*
		 * Execution is allowed from VM_EXEC areas.  On CPUs
		 * without hardware no-execute (no CPU_FTR_NOEXECUTE),
		 * also tolerate execution from readable/writable areas;
		 * PROT_NONE areas are always rejected.
		 */
		if (!(vma->vm_flags & VM_EXEC) &&
		    (cpu_has_feature(CPU_FTR_NOEXECUTE) ||
		     !(vma->vm_flags & (VM_READ | VM_WRITE))))
			goto bad_area;
	/* a write */
	} else if (is_write) {
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
	/* a read */
	} else {
		/*
		 * Reject protection faults on reads.  NOTE(review):
		 * confirm bit 0x08000000 is the protection-fault flag
		 * for the MMU families that reach this branch.
		 */
		if (error_code & 0x08000000)
			goto bad_area;
		if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))
			goto bad_area;
	}

	/*
	 * Ask the generic MM layer to resolve the fault.  On failure we
	 * must exit cleanly rather than retake the same fault forever.
	 */
	ret = handle_mm_fault(mm, vma, address, is_write ? FAULT_FLAG_WRITE : 0);
	if (unlikely(ret & VM_FAULT_ERROR)) {
		if (ret & VM_FAULT_OOM)
			goto out_of_memory;
		else if (ret & VM_FAULT_SIGBUS)
			goto do_sigbus;
		BUG();	/* unknown VM_FAULT_ERROR bit -- must not happen */
	}
	if (ret & VM_FAULT_MAJOR) {
		current->maj_flt++;
		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
			      regs, address);
#ifdef CONFIG_PPC_SMLPAR
		/* under CMO firmware, account page-ins to the hypervisor */
		if (firmware_has_feature(FW_FEATURE_CMO)) {
			preempt_disable();
			get_lppaca()->page_ins += (1 << PAGE_FACTOR);
			preempt_enable();
		}
#endif
	} else {
		current->min_flt++;
		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
			      regs, address);
	}
	up_read(&mm->mmap_sem);
	return 0;

bad_area:
	up_read(&mm->mmap_sem);

bad_area_nosemaphore:
	/* user-mode faults on bad areas get a SIGSEGV and we're done */
	if (user_mode(regs)) {
		_exception(SIGSEGV, regs, code, address);
		return 0;
	}

	/* kernel exec of a non-executable page: likely an exploit attempt */
	if (is_exec && (error_code & DSISR_PROTFAULT))
		printk_ratelimited(KERN_CRIT "kernel tried to execute NX-protected"
				   " page (%lx) - exploit attempt? (uid: %d)\n",
				   address, current_uid());

	return SIGSEGV;

	/*
	 * Out of memory: let the generic OOM path decide what to kill
	 * for user faults; kernel faults just get SIGKILL back.
	 */
out_of_memory:
	up_read(&mm->mmap_sem);
	if (!user_mode(regs))
		return SIGKILL;
	pagefault_out_of_memory();
	return 0;

do_sigbus:
	up_read(&mm->mmap_sem);
	if (user_mode(regs)) {
		/* bus error (e.g. truncated mapping): SIGBUS to the task */
		info.si_signo = SIGBUS;
		info.si_errno = 0;
		info.si_code = BUS_ADRERR;
		info.si_addr = (void __user *)address;
		force_sig_info(SIGBUS, &info, current);
		return 0;
	}
	return SIGBUS;
}
397
398
399
400
401
402
403void bad_page_fault(struct pt_regs *regs, unsigned long address, int sig)
404{
405 const struct exception_table_entry *entry;
406 unsigned long *stackend;
407
408
409 if ((entry = search_exception_tables(regs->nip)) != NULL) {
410 regs->nip = entry->fixup;
411 return;
412 }
413
414
415
416 switch (regs->trap) {
417 case 0x300:
418 case 0x380:
419 printk(KERN_ALERT "Unable to handle kernel paging request for "
420 "data at address 0x%08lx\n", regs->dar);
421 break;
422 case 0x400:
423 case 0x480:
424 printk(KERN_ALERT "Unable to handle kernel paging request for "
425 "instruction fetch\n");
426 break;
427 default:
428 printk(KERN_ALERT "Unable to handle kernel paging request for "
429 "unknown fault\n");
430 break;
431 }
432 printk(KERN_ALERT "Faulting instruction address: 0x%08lx\n",
433 regs->nip);
434
435 stackend = end_of_stack(current);
436 if (current != &init_task && *stackend != STACK_END_MAGIC)
437 printk(KERN_ALERT "Thread overran stack, or stack corrupted\n");
438
439 die("Kernel access of bad area", regs, sig);
440}
441