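/*
 * Architecture page fault handling: decode the fault cause, look up
 * the VMA, and hand the fault to the generic mm code.
 */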
#include <asm/pgtable.h>
#include <asm/traps.h>
#include <linux/uaccess.h>
#include <linux/mm.h>
#include <linux/sched/signal.h>
#include <linux/signal.h>
#include <linux/extable.h>
#include <linux/hardirq.h>
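
/*
 * Canonical fault causes passed to do_page_fault().  The exception
 * entry points at the bottom of this file decode the hardware fault
 * into one of these.
 */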
#define FLT_IFETCH	-1
#define FLT_LOAD	 0
#define FLT_STORE	 1
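
/*
 * Canonical page fault handler.  @address is the faulting virtual
 * address and @cause is one of the FLT_* values defined above.
 */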
void do_page_fault(unsigned long address, long cause, struct pt_regs *regs)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	int si_signo;
	int si_code = SEGV_MAPERR;
	vm_fault_t fault;
	const struct exception_table_entry *fixup;
	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
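
	/*
	 * If we're in an interrupt or have no user context,
	 * we must not take the fault.
	 */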
	if (unlikely(in_interrupt() || !mm))
		goto no_context;

	local_irq_enable();

	if (user_mode(regs))
		flags |= FAULT_FLAG_USER;
retry:
	down_read(&mm->mmap_sem);
	vma = find_vma(mm, address);
	if (!vma)
		goto bad_area;

	if (vma->vm_start <= address)
		goto good_area;

	/* A valid mapping above the address must be a stack that grows down. */
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;

	if (expand_stack(vma, address))
		goto bad_area;

good_area:
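	/* The mapping exists; any failure from here on is a permission error. */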
	si_code = SEGV_ACCERR;

	switch (cause) {
	case FLT_IFETCH:
		if (!(vma->vm_flags & VM_EXEC))
			goto bad_area;
		break;
	case FLT_LOAD:
		if (!(vma->vm_flags & VM_READ))
			goto bad_area;
		break;
	case FLT_STORE:
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
		flags |= FAULT_FLAG_WRITE;
		break;
	}

	fault = handle_mm_fault(vma, address, flags);
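
	/*
	 * On VM_FAULT_RETRY, handle_mm_fault() has already dropped
	 * mmap_sem; if a fatal signal is pending, just get out and let
	 * it be delivered.
	 */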
	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
		return;
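
	/* No error: account the fault, retry if asked to, and return. */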
	if (likely(!(fault & VM_FAULT_ERROR))) {
		if (flags & FAULT_FLAG_ALLOW_RETRY) {
			if (fault & VM_FAULT_MAJOR)
				current->maj_flt++;
			else
				current->min_flt++;
			if (fault & VM_FAULT_RETRY) {
				flags &= ~FAULT_FLAG_ALLOW_RETRY;
				flags |= FAULT_FLAG_TRIED;
				goto retry;
			}
		}

		up_read(&mm->mmap_sem);
		return;
	}

	up_read(&mm->mmap_sem);
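
	/* Kernel-mode faults (e.g. a faulting uaccess) go to the fixup path. */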
	if (!user_mode(regs))
		goto no_context;

	if (fault & VM_FAULT_OOM) {
		pagefault_out_of_memory();
		return;
	}
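
	/*
	 * The address was in the memory map, but the fault could not
	 * be handled: send SIGBUS.
	 */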
	if (fault & VM_FAULT_SIGBUS) {
		si_signo = SIGBUS;
		si_code = BUS_ADRERR;
	}
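	/* All other errors are reported as SIGSEGV. */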
	else {
		si_signo = SIGSEGV;
		si_code = SEGV_ACCERR;
	}
	force_sig_fault(si_signo, si_code, (void __user *)address, current);
	return;

bad_area:
	up_read(&mm->mmap_sem);

	if (user_mode(regs)) {
		force_sig_fault(SIGSEGV, si_code, (void __user *)address, current);
		return;
	}
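	/* Kernel-mode fault falls through. */
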
no_context:
	fixup = search_exception_tables(pt_elr(regs));
	if (fixup) {
		pt_set_elr(regs, fixup->fixup);
		return;
	}
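
	/* No fixup entry was found: the kernel touched a bad address. */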
	bust_spinlocks(1);
	printk(KERN_EMERG "Unable to handle kernel paging request at "
		"virtual address 0x%08lx, regs %p\n", address, regs);
	die("Bad Kernel VA", regs, SIGKILL);
}
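
/*
 * Exception entry points: each fetches the faulting virtual address
 * from pt_regs and passes the canonical fault cause to do_page_fault().
 */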
void read_protection_fault(struct pt_regs *regs)
{
	unsigned long badvadr = pt_badva(regs);

	do_page_fault(badvadr, FLT_LOAD, regs);
}

void write_protection_fault(struct pt_regs *regs)
{
	unsigned long badvadr = pt_badva(regs);

	do_page_fault(badvadr, FLT_STORE, regs);
}

void execute_protection_fault(struct pt_regs *regs)
{
	unsigned long badvadr = pt_badva(regs);

	do_page_fault(badvadr, FLT_IFETCH, regs);
}