1
2
3
4
5
6
7
8
9
10#include <linux/mm.h>
11#include <linux/interrupt.h>
12#include <linux/module.h>
13#include <asm/uaccess.h>
14
/* Defined at the bottom of this file / in arch support code. */
extern int find_fixup_code(struct pt_regs *);
extern void die_if_kernel(const char *, struct pt_regs *, long);


/* Debug printout toggle: define DEBUG to enable the D() trace macro. */
#undef DEBUG

#ifdef DEBUG
#define D(x) x
#else
#define D(x)
#endif

/* Extra debug hook (register dump on bad_area); compiled out by default. */
#define DPG(x)


/*
 * Per-CPU pointer to the page directory currently loaded in the MMU;
 * used by the vmalloc_fault path to sync entries from init_mm.
 */
DEFINE_PER_CPU(pgd_t *, current_pgd);
/* Kernel address of the signal-return trampoline page (0 if unset). */
unsigned long cris_signal_return_page;
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
/*
 * Handle an MMU fault.
 *
 * address     - faulting virtual address
 * regs        - saved register state at the time of the fault
 * protection  - non-zero for a protection (access-rights) fault,
 *               zero for a missing-translation fault
 * writeaccess - 0 = read, 1 = write, 2 = instruction fetch
 *               (NOTE(review): as decoded by the low-level fault
 *               entry code -- confirm against the assembly stub)
 */
asmlinkage void
do_page_fault(unsigned long address, struct pt_regs *regs,
	      int protection, int writeaccess)
{
	struct task_struct *tsk;
	struct mm_struct *mm;
	struct vm_area_struct * vma;
	siginfo_t info;
	int fault;

	D(printk(KERN_DEBUG
		 "Page fault for %lX on %X at %lX, prot %d write %d\n",
		 address, smp_processor_id(), instruction_pointer(regs),
		 protection, writeaccess));

	tsk = current;

	/*
	 * A kernel-mode miss (not a protection fault) in the vmalloc
	 * area is fixed by copying entries from the kernel master page
	 * table (init_mm) into this CPU's pgd -- see vmalloc_fault
	 * below.  No mmap_sem needed for that.
	 */
	if (address >= VMALLOC_START &&
	    !protection &&
	    !user_mode(regs))
		goto vmalloc_fault;

	/*
	 * A user-mode miss on the signal-return trampoline page is
	 * handled the same way: the page is kernel-resident, so the
	 * fix is to copy the kernel mapping into the user's tables.
	 */
	if (cris_signal_return_page &&
	    address == cris_signal_return_page &&
	    !protection && user_mode(regs))
		goto vmalloc_fault;

	/* The fault entry path runs with interrupts off; re-enable. */
	local_irq_enable();

	mm = tsk->mm;
	info.si_code = SEGV_MAPERR;

	/*
	 * We cannot take the mm semaphore from interrupt context, and
	 * a kernel thread (mm == NULL) has no user address space to
	 * fault in -- treat both as kernel faults.
	 */
	if (in_interrupt() || !mm)
		goto no_context;

	down_read(&mm->mmap_sem);
	vma = find_vma(mm, address);
	if (!vma)
		goto bad_area;
	if (vma->vm_start <= address)
		goto good_area;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
	if (user_mode(regs)) {
		/*
		 * Only grow the stack for accesses at or above the
		 * user stack pointer minus one page of slack; anything
		 * further below is a wild access, not stack growth.
		 * NOTE(review): assumes rdusp() returns the user stack
		 * pointer at fault time -- confirm against entry code.
		 */
		if (address + PAGE_SIZE < rdusp())
			goto bad_area;
	}
	if (expand_stack(vma, address))
		goto bad_area;

	/*
	 * A valid vma covers the address; now verify that the access
	 * type (exec/write/read) is permitted by the vma's flags.
	 */
 good_area:
	info.si_code = SEGV_ACCERR;

	if (writeaccess == 2){
		if (!(vma->vm_flags & VM_EXEC))
			goto bad_area;
	} else if (writeaccess == 1) {
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
	} else {
		if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
			goto bad_area;
	}

	/*
	 * Let the generic MM fault in the page; this may sleep.
	 * Errors are reported via the VM_FAULT_* bits.
	 */
	fault = handle_mm_fault(mm, vma, address, (writeaccess & 1) ? FAULT_FLAG_WRITE : 0);
	if (unlikely(fault & VM_FAULT_ERROR)) {
		if (fault & VM_FAULT_OOM)
			goto out_of_memory;
		else if (fault & VM_FAULT_SIGBUS)
			goto do_sigbus;
		BUG();
	}
	if (fault & VM_FAULT_MAJOR)
		tsk->maj_flt++;		/* page had to be brought in (I/O) */
	else
		tsk->min_flt++;		/* satisfied without I/O */

	up_read(&mm->mmap_sem);
	return;

	/*
	 * No mapping covered the address, or the access type was not
	 * permitted by the mapping.
	 */
 bad_area:
	up_read(&mm->mmap_sem);

 bad_area_nosemaphore:
	DPG(show_registers(regs));

	/* User-mode fault: deliver SIGSEGV to the offending task. */
	if (user_mode(regs)) {
		info.si_signo = SIGSEGV;
		info.si_errno = 0;
		/* info.si_code already set (SEGV_MAPERR or SEGV_ACCERR) */
		info.si_addr = (void *)address;
		force_sig_info(SIGSEGV, &info, tsk);
		printk(KERN_NOTICE "%s (pid %d) segfaults for page "
			"address %08lx at pc %08lx\n",
			tsk->comm, tsk->pid, address, instruction_pointer(regs));
		return;
	}

 no_context:
	/*
	 * Kernel-mode fault: first try an exception-table fixup (e.g.
	 * a failed user-space access from copy_to/from_user); if one
	 * exists, execution resumes at the fixup address.
	 */
	if (find_fixup_code(regs))
		return;

	/*
	 * Otherwise the kernel dereferenced a bad pointer -- oops.
	 * The oops_in_progress guard prevents recursing if the dump
	 * itself faults.
	 */
	if (!oops_in_progress) {
		oops_in_progress = 1;
		if ((unsigned long) (address) < PAGE_SIZE)
			printk(KERN_ALERT "Unable to handle kernel NULL "
				"pointer dereference");
		else
			printk(KERN_ALERT "Unable to handle kernel access"
				" at virtual address %08lx\n", address);

		die_if_kernel("Oops", regs, (writeaccess << 1) | protection);
		oops_in_progress = 0;
	}

	do_exit(SIGKILL);

	/*
	 * Ran out of memory while handling the fault: kill the task
	 * (kernel-mode faults fall through to no_context above).
	 */
 out_of_memory:
	up_read(&mm->mmap_sem);
	printk("VM: killing process %s\n", tsk->comm);
	if (user_mode(regs))
		do_exit(SIGKILL);
	goto no_context;

 do_sigbus:
	up_read(&mm->mmap_sem);

	/*
	 * Send a SIGBUS with the faulting address; a kernel-mode
	 * fault still gets a chance at an exception-table fixup.
	 */
	info.si_signo = SIGBUS;
	info.si_errno = 0;
	info.si_code = BUS_ADRERR;
	info.si_addr = (void *)address;
	force_sig_info(SIGBUS, &info, tsk);

	if (!user_mode(regs))
		goto no_context;
	return;

vmalloc_fault:
	{
		/*
		 * Synchronize this CPU's page directory with the
		 * kernel master page table (init_mm) for the faulting
		 * address by copying the pmd entry.  No mm semaphore
		 * is taken: nothing here touches user mappings.
		 */
		int offset = pgd_index(address);
		pgd_t *pgd, *pgd_k;
		pud_t *pud, *pud_k;
		pmd_t *pmd, *pmd_k;
		pte_t *pte_k;

		pgd = (pgd_t *)per_cpu(current_pgd, smp_processor_id()) + offset;
		pgd_k = init_mm.pgd + offset;

		/*
		 * Walk the per-CPU and kernel tables in parallel; only
		 * entries present in the kernel table can be copied.
		 */
		pud = pud_offset(pgd, address);
		pud_k = pud_offset(pgd_k, address);
		if (!pud_present(*pud_k))
			goto no_context;

		pmd = pmd_offset(pud, address);
		pmd_k = pmd_offset(pud_k, address);

		if (!pmd_present(*pmd_k))
			goto bad_area_nosemaphore;

		set_pmd(pmd, *pmd_k);

		/*
		 * The pmd is now synced; make sure the pte actually
		 * exists in the kernel table too, otherwise this was a
		 * genuinely bad kernel access.
		 */
		pte_k = pte_offset_kernel(pmd_k, address);
		if (!pte_present(*pte_k))
			goto no_context;

		return;
	}
}
331
332
333int
334find_fixup_code(struct pt_regs *regs)
335{
336 const struct exception_table_entry *fixup;
337
338 if ((fixup = search_exception_tables(instruction_pointer(regs))) != 0) {
339
340 instruction_pointer(regs) = fixup->fixup;
341 arch_fixup(regs);
342 return 1;
343 }
344
345 return 0;
346}
347