/*
 * Kernel Probes (KProbes) -- powerpc architecture-specific support.
 */
#include <linux/kprobes.h>
#include <linux/ptrace.h>
#include <linux/preempt.h>
#include <linux/module.h>
#include <linux/kdebug.h>
#include <linux/slab.h>
#include <asm/cacheflush.h>
#include <asm/sstep.h>
#include <asm/uaccess.h>

DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);

struct kretprobe_blackpoint kretprobe_blacklist[] = {{NULL, NULL}};

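/*
 * Validate the probe address and copy the original instruction into an
 * out-of-line instruction slot so it can be single-stepped safely.
 */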
int __kprobes arch_prepare_kprobe(struct kprobe *p)
{
	int ret = 0;
	kprobe_opcode_t insn = *p->addr;

	if ((unsigned long)p->addr & 0x03) {
		printk("Attempt to register kprobe at an unaligned address\n");
		ret = -EINVAL;
	} else if (IS_MTMSRD(insn) || IS_RFID(insn) || IS_RFI(insn)) {
		printk("Cannot register a kprobe on rfi/rfid or mtmsr[d]\n");
		ret = -EINVAL;
	}

	/*
	 * The copied instruction must sit in an executable insn slot;
	 * this is required on ppc64 and harmless on ppc32.
	 */
	if (!ret) {
		p->ainsn.insn = get_insn_slot();
		if (!p->ainsn.insn)
			ret = -ENOMEM;
	}

	if (!ret) {
		memcpy(p->ainsn.insn, p->addr,
				MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
		p->opcode = *p->addr;
		flush_icache_range((unsigned long)p->ainsn.insn,
			(unsigned long)p->ainsn.insn + sizeof(kprobe_opcode_t));
	}

	p->ainsn.boostable = 0;
	return ret;
}

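/*
 * Arm the probe: write the breakpoint (trap) opcode at the probed address
 * and flush the icache so instruction fetch sees the change.
 * arch_disarm_kprobe() below restores the original opcode.
 */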
void __kprobes arch_arm_kprobe(struct kprobe *p)
{
	*p->addr = BREAKPOINT_INSTRUCTION;
	flush_icache_range((unsigned long) p->addr,
			   (unsigned long) p->addr + sizeof(kprobe_opcode_t));
}

void __kprobes arch_disarm_kprobe(struct kprobe *p)
{
	*p->addr = p->opcode;
	flush_icache_range((unsigned long) p->addr,
			   (unsigned long) p->addr + sizeof(kprobe_opcode_t));
}

void __kprobes arch_remove_kprobe(struct kprobe *p)
{
	if (p->ainsn.insn) {
		free_insn_slot(p->ainsn.insn, 0);
		p->ainsn.insn = NULL;
	}
}

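/*
 * Switch the CPU into single-step mode and point nip at the copied
 * instruction so it is executed out of line.
 */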
static void __kprobes prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
{
	enable_single_step(regs);

	/*
	 * Single step on the copy of the original instruction
	 * (p->ainsn.insn) rather than on the breakpoint that now sits
	 * at the probe address. Even trap variants are stepped this
	 * way, since register values decide whether such a trap fires.
	 */
	regs->nip = (unsigned long)p->ainsn.insn;
}

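/*
 * Save/restore the per-CPU kprobe state so a probe hit inside another
 * probe's handler (KPROBE_REENTER) can be unwound correctly.
 */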
static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	kcb->prev_kprobe.kp = kprobe_running();
	kcb->prev_kprobe.status = kcb->kprobe_status;
	kcb->prev_kprobe.saved_msr = kcb->kprobe_saved_msr;
}

static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	__get_cpu_var(current_kprobe) = kcb->prev_kprobe.kp;
	kcb->kprobe_status = kcb->prev_kprobe.status;
	kcb->kprobe_saved_msr = kcb->prev_kprobe.saved_msr;
}

static void __kprobes set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
				struct kprobe_ctlblk *kcb)
{
	__get_cpu_var(current_kprobe) = p;
	kcb->kprobe_saved_msr = regs->msr;
}

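/*
 * Hijack the return path of the probed function: remember the real
 * return address and make the link register point at the kretprobe
 * trampoline instead.
 */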
void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
				      struct pt_regs *regs)
{
	ri->ret_addr = (kprobe_opcode_t *)regs->link;

	/* Replace the return addr with the trampoline addr */
	regs->link = (unsigned long)kretprobe_trampoline;
}

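/*
 * Main breakpoint handler: runs from the trap exception when a probed
 * instruction is hit. Returns non-zero if the trap was consumed here.
 */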
static int __kprobes kprobe_handler(struct pt_regs *regs)
{
	struct kprobe *p;
	int ret = 0;
	unsigned int *addr = (unsigned int *)regs->nip;
	struct kprobe_ctlblk *kcb;

	/*
	 * We don't want to be preempted for the entire
	 * duration of kprobe processing.
	 */
	preempt_disable();
	kcb = get_kprobe_ctlblk();

	/* Check we're not actually recursing */
	if (kprobe_running()) {
		p = get_kprobe(addr);
		if (p) {
			kprobe_opcode_t insn = *p->ainsn.insn;
			if (kcb->kprobe_status == KPROBE_HIT_SS &&
					is_trap(insn)) {
				/* Turn off 'trace' bits */
				regs->msr &= ~MSR_SINGLESTEP;
				regs->msr |= kcb->kprobe_saved_msr;
				goto no_kprobe;
			}
			/*
			 * We have reentered the kprobe_handler(), since
			 * another probe was hit while within the handler.
			 * We here save the original kprobes variables and
			 * just single step on the instruction of the new
			 * probe without calling any user handlers.
			 */
			save_previous_kprobe(kcb);
			set_current_kprobe(p, regs, kcb);
			kcb->kprobe_saved_msr = regs->msr;
			kprobes_inc_nmissed_count(p);
			prepare_singlestep(p, regs);
			kcb->kprobe_status = KPROBE_REENTER;
			return 1;
		} else {
			if (*addr != BREAKPOINT_INSTRUCTION) {
				/* If trap variant, then it belongs not to us */
				kprobe_opcode_t cur_insn = *addr;
				if (is_trap(cur_insn))
					goto no_kprobe;
				/*
				 * The breakpoint instruction was removed by
				 * another cpu right after we hit it; no
				 * further handling of this interrupt is
				 * appropriate.
				 */
				ret = 1;
				goto no_kprobe;
			}
			p = __get_cpu_var(current_kprobe);
			if (p->break_handler && p->break_handler(p, regs)) {
				goto ss_probe;
			}
		}
		goto no_kprobe;
	}

	p = get_kprobe(addr);
	if (!p) {
		if (*addr != BREAKPOINT_INSTRUCTION) {
			/*
			 * PowerPC has multiple variants of the "trap"
			 * instruction. If the current instruction is a
			 * trap variant, it could belong to someone else.
			 */
			kprobe_opcode_t cur_insn = *addr;
			if (is_trap(cur_insn))
				goto no_kprobe;
			/*
			 * The breakpoint instruction was removed right
			 * after we hit it. Another cpu has removed
			 * either a probepoint or a debugger breakpoint
			 * at this address. In either case, no further
			 * handling of this interrupt is appropriate.
			 */
			ret = 1;
		}
		/* Not one of ours: let the kernel handle it */
		goto no_kprobe;
	}

	kcb->kprobe_status = KPROBE_HIT_ACTIVE;
	set_current_kprobe(p, regs, kcb);
	if (p->pre_handler && p->pre_handler(p, regs))
		/* handler has already set things up, so skip ss setup */
		return 1;

ss_probe:
	if (p->ainsn.boostable >= 0) {
		unsigned int insn = *p->ainsn.insn;

		/* regs->nip is also adjusted if emulate_step returns 1 */
		ret = emulate_step(regs, insn);
		if (ret > 0) {
			/*
			 * Once this instruction has been boosted
			 * successfully, set the boostable flag.
			 */
			if (unlikely(p->ainsn.boostable == 0))
				p->ainsn.boostable = 1;

			if (p->post_handler)
				p->post_handler(p, regs, 0);

			kcb->kprobe_status = KPROBE_HIT_SSDONE;
			reset_current_kprobe();
			preempt_enable_no_resched();
			return 1;
		} else if (ret < 0) {
			/*
			 * We don't allow kprobes on mtmsr(d)/rfi(d), etc.
			 * so we should never get here, but it's still
			 * good to catch them, just in case...
			 */
			printk("Can't step on instruction %x\n", insn);
			BUG();
		} else if (ret == 0)
			/* This instruction can't be boosted */
			p->ainsn.boostable = -1;
	}
	prepare_singlestep(p, regs);
	kcb->kprobe_status = KPROBE_HIT_SS;
	return 1;

no_kprobe:
	preempt_enable_no_resched();
	return ret;
}

/*
 * Function return probe trampoline:
 *	- arch_init_kprobes() establishes a probepoint here
 *	- when the probed function returns, this probe
 *	  causes the handlers to fire
 */
static void __used kretprobe_trampoline_holder(void)
{
	asm volatile(".global kretprobe_trampoline\n"
			"kretprobe_trampoline:\n"
			"nop\n");
}

/*
 * Called when the probe at the kretprobe trampoline is hit.
 */
static int __kprobes trampoline_probe_handler(struct kprobe *p,
						struct pt_regs *regs)
{
	struct kretprobe_instance *ri = NULL;
	struct hlist_head *head, empty_rp;
	struct hlist_node *tmp;
	unsigned long flags, orig_ret_address = 0;
	unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline;

	INIT_HLIST_HEAD(&empty_rp);
	kretprobe_hash_lock(current, &head, &flags);

	/*
	 * It is possible to have multiple instances associated with a given
	 * task either because multiple functions in the call path have
	 * return probes installed on them, and/or more than one return
	 * probe was registered for a target function.
	 *
	 * We can handle this because:
	 *     - instances are always inserted at the head of the list
	 *     - when multiple return probes are registered for the same
	 *       function, the first instance's ret_addr will point to the
	 *       real return address, and all the rest will point to
	 *       kretprobe_trampoline
	 */
	hlist_for_each_entry_safe(ri, tmp, head, hlist) {
		if (ri->task != current)
			/* another task is sharing our hash bucket */
			continue;

		if (ri->rp && ri->rp->handler)
			ri->rp->handler(ri, regs);

		orig_ret_address = (unsigned long)ri->ret_addr;
		recycle_rp_inst(ri, &empty_rp);

		if (orig_ret_address != trampoline_address)
			/*
			 * This is the real return address. Any other
			 * instances associated with this task are for
			 * other calls deeper on the call stack.
			 */
			break;
	}

	kretprobe_assert(ri, orig_ret_address, trampoline_address);
	regs->nip = orig_ret_address;

	reset_current_kprobe();
	kretprobe_hash_unlock(current, &flags);
	preempt_enable_no_resched();

	hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
		hlist_del(&ri->hlist);
		kfree(ri);
	}
	/*
	 * By returning a non-zero value, we are telling
	 * kprobe_handler() that we don't want the post_handler
	 * to run (and have re-enabled preemption).
	 */
	return 1;
}

/*
 * Called after single-stepping. p->addr is the address of the
 * instruction whose first byte has been replaced by the "breakpoint"
 * instruction. To avoid the SMP problems that can occur when we
 * temporarily put back the original opcode to single-step, we
 * single-stepped a copy of the instruction. The address of this
 * copy is p->ainsn.insn.
 */
static int __kprobes post_kprobe_handler(struct pt_regs *regs)
{
	struct kprobe *cur = kprobe_running();
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	if (!cur)
		return 0;

	/* make sure we got here for an instruction we have a kprobe on */
	if (((unsigned long)cur->ainsn.insn + 4) != regs->nip)
		return 0;

	if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) {
		kcb->kprobe_status = KPROBE_HIT_SSDONE;
		cur->post_handler(cur, regs, 0);
	}

	/* Adjust nip to just after the single-stepped instruction */
	regs->nip = (unsigned long)cur->addr + 4;
	regs->msr |= kcb->kprobe_saved_msr;

	/* Restore the original saved kprobes variables and continue. */
	if (kcb->kprobe_status == KPROBE_REENTER) {
		restore_previous_kprobe(kcb);
		goto out;
	}
	reset_current_kprobe();
out:
	preempt_enable_no_resched();

	/*
	 * If somebody else is singlestepping across a probe point, msr
	 * will have its single-step bit set, in which case continue the
	 * remaining processing of do_debug, as if this is not a probe hit.
	 */
	if (regs->msr & MSR_SINGLESTEP)
		return 0;

	return 1;
}

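/*
 * Fault fix-up: called when the single-stepped instruction, or a user
 * pre/post handler, takes a fault while a kprobe is active.
 */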
int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
{
	struct kprobe *cur = kprobe_running();
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
	const struct exception_table_entry *entry;

	switch(kcb->kprobe_status) {
	case KPROBE_HIT_SS:
	case KPROBE_REENTER:
		/*
		 * We are here because the instruction being single
		 * stepped caused a page fault. We reset the current
		 * kprobe, point nip back to the probe address and
		 * allow the page fault handler to continue as for a
		 * normal page fault.
		 */
		regs->nip = (unsigned long)cur->addr;
		regs->msr &= ~MSR_SINGLESTEP;
		regs->msr |= kcb->kprobe_saved_msr;
		if (kcb->kprobe_status == KPROBE_REENTER)
			restore_previous_kprobe(kcb);
		else
			reset_current_kprobe();
		preempt_enable_no_resched();
		break;
	case KPROBE_HIT_ACTIVE:
	case KPROBE_HIT_SSDONE:
		/*
		 * We increment the nmissed count for accounting of
		 * these specific fault cases.
		 */
		kprobes_inc_nmissed_count(cur);

		/*
		 * We come here because an instruction in the pre/post
		 * handler caused the fault. This could happen if the
		 * handler accesses user space via copy_from_user(),
		 * get_user() etc. Let the user-specified fault handler
		 * try to fix it first.
		 */
		if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
			return 1;

		/*
		 * In case the user-specified fault handler returned
		 * zero, try to fix up via the exception tables.
		 */
		if ((entry = search_exception_tables(regs->nip)) != NULL) {
			regs->nip = entry->fixup;
			return 1;
		}

		/*
		 * The exception table could not handle it either;
		 * let do_page_fault() fix it.
		 */
		break;
	default:
		break;
	}
	return 0;
}

/*
 * Wrapper routine for handling exception notifications.
 */
int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
				       unsigned long val, void *data)
{
	struct die_args *args = (struct die_args *)data;
	int ret = NOTIFY_DONE;

	if (args->regs && user_mode(args->regs))
		return ret;

	switch (val) {
	case DIE_BPT:
		if (kprobe_handler(args->regs))
			ret = NOTIFY_STOP;
		break;
	case DIE_SSTEP:
		if (post_kprobe_handler(args->regs))
			ret = NOTIFY_STOP;
		break;
	default:
		break;
	}
	return ret;
}

#ifdef CONFIG_PPC64
/* On ppc64, function pointers point at a function descriptor; dereference it. */
unsigned long arch_deref_entry_point(void *entry)
{
	return ((func_descr_t *)entry)->entry;
}
#endif

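/*
 * Jprobe support: divert execution into the jprobe's handler, which runs
 * with the same register state (and, on ppc64, the correct TOC) as the
 * probed function.
 */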
int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct jprobe *jp = container_of(p, struct jprobe, kp);
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	memcpy(&kcb->jprobe_saved_regs, regs, sizeof(struct pt_regs));

	/* set up the return addr to the jprobe handler routine */
	regs->nip = arch_deref_entry_point(jp->entry);
#ifdef CONFIG_PPC64
	regs->gpr[2] = (unsigned long)(((func_descr_t *)jp->entry)->toc);
#endif

	return 1;
}

void __used __kprobes jprobe_return(void)
{
	asm volatile("trap" ::: "memory");
}

static void __used __kprobes jprobe_return_end(void)
{
}

int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	/*
	 * Note: ideally we would validate that we got here because of
	 * the "trap" in jprobe_return() above before restoring the
	 * saved regs.
	 */
	memcpy(regs, &kcb->jprobe_saved_regs, sizeof(struct pt_regs));
	preempt_enable_no_resched();
	return 1;
}

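/*
 * A kprobe is placed on the kretprobe trampoline itself, so that
 * trampoline_probe_handler() runs whenever a probed function returns
 * through it.
 */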
static struct kprobe trampoline_p = {
	.addr = (kprobe_opcode_t *) &kretprobe_trampoline,
	.pre_handler = trampoline_probe_handler
};

int __init arch_init_kprobes(void)
{
	return register_kprobe(&trampoline_p);
}

int __kprobes arch_trampoline_kprobe(struct kprobe *p)
{
	if (p->addr == (kprobe_opcode_t *)&kretprobe_trampoline)
		return 1;

	return 0;
}
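
/*
 * Illustrative sketch (not part of this file's build): one way a module
 * might exercise the arch support above by registering a kprobe on a
 * kernel function. The probed symbol "do_fork" and all "example_*" names
 * below are assumptions for illustration only.
 */
#if 0
#include <linux/module.h>
#include <linux/kprobes.h>

static int example_pre(struct kprobe *p, struct pt_regs *regs)
{
	pr_info("kprobe hit at %p, nip=0x%lx\n", p->addr, regs->nip);
	return 0;	/* 0: let the probed instruction be single-stepped */
}

static struct kprobe example_kp = {
	.symbol_name	= "do_fork",	/* example target only */
	.pre_handler	= example_pre,
};

static int __init example_init(void)
{
	return register_kprobe(&example_kp);
}

static void __exit example_exit(void)
{
	unregister_kprobe(&example_kp);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");
#endif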