/*
 * User-space Probes (UProbes) for x86
 *
 * Copyright (C) IBM Corporation, 2008-2011
 * Authors:
 *	Srikar Dronamraju
 *	Jim Keniston
 */
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/ptrace.h>
#include <linux/uprobes.h>
#include <linux/uaccess.h>

#include <linux/kdebug.h>
#include <asm/processor.h>
#include <asm/insn.h>
#include <asm/mmu_context.h>

/* Post-execution fixups. */

/* Adjust IP back to vicinity of actual insn */
#define UPROBE_FIX_IP		0x01

/* Adjust the return address of a call insn */
#define UPROBE_FIX_CALL		0x02

/* Instruction will modify TF, don't change it */
#define UPROBE_FIX_SETF		0x04

#define UPROBE_FIX_RIP_SI	0x08
#define UPROBE_FIX_RIP_DI	0x10
#define UPROBE_FIX_RIP_BX	0x20
#define UPROBE_FIX_RIP_MASK	\
	(UPROBE_FIX_RIP_SI | UPROBE_FIX_RIP_DI | UPROBE_FIX_RIP_BX)

#define	UPROBE_TRAP_NR		UINT_MAX

#define OPCODE1(insn)		((insn)->opcode.bytes[0])
#define OPCODE2(insn)		((insn)->opcode.bytes[1])
#define OPCODE3(insn)		((insn)->opcode.bytes[2])
#define MODRM_REG(insn)		X86_MODRM_REG((insn)->modrm.value)

#define W(row, b0, b1, b2, b3, b4, b5, b6, b7, b8, b9, ba, bb, bc, bd, be, bf)\
	(((b0##UL << 0x0)|(b1##UL << 0x1)|(b2##UL << 0x2)|(b3##UL << 0x3) |   \
	  (b4##UL << 0x4)|(b5##UL << 0x5)|(b6##UL << 0x6)|(b7##UL << 0x7) |   \
	  (b8##UL << 0x8)|(b9##UL << 0x9)|(ba##UL << 0xa)|(bb##UL << 0xb) |   \
	  (bc##UL << 0xc)|(bd##UL << 0xd)|(be##UL << 0xe)|(bf##UL << 0xf))    \
	 << (row % 32))
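
/*
 * For illustration (this comment is not part of the original source):
 * each W() row packs 16 one-bit probe-ability flags into bits
 * (row % 32)..(row % 32 + 15) of a word, so two ORed rows (e.g. 0x00 and
 * 0x10) fill one u32, and the eight u32s below cover all 256 opcode
 * bytes.  E.g. in good_insns_64, opcode 0xe8 (call rel32) lands in array
 * word 0xe8 / 32 == 7 at bit 0xe8 % 32 == 8, set by the "b8" argument of
 * the W(0xe0, ...) row; test_bit(0xe8, (unsigned long *)good_insns_64)
 * then reads it back.
 */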

/*
 * Good-instruction table for 32-bit apps.  This is non-const and
 * volatile to keep gcc from statically optimizing it out, as
 * variable_test_bit makes gcc think only *(unsigned long*) is used.
 *
 * One bit per opcode byte: a set bit means the instruction may be
 * probed.  Excluded are instructions that fault or trap by design in
 * userspace (in/out, int/int3/into, hlt, cli/sti) and others whose
 * single-stepped side effects cannot be fixed up afterward.
 */
#if defined(CONFIG_X86_32) || defined(CONFIG_IA32_EMULATION)
static volatile u32 good_insns_32[256 / 32] = {
	/*      0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f         */
	/*      ----------------------------------------------         */
	W(0x00, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1) | /* 00 */
	W(0x10, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0) , /* 10 */
	W(0x20, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 20 */
	W(0x30, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 30 */
	W(0x40, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 40 */
	W(0x50, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 50 */
	W(0x60, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0) | /* 60 */
	W(0x70, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 70 */
	W(0x80, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 80 */
	W(0x90, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 90 */
	W(0xa0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* a0 */
	W(0xb0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* b0 */
	W(0xc0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0) | /* c0 */
	W(0xd0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* d0 */
	W(0xe0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0) | /* e0 */
	W(0xf0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1)   /* f0 */
	/*      ----------------------------------------------         */
	/*      0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f         */
};
#else
#define good_insns_32	NULL
#endif

/*
 * Good-instruction table for 64-bit apps.
 *
 * Genuinely invalid opcodes are excluded (e.g. the formerly valid
 * pusha/popa, daa/das/aaa/aas, bound, salc and the seg:ofs forms of
 * call/jmp), along with the same fault-by-design instructions as in
 * the 32-bit table.
 */
#if defined(CONFIG_X86_64)
static volatile u32 good_insns_64[256 / 32] = {
	/*      0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f         */
	/*      ----------------------------------------------         */
	W(0x00, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1) | /* 00 */
	W(0x10, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0) , /* 10 */
	W(0x20, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0) | /* 20 */
	W(0x30, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0) , /* 30 */
	W(0x40, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 40 */
	W(0x50, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 50 */
	W(0x60, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0) | /* 60 */
	W(0x70, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 70 */
	W(0x80, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 80 */
	W(0x90, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1) , /* 90 */
	W(0xa0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* a0 */
	W(0xb0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* b0 */
	W(0xc0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0) | /* c0 */
	W(0xd0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* d0 */
	W(0xe0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0) | /* e0 */
	W(0xf0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1)   /* f0 */
	/*      ----------------------------------------------         */
	/*      0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f         */
};
#else
#define good_insns_64	NULL
#endif

/*
 * Good-instruction table for the two-byte (0x0f-escaped) opcodes; used
 * for both 64-bit and 32-bit apps.  System and CPL0 instructions such
 * as syscall/sysret, sysenter/sysexit, clts, invd/wbinvd, wrmsr,
 * vmread/vmwrite, ud2 and the sgdt/lgdt groups are excluded.
 */
static volatile u32 good_2byte_insns[256 / 32] = {
	/*      0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f         */
	/*      ----------------------------------------------         */
	W(0x00, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1) | /* 00 */
	W(0x10, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 10 */
	W(0x20, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 20 */
	W(0x30, 0, 1, 1, 1, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1) , /* 30 */
	W(0x40, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 40 */
	W(0x50, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 50 */
	W(0x60, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 60 */
	W(0x70, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1) , /* 70 */
	W(0x80, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 80 */
	W(0x90, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 90 */
	W(0xa0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1) | /* a0 */
	W(0xb0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* b0 */
	W(0xc0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* c0 */
	W(0xd0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* d0 */
	W(0xe0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* e0 */
	W(0xf0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1)   /* f0 */
	/*      ----------------------------------------------         */
	/*      0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f         */
};
#undef W

/*
 * Prefixes we refuse to single-step out of line: the legacy es/cs/ss/ds
 * segment overrides and lock.  Note that the fs/gs overrides (0x64/0x65)
 * and the rep/repne prefixes (0xf2/0xf3) are not listed here and remain
 * probeable.
 */
static bool is_prefix_bad(struct insn *insn)
{
	int i;

	for (i = 0; i < insn->prefixes.nbytes; i++) {
		switch (insn->prefixes.bytes[i]) {
		case 0x26:	/* es: segment override */
		case 0x2E:	/* cs: segment override */
		case 0x36:	/* ss: segment override */
		case 0x3E:	/* ds: segment override */
		case 0xF0:	/* lock */
			return true;
		}
	}
	return false;
}
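
/*
 * For illustration (this comment is not part of the original source):
 * "lock incl (%rax)" (f0 ff 00) is rejected above, while rep-prefixed
 * string insns such as "rep movsb" (f3 a4) and fs/gs-override accesses
 * pass this check and are vetted only by the opcode tables.
 */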

static int uprobe_init_insn(struct arch_uprobe *auprobe, struct insn *insn, bool x86_64)
{
	u32 volatile *good_insns;

	insn_init(insn, auprobe->insn, sizeof(auprobe->insn), x86_64);
	/* has the side-effect of processing the entire instruction */
	insn_get_length(insn);
	if (WARN_ON_ONCE(!insn_complete(insn)))
		return -ENOEXEC;

	if (is_prefix_bad(insn))
		return -ENOTSUPP;

	if (x86_64)
		good_insns = good_insns_64;
	else
		good_insns = good_insns_32;

	if (test_bit(OPCODE1(insn), (unsigned long *)good_insns))
		return 0;

	if (insn->opcode.nbytes == 2) {
		if (test_bit(OPCODE2(insn), (unsigned long *)good_2byte_insns))
			return 0;
	}

	return -ENOTSUPP;
}

#ifdef CONFIG_X86_64

/*
 * If arch_uprobe->insn doesn't use rip-relative addressing, return
 * immediately.  Otherwise, rewrite the instruction so that it accesses
 * its memory operand indirectly through a scratch register.  Set
 * defparam->fixups accordingly.  (The contents of the scratch register
 * will be saved before we single-step the modified instruction, and
 * restored afterward.)
 *
 * We do this because a rip-relative instruction can access only a
 * relatively small area (+/- 2 GB from the instruction), and the XOL
 * area typically lies beyond that area.  At least for instructions
 * that store to memory, we can't execute the original instruction and
 * "fix things up" later, because the misdirected store could be
 * disastrous.
 *
 * Some useful facts about rip-relative instructions:
 * - There's always a modrm byte with bits 00-101-rrr.
 * - There's never a SIB byte.
 * - The displacement is always 4 bytes.
 */
static void riprel_analyze(struct arch_uprobe *auprobe, struct insn *insn)
{
	u8 *cursor;
	u8 reg;
	u8 reg2;

	if (!insn_rip_relative(insn))
		return;

	/*
	 * insn_rip_relative() has already decoded the prefix and modrm
	 * bytes for us.
	 */
	if (insn->rex_prefix.nbytes) {
		cursor = auprobe->insn + insn_offset_rex_prefix(insn);
		/* REX byte has 0100wrxb layout, clearing REX.b bit */
		*cursor &= 0xfe;
	}
	/*
	 * Similar treatment for the VEX3 prefix.
	 */
	if (insn->vex_prefix.nbytes >= 3) {
		/*
		 * vex2:     c5    rvvvvLpp   (has no b bit)
		 * vex3/xop: c4/8f rxbmmmmm wvvvvLpp
		 * Setting VEX3.b (setting because it has inverted meaning).
		 */
		cursor = auprobe->insn + insn_offset_vex_prefix(insn) + 1;
		*cursor |= 0x60;
	}

	/*
	 * Convert from rip-relative addressing to register-relative
	 * addressing via a scratch register.  This is tricky because
	 * some insns with a modrm byte also use registers not encoded
	 * in modrm: [i]div/[i]mul implicitly use dx:ax, shift ops
	 * implicitly use cx, cmpxchg implicitly uses ax, cmpxchg8/16b
	 * implicitly uses dx:ax and bx:cx.  Therefore we use only
	 * %rsi, %rdi or %rbx as the scratch register, and avoid
	 * whichever of them the insn names in modrm.reg or vex.vvvv.
	 */
	reg = MODRM_REG(insn);	/* Fetch modrm.reg */
	reg2 = 0xff;		/* Fetch vex.vvvv */
	if (insn->vex_prefix.nbytes)
		reg2 = insn->vex_prefix.bytes[2];
	/*
	 * TODO: add XOP vvvv reading.
	 *
	 * vex.vvvv field is in bits 6-3, bits are inverted.  Without a
	 * VEX prefix, the 0xff default decodes to a harmless value here.
	 */
	reg2 = ((reg2 >> 3) & 0x7) ^ 0x7;

	/*
	 * Register numbering is ax,cx,dx,bx, sp,bp,si,di, r8..r15.
	 * Pick a scratch register (si=6, di=7, bx=3) that the insn
	 * does not already use:
	 */
	if (reg != 6 && reg2 != 6) {
		reg2 = 6;
		auprobe->defparam.fixups |= UPROBE_FIX_RIP_SI;
	} else if (reg != 7 && reg2 != 7) {
		reg2 = 7;
		auprobe->defparam.fixups |= UPROBE_FIX_RIP_DI;
		/* TODO (paranoia): force maskmovq to not use di */
	} else {
		reg2 = 3;
		auprobe->defparam.fixups |= UPROBE_FIX_RIP_BX;
	}
	/*
	 * Point cursor at the modrm byte.  The next 4 bytes are the
	 * displacement.  Beyond the displacement, for some instructions,
	 * is the immediate operand.
	 */
	cursor = auprobe->insn + insn_offset_modrm(insn);
	/*
	 * Change modrm from 00 reg 101 to 10 reg reg2.  101 is disp32,
	 * which means the insn addresses (%rip + disp32); 10 reg reg2
	 * addresses disp32(%reg2) instead.
	 */
	*cursor = 0x80 | (reg << 3) | reg2;
}
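
/*
 * Worked example (this comment is not part of the original source):
 *
 *	8b 05 78 56 34 12	mov 0x12345678(%rip),%eax
 *
 * has modrm.reg = 0 (%eax) and no vex prefix, so %rsi is chosen as the
 * scratch register: modrm 0x05 (00 000 101) becomes 0x86 (10 000 110)
 * and the copied insn
 *
 *	8b 86 78 56 34 12	mov 0x12345678(%rsi),%eax
 *
 * is single-stepped with %rsi temporarily holding the value %rip would
 * have had during the original instruction.
 */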
460
461static inline unsigned long *
462scratch_reg(struct arch_uprobe *auprobe, struct pt_regs *regs)
463{
464 if (auprobe->defparam.fixups & UPROBE_FIX_RIP_SI)
465 return ®s->si;
466 if (auprobe->defparam.fixups & UPROBE_FIX_RIP_DI)
467 return ®s->di;
468 return ®s->bx;
469}

/*
 * If we're emulating a rip-relative instruction, save the contents of
 * the scratch register and store the target address in that register.
 */
static void riprel_pre_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	if (auprobe->defparam.fixups & UPROBE_FIX_RIP_MASK) {
		struct uprobe_task *utask = current->utask;
		unsigned long *sr = scratch_reg(auprobe, regs);

		utask->autask.saved_scratch_register = *sr;
		*sr = utask->vaddr + auprobe->defparam.ilen;
	}
}

static void riprel_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	if (auprobe->defparam.fixups & UPROBE_FIX_RIP_MASK) {
		struct uprobe_task *utask = current->utask;
		unsigned long *sr = scratch_reg(auprobe, regs);

		*sr = utask->autask.saved_scratch_register;
	}
}
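
/*
 * Illustrative timeline for a FIX_RIP_SI probe (this comment is not
 * part of the original source): riprel_pre_xol() saves the task's %rsi
 * and loads it with vaddr + ilen, the address %rip would hold while the
 * original insn executes; the rewritten copy is then single-stepped in
 * the XOL area; riprel_post_xol() (also called on abort) restores the
 * saved %rsi before the task resumes.
 */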
#else /* 32-bit: */
/*
 * No RIP-relative addressing on 32-bit
 */
static void riprel_analyze(struct arch_uprobe *auprobe, struct insn *insn)
{
}
static void riprel_pre_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
}
static void riprel_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
}
#endif /* CONFIG_X86_64 */

struct uprobe_xol_ops {
	bool	(*emulate)(struct arch_uprobe *, struct pt_regs *);
	int	(*pre_xol)(struct arch_uprobe *, struct pt_regs *);
	int	(*post_xol)(struct arch_uprobe *, struct pt_regs *);
	void	(*abort)(struct arch_uprobe *, struct pt_regs *);
};

static inline int sizeof_long(void)
{
	/* 32-bit tasks push/pop 4-byte longs; 64-bit tasks use 8 bytes */
	return in_ia32_syscall() ? 4 : 8;
}

static int default_pre_xol_op(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	riprel_pre_xol(auprobe, regs);
	return 0;
}

/* Push the emulated return address onto the probed task's stack. */
static int push_ret_address(struct pt_regs *regs, unsigned long ip)
{
	unsigned long new_sp = regs->sp - sizeof_long();

	if (copy_to_user((void __user *)new_sp, &ip, sizeof_long()))
		return -EFAULT;

	regs->sp = new_sp;
	return 0;
}
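
/*
 * For illustration (this comment is not part of the original source):
 * emulating a call at ip = 0x400100 with ilen = 5 on a 64-bit task
 * decrements %rsp by 8 and stores 0x400105 there, exactly the push the
 * hardware call would have performed.
 */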

/*
 * We have to fix things up as follows:
 *
 * Typically, the new ip is relative to the copied instruction.  We need
 * to make it relative to the original instruction (FIX_IP).  Exceptions
 * are return instructions and absolute or indirect jump or call
 * instructions.
 *
 * If the single-stepped instruction was a call, the return address that
 * is atop the stack is the address following the copied instruction.  We
 * need to make it the address following the original instruction
 * (FIX_CALL).
 *
 * If the original instruction was a rip-relative instruction such as
 * "movl %edx,0xnnnn(%rip)", we have instead executed an equivalent
 * instruction using a scratch register -- e.g., "movl %edx,(%rsi)".
 * We need to restore the contents of the scratch register
 * (FIX_RIP_reg).
 */
static int default_post_xol_op(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	struct uprobe_task *utask = current->utask;

	riprel_post_xol(auprobe, regs);
	if (auprobe->defparam.fixups & UPROBE_FIX_IP) {
		long correction = utask->vaddr - utask->xol_vaddr;
		regs->ip += correction;
	} else if (auprobe->defparam.fixups & UPROBE_FIX_CALL) {
		regs->sp += sizeof_long(); /* Pop incorrect return address */
		if (push_ret_address(regs, utask->vaddr + auprobe->defparam.ilen))
			return -ERESTART;
	}
	/* popf; tell the caller to not touch TF */
	if (auprobe->defparam.fixups & UPROBE_FIX_SETF)
		utask->autask.saved_tf = true;

	return 0;
}

static void default_abort_op(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	riprel_post_xol(auprobe, regs);
}

static const struct uprobe_xol_ops default_xol_ops = {
	.pre_xol  = default_pre_xol_op,
	.post_xol = default_post_xol_op,
	.abort	  = default_abort_op,
};

static bool branch_is_call(struct arch_uprobe *auprobe)
{
	return auprobe->branch.opc1 == 0xe8;	/* call relative */
}

#define CASE_COND					\
	COND(70, 71, XF(OF))				\
	COND(72, 73, XF(CF))				\
	COND(74, 75, XF(ZF))				\
	COND(78, 79, XF(SF))				\
	COND(7a, 7b, XF(PF))				\
	COND(76, 77, XF(CF) || XF(ZF))			\
	COND(7c, 7d, XF(SF) != XF(OF))			\
	COND(7e, 7f, XF(ZF) || XF(SF) != XF(OF))

#define COND(op_y, op_n, expr)				\
	case 0x ## op_y: DO((expr) != 0)		\
	case 0x ## op_n: DO((expr) == 0)

#define XF(xf)	(!!(flags & X86_EFLAGS_ ## xf))
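
/*
 * Illustrative expansion (this comment is not part of the original
 * source): for the ZF pair, COND(74, 75, XF(ZF)) turns into
 *
 *	case 0x74: DO((XF(ZF)) != 0)	// jz:  taken when ZF is set
 *	case 0x75: DO((XF(ZF)) == 0)	// jnz: taken when ZF is clear
 *
 * so check_jmp_cond() below, where DO(expr) is "return expr;", yields
 * the taken/not-taken decision straight from regs->flags.
 */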

static bool is_cond_jmp_opcode(u8 opcode)
{
	switch (opcode) {
	#define DO(expr)	\
		return true;
	CASE_COND
	#undef	DO

	default:
		return false;
	}
}

static bool check_jmp_cond(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	unsigned long flags = regs->flags;

	switch (auprobe->branch.opc1) {
	#define DO(expr)	\
		return expr;
	CASE_COND
	#undef	DO

	default:
		return true;
	}
}

#undef	XF
#undef	COND
#undef	CASE_COND

static bool branch_emulate_op(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	unsigned long new_ip = regs->ip += auprobe->branch.ilen;
	unsigned long offs = (long)auprobe->branch.offs;

	if (branch_is_call(auprobe)) {
		/*
		 * If it fails we execute this (mangled, see the comment in
		 * branch_clear_offset) insn out-of-line.  In the likely case
		 * this should trigger the trap, and the probed application
		 * should die or restart the same insn after it handles the
		 * signal, restart won't be harmful.
		 */
		if (push_ret_address(regs, new_ip))
			return false;
	} else if (!check_jmp_cond(auprobe, regs)) {
		offs = 0;
	}

	regs->ip = new_ip + offs;
	return true;
}
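
/*
 * Worked example (this comment is not part of the original source): for
 * a probed "74 05" (jz +5) at vaddr, ilen = 2 and offs = 5.  new_ip is
 * vaddr + 2; if ZF is set in regs->flags the task resumes at vaddr + 7,
 * otherwise at vaddr + 2 -- no out-of-line step is needed at all.
 */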

static int branch_post_xol_op(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	BUG_ON(!branch_is_call(auprobe));
	/*
	 * We can only get here if branch_emulate_op() failed to push the ret
	 * address _and_ another thread expanded our stack before the mangled
	 * "call" insn was executed out-of-line.  Just restore ->sp and
	 * restart.
	 */
	regs->sp += sizeof_long();
	return -ERESTART;
}

static void branch_clear_offset(struct arch_uprobe *auprobe, struct insn *insn)
{
	/*
	 * Turn this insn into "call 1f; 1:", this is what we will execute
	 * out-of-line if ->emulate() fails.  We only need this to generate
	 * a trap, so that the probed task receives the correct signal with
	 * the properly filled siginfo.  Zeroing the immediate confines the
	 * copied call to its own XOL slot.
	 */
	memset(auprobe->insn + insn_offset_immediate(insn),
		0, insn->immediate.nbytes);
}

static const struct uprobe_xol_ops branch_xol_ops = {
	.emulate  = branch_emulate_op,
	.post_xol = branch_post_xol_op,
};

/* Returns -ENOSYS if branch_xol_ops doesn't handle this insn */
static int branch_setup_xol_ops(struct arch_uprobe *auprobe, struct insn *insn)
{
	u8 opc1 = OPCODE1(insn);
	int i;

	switch (opc1) {
	case 0xeb:	/* jmp 8 */
	case 0xe9:	/* jmp 32 */
	case 0x90:	/* prefix* + nop; same as jmp with .offs = 0 */
		break;

	case 0xe8:	/* call relative */
		branch_clear_offset(auprobe, insn);
		break;

	case 0x0f:
		if (insn->opcode.nbytes != 2)
			return -ENOSYS;
		/*
		 * If it is a "near" conditional jmp, OPCODE2() - 0x10 matches
		 * OPCODE1() of the "short" jmp which checks the same condition.
		 */
		opc1 = OPCODE2(insn) - 0x10;
		/* fall through */
	default:
		if (!is_cond_jmp_opcode(opc1))
			return -ENOSYS;
	}

	/*
	 * 16-bit overrides such as $66 change the size and semantics of the
	 * branch and are not supported; just punt on such insns.
	 */
	for (i = 0; i < insn->prefixes.nbytes; i++) {
		if (insn->prefixes.bytes[i] == 0x66)
			return -ENOTSUPP;
	}

	auprobe->branch.opc1 = opc1;
	auprobe->branch.ilen = insn->length;
	auprobe->branch.offs = insn->immediate.value;

	auprobe->ops = &branch_xol_ops;
	return 0;
}
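
/*
 * For illustration (this comment is not part of the original source):
 * "0f 84 10 00 00 00" (jz rel32) yields opc1 = 0x84 - 0x10 = 0x74, the
 * short jz opcode, so check_jmp_cond() tests the same ZF condition for
 * both encodings; .ilen = 6 and .offs = 0x10 complete the emulation
 * parameters.
 */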

/**
 * arch_uprobe_analyze_insn
 * @auprobe: the probepoint information.
 * @mm: the probed address space.
 * @addr: virtual address at which to install the probepoint
 * Return 0 on success or a -ve number on error.
 */
int arch_uprobe_analyze_insn(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned long addr)
{
	struct insn insn;
	u8 fix_ip_or_call = UPROBE_FIX_IP;
	int ret;

	ret = uprobe_init_insn(auprobe, &insn, is_64bit_mm(mm));
	if (ret)
		return ret;

	ret = branch_setup_xol_ops(auprobe, &insn);
	if (ret != -ENOSYS)
		return ret;

	/*
	 * Figure out which fixups default_post_xol_op() will need to perform,
	 * and annotate defparam->fixups accordingly.
	 */
	switch (OPCODE1(&insn)) {
	case 0x9d:		/* popf */
		auprobe->defparam.fixups |= UPROBE_FIX_SETF;
		break;
	case 0xc3:		/* ret or lret -- ip is correct */
	case 0xcb:
	case 0xc2:
	case 0xca:
	case 0xea:		/* jmp absolute -- ip is correct */
		fix_ip_or_call = 0;
		break;
	case 0x9a:		/* call absolute - Fix return addr, not ip */
		fix_ip_or_call = UPROBE_FIX_CALL;
		break;
	case 0xff:
		switch (MODRM_REG(&insn)) {
		case 2: case 3:			/* call or lcall, indirect */
			fix_ip_or_call = UPROBE_FIX_CALL;
			break;
		case 4: case 5:			/* jmp or ljmp, indirect */
			fix_ip_or_call = 0;
			break;
		}
		/* fall through */
	default:
		riprel_analyze(auprobe, &insn);
	}

	auprobe->defparam.ilen = insn.length;
	auprobe->defparam.fixups |= fix_ip_or_call;

	auprobe->ops = &default_xol_ops;
	return 0;
}
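
/*
 * For illustration (this comment is not part of the original source):
 * "ff d0" (call *%rax) gets UPROBE_FIX_CALL, so after the out-of-line
 * step the bogus return address (xol_vaddr + 2) on the stack is
 * replaced with vaddr + 2; "c3" (ret) gets no fixups, since both ip and
 * the stack are already correct after the single-step.
 */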

/*
 * arch_uprobe_pre_xol - prepare to execute out of line.
 * @auprobe: the probepoint information.
 * @regs: reflects the saved user state of current task.
 */
int arch_uprobe_pre_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	struct uprobe_task *utask = current->utask;

	if (auprobe->ops->pre_xol) {
		int err = auprobe->ops->pre_xol(auprobe, regs);
		if (err)
			return err;
	}

	regs->ip = utask->xol_vaddr;
	utask->autask.saved_trap_nr = current->thread.trap_nr;
	current->thread.trap_nr = UPROBE_TRAP_NR;

	utask->autask.saved_tf = !!(regs->flags & X86_EFLAGS_TF);
	regs->flags |= X86_EFLAGS_TF;
	if (test_tsk_thread_flag(current, TIF_BLOCKSTEP))
		set_task_blockstep(current, false);

	return 0;
}

/*
 * If the xol insn itself traps and generates a signal (say,
 * SIGILL/SIGSEGV/etc), then detect the case where a singlestepped
 * instruction jumps back to its own address.  It is assumed that
 * anything like do_page_fault/do_trap/etc sets thread.trap_nr !=
 * UPROBE_TRAP_NR.
 *
 * arch_uprobe_pre_xol/arch_uprobe_post_xol save/restore thread.trap_nr,
 * arch_uprobe_xol_was_trapped() simply checks that ->trap_nr is not
 * equal to UPROBE_TRAP_NR as set by arch_uprobe_pre_xol().
 */
bool arch_uprobe_xol_was_trapped(struct task_struct *t)
{
	if (t->thread.trap_nr != UPROBE_TRAP_NR)
		return true;

	return false;
}

/*
 * Called after single-stepping.  To avoid the SMP problems that can
 * occur when we temporarily put back the original opcode to
 * single-step, we single-stepped a copy of the instruction.
 *
 * This function prepares to resume execution after the single-step.
 */
int arch_uprobe_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	struct uprobe_task *utask = current->utask;
	bool send_sigtrap = utask->autask.saved_tf;
	int err = 0;

	WARN_ON_ONCE(current->thread.trap_nr != UPROBE_TRAP_NR);
	current->thread.trap_nr = utask->autask.saved_trap_nr;

	if (auprobe->ops->post_xol) {
		err = auprobe->ops->post_xol(auprobe, regs);
		if (err) {
			/*
			 * Restore ->ip for restart or post mortem analysis.
			 * ->post_xol() must not return -ERESTART unless this
			 * is really possible.
			 */
			regs->ip = utask->vaddr;
			if (err == -ERESTART)
				err = 0;
			send_sigtrap = false;
		}
	}
	/*
	 * If the task was single-stepping before the probe hit (saved_tf),
	 * deliver the SIGTRAP the hardware single-step would have raised.
	 */
	if (send_sigtrap)
		send_sig(SIGTRAP, current, 0);

	if (!utask->autask.saved_tf)
		regs->flags &= ~X86_EFLAGS_TF;

	return err;
}
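
/*
 * For illustration (this comment is not part of the original source):
 * with UPROBE_FIX_IP, the trap after the single-step leaves regs->ip at
 * xol_vaddr + ilen; adding the correction (vaddr - xol_vaddr) relocates
 * it to vaddr + ilen, the instruction after the probed one.  A relative
 * branch taken inside the XOL slot (e.g. loop/jcxz, which branch_xol_ops
 * does not emulate) is relocated by the same constant, so its target
 * also becomes relative to the original vaddr.
 */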

/* callback routine for handling exceptions. */
int arch_uprobe_exception_notify(struct notifier_block *self, unsigned long val, void *data)
{
	struct die_args *args = data;
	struct pt_regs *regs = args->regs;
	int ret = NOTIFY_DONE;

	/* We are only interested in userspace traps */
	if (regs && !user_mode(regs))
		return NOTIFY_DONE;

	switch (val) {
	case DIE_INT3:
		if (uprobe_pre_sstep_notifier(regs))
			ret = NOTIFY_STOP;

		break;

	case DIE_DEBUG:
		if (uprobe_post_sstep_notifier(regs))
			ret = NOTIFY_STOP;

		/* fall through */
	default:
		break;
	}

	return ret;
}

/*
 * This function gets called when the XOL instruction either gets
 * trapped or the thread has a fatal signal.  Reset the instruction
 * pointer to its probed address for the potential restart or for post
 * mortem analysis.
 */
void arch_uprobe_abort_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	struct uprobe_task *utask = current->utask;

	if (auprobe->ops->abort)
		auprobe->ops->abort(auprobe, regs);

	current->thread.trap_nr = utask->autask.saved_trap_nr;
	regs->ip = utask->vaddr;
	/* clear TF if it was set by us in arch_uprobe_pre_xol() */
	if (!utask->autask.saved_tf)
		regs->flags &= ~X86_EFLAGS_TF;
}

static bool __skip_sstep(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	if (auprobe->ops->emulate)
		return auprobe->ops->emulate(auprobe, regs);
	return false;
}

bool arch_uprobe_skip_sstep(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	bool ret = __skip_sstep(auprobe, regs);

	if (ret && (regs->flags & X86_EFLAGS_TF))
		send_sig(SIGTRAP, current, 0);
	return ret;
}

unsigned long
arch_uretprobe_hijack_return_addr(unsigned long trampoline_vaddr, struct pt_regs *regs)
{
	int rasize = sizeof_long(), nleft;
	unsigned long orig_ret_vaddr = 0; /* clear high bits for 32-bit apps */

	if (copy_from_user(&orig_ret_vaddr, (void __user *)regs->sp, rasize))
		return -1;

	/* check whether address has been already hijacked */
	if (orig_ret_vaddr == trampoline_vaddr)
		return orig_ret_vaddr;

	nleft = copy_to_user((void __user *)regs->sp, &trampoline_vaddr, rasize);
	if (likely(!nleft))
		return orig_ret_vaddr;

	if (nleft != rasize) {
		pr_err("uprobe: return address clobbered: pid=%d, %%sp=%#lx, %%ip=%#lx\n",
			current->pid, regs->sp, regs->ip);

		force_sig_info(SIGSEGV, SEND_SIG_FORCED, current);
	}

	return -1;
}
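
/*
 * For illustration (this comment is not part of the original source):
 * on function entry the word at regs->sp holds the caller's return
 * address.  Hijacking swaps in trampoline_vaddr (the uretprobe
 * trampoline) and returns the original address, so the uretprobe
 * handler can run when the function returns and then redirect the task
 * back to the real caller.
 */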

bool arch_uretprobe_is_alive(struct return_instance *ret, enum rp_check ctx,
				struct pt_regs *regs)
{
	if (ctx == RP_CHECK_CALL) /* sp was just decremented by "call" insn */
		return regs->sp < ret->stack;
	else
		return regs->sp <= ret->stack;
}