// SPDX-License-Identifier: GPL-2.0+
/*
 * User-space Probes (UProbes) for x86
 *
 * Copyright (C) IBM Corporation, 2008-2011
 * Authors:
 *	Srikar Dronamraju
 *	Jim Keniston
 */
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/ptrace.h>
#include <linux/uprobes.h>
#include <linux/uaccess.h>

#include <linux/kdebug.h>
#include <asm/processor.h>
#include <asm/insn.h>
#include <asm/mmu_context.h>

/* Post-execution fixups. */

/* Adjust IP back to vicinity of actual insn */
#define UPROBE_FIX_IP		0x01

/* Adjust the return address of a call insn */
#define UPROBE_FIX_CALL		0x02

/* Instruction will modify TF, don't change it */
#define UPROBE_FIX_SETF		0x04

#define UPROBE_FIX_RIP_SI	0x08
#define UPROBE_FIX_RIP_DI	0x10
#define UPROBE_FIX_RIP_BX	0x20
#define UPROBE_FIX_RIP_MASK	\
	(UPROBE_FIX_RIP_SI | UPROBE_FIX_RIP_DI | UPROBE_FIX_RIP_BX)

#define	UPROBE_TRAP_NR		UINT_MAX

/* Adaptations for mhiramat x86 decoder v14. */
#define OPCODE1(insn)		((insn)->opcode.bytes[0])
#define OPCODE2(insn)		((insn)->opcode.bytes[1])
#define OPCODE3(insn)		((insn)->opcode.bytes[2])
#define MODRM_REG(insn)		X86_MODRM_REG((insn)->modrm.value)

#define W(row, b0, b1, b2, b3, b4, b5, b6, b7, b8, b9, ba, bb, bc, bd, be, bf)\
	(((b0##UL << 0x0)|(b1##UL << 0x1)|(b2##UL << 0x2)|(b3##UL << 0x3) |   \
	  (b4##UL << 0x4)|(b5##UL << 0x5)|(b6##UL << 0x6)|(b7##UL << 0x7) |   \
	  (b8##UL << 0x8)|(b9##UL << 0x9)|(ba##UL << 0xa)|(bb##UL << 0xb) |   \
	  (bc##UL << 0xc)|(bd##UL << 0xd)|(be##UL << 0xe)|(bf##UL << 0xf))    \
	 << (row % 32))
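
/*
 * Worked example of the encoding above: each u32 element packs two rows
 * of 16 opcodes, so a table is a 256-bit bitmap indexed by opcode byte.
 * Opcode 0xe8 (call rel32) maps to bit 0xe8 % 32 == 8 of element
 * 0xe8 / 32 == 7, i.e. the b8 argument of W(0xe0, ...).
 * uprobe_init_insn() below queries the bitmap with test_bit().
 */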

/*
 * Good-instruction tables for 32-bit apps.  This is non-const and volatile
 * to keep gcc from statically optimizing it out, as variable_test_bit makes
 * gcc think only *(unsigned long *) is used.
 *
 * Opcodes we'll probably never support (they fault or trap in user space
 * anyway): 6c-6f (ins/outs), e4-e7 and ec-ef (in/out), cc/cd (int3/int),
 * ce (into), cf (iret), f1 (int1/icebp), f4 (hlt), fa/fb (cli/sti).
 */
#if defined(CONFIG_X86_32) || defined(CONFIG_IA32_EMULATION)
static volatile u32 good_insns_32[256 / 32] = {
	/*      0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f         */
	/*      ----------------------------------------------         */
	W(0x00, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1) |
	W(0x10, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0) ,
	W(0x20, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) |
	W(0x30, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) ,
	W(0x40, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) |
	W(0x50, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) ,
	W(0x60, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0) |
	W(0x70, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) ,
	W(0x80, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) |
	W(0x90, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) ,
	W(0xa0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) |
	W(0xb0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) ,
	W(0xc0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0) |
	W(0xd0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) ,
	W(0xe0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0) |
	W(0xf0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1)
	/*      ----------------------------------------------         */
	/*      0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f         */
};
#else
#define good_insns_32	NULL
#endif

/*
 * Good-instruction table for 64-bit apps.
 *
 * Opcodes that are invalid in 64-bit mode are rejected here: the former
 * push/pop seg (06/07, 0e, 16/17, 1e/1f), daa/das/aaa/aas (27, 2f, 37, 3f),
 * pusha/popa (60/61), bound (62), call/jmp seg:ofs (9a, ea), aam/aad (d4/d5)
 * and salc (d6), along with in/out, int/int3, hlt, cli/sti and int1.
 */
#if defined(CONFIG_X86_64)
static volatile u32 good_insns_64[256 / 32] = {
	/*      0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f         */
	/*      ----------------------------------------------         */
	W(0x00, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1) |
	W(0x10, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0) ,
	W(0x20, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0) |
	W(0x30, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0) ,
	W(0x40, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) |
	W(0x50, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) ,
	W(0x60, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0) |
	W(0x70, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) ,
	W(0x80, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) |
	W(0x90, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1) ,
	W(0xa0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) |
	W(0xb0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) ,
	W(0xc0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0) |
	W(0xd0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1) ,
	W(0xe0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0) |
	W(0xf0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1)
	/*      ----------------------------------------------         */
	/*      0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f         */
};
#else
#define good_insns_64	NULL
#endif

/*
 * Good-instruction table for two-byte (0f-prefixed) opcodes, used for both
 * 64-bit and 32-bit apps.
 *
 * Opcodes we'll probably never support, being privileged or system insns,
 * e.g.: 0f 00/01 (descriptor-table groups), 0f 05 (syscall), 0f 06 (clts),
 * 0f 07 (sysret), 0f 08/09 (invd/wbinvd), 0f 0b (ud2), 0f 30 (wrmsr),
 * 0f 34/35 (sysenter/sysexit), 0f 37 (getsec), 0f 78/79 (vmread/vmwrite).
 */
static volatile u32 good_2byte_insns[256 / 32] = {
	/*      0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f         */
	/*      ----------------------------------------------         */
	W(0x00, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1) |
	W(0x10, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) ,
	W(0x20, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) |
	W(0x30, 0, 1, 1, 1, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1) ,
	W(0x40, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) |
	W(0x50, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) ,
	W(0x60, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) |
	W(0x70, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1) ,
	W(0x80, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) |
	W(0x90, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) ,
	W(0xa0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1) |
	W(0xb0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) ,
	W(0xc0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) |
	W(0xd0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) ,
	W(0xe0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) |
	W(0xf0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1)
	/*      ----------------------------------------------         */
	/*      0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f         */
};
#undef W

/*
 * opcodes we may need to refine support for:
 *
 *  0f - 2-byte instructions: for many of these the validity depends on
 *  the prefix and/or the reg field; we just consider the combination
 *  valid if it corresponds to any valid instruction.
 *
 *  8f - Group 1 - only reg = 0 is OK
 *  c6-c7 - Group 11 - only reg = 0 is OK
 *  d9-df - fpu insns with some illegal encodings
 *  f2, f3 - repnz, repz prefixes; these are also the first byte of
 *  certain floating-point instructions, such as addsd
 *  fe - Group 4 - only reg = 0 or 1 is OK
 *  ff - Group 5 - only reg = 0-6 is OK
 */

/* Reject insns carrying an explicit segment-override or LOCK prefix. */
static bool is_prefix_bad(struct insn *insn)
{
	insn_byte_t p;
	int i;

	for_each_insn_prefix(insn, i, p) {
		insn_attr_t attr;

		attr = inat_get_opcode_attribute(p);
		switch (attr) {
		case INAT_MAKE_PREFIX(INAT_PFX_ES):
		case INAT_MAKE_PREFIX(INAT_PFX_CS):
		case INAT_MAKE_PREFIX(INAT_PFX_DS):
		case INAT_MAKE_PREFIX(INAT_PFX_SS):
		case INAT_MAKE_PREFIX(INAT_PFX_LOCK):
			return true;
		}
	}
	return false;
}

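/*
 * Illustrative example: "lock incl (%rax)" (f0 ff 00) is rejected by
 * is_prefix_bad() because of its f0 (LOCK) prefix, while plain
 * "incl (%rax)" (ff 00) passes both the prefix check and the
 * good_insns_* bitmap lookup in uprobe_init_insn() below.
 */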
static int uprobe_init_insn(struct arch_uprobe *auprobe, struct insn *insn, bool x86_64)
{
	enum insn_mode m = x86_64 ? INSN_MODE_64 : INSN_MODE_32;
	u32 volatile *good_insns;
	int ret;

	ret = insn_decode(insn, auprobe->insn, sizeof(auprobe->insn), m);
	if (ret < 0)
		return -ENOEXEC;

	if (is_prefix_bad(insn))
		return -ENOTSUPP;

	/* We should not singlestep on the exception masking instructions */
	if (insn_masking_exception(insn))
		return -ENOTSUPP;

	if (x86_64)
		good_insns = good_insns_64;
	else
		good_insns = good_insns_32;

	if (test_bit(OPCODE1(insn), (unsigned long *)good_insns))
		return 0;

	if (insn->opcode.nbytes == 2) {
		if (test_bit(OPCODE2(insn), (unsigned long *)good_2byte_insns))
			return 0;
	}

	return -ENOTSUPP;
}

#ifdef CONFIG_X86_64
/*
 * If arch_uprobe->insn doesn't use rip-relative addressing, return
 * immediately.  Otherwise, rewrite the instruction so that it accesses
 * its memory operand indirectly through a scratch register.  Set
 * defparam->fixups accordingly.  (The contents of the scratch register
 * will be saved before we single-step the modified instruction,
 * and restored afterward.)
 *
 * We do this because a rip-relative instruction can access only a
 * relatively small area (+/- 2 GB from the instruction), and the XOL
 * area typically lies beyond that area.  At least for instructions
 * that store to memory, we can't execute the original instruction
 * and "fix things up" later, because the misdirected store could be
 * disastrous.
 *
 * Some useful facts about rip-relative instructions:
 *
 *  - There's always a modrm byte with bit layout "00 reg 101".
 *  - There's never a SIB byte.
 *  - The displacement is always 4 bytes.
 */
static void riprel_analyze(struct arch_uprobe *auprobe, struct insn *insn)
{
	u8 *cursor;
	u8 reg;
	u8 reg2;

	if (!insn_rip_relative(insn))
		return;

	/*
	 * insn_rip_relative() would have decoded rex_prefix and modrm.
	 * Clear REX.b bit (extension of MODRM.rm field):
	 * we want to encode a low numbered reg, not r8+.
	 */
	if (insn->rex_prefix.nbytes) {
		cursor = auprobe->insn + insn_offset_rex_prefix(insn);
		/* REX byte has 0100wrxb layout, clearing REX.b bit */
		*cursor &= 0xfe;
	}
	/*
	 * Similar treatment for the VEX3 prefix.
	 * TODO: add XOP treatment when insn decoder supports them
	 */
	if (insn->vex_prefix.nbytes >= 3) {
		/*
		 * vex2:     c5    rvvvvLpp   (has no b bit)
		 * vex3/xop: c4/8f rxbmmmmm wvvvvLpp
		 * evex:     62    rxbR00mm wvvvv1pp zllBVaaa
		 * Setting VEX3.b (setting because it has inverted meaning).
		 * Setting EVEX.x since (in non-SIB encoding) EVEX.x is the
		 * 4th bit of MODRM.rm, and needs the same treatment.
		 */
		cursor = auprobe->insn + insn_offset_vex_prefix(insn) + 1;
		*cursor |= 0x60;
	}

	/*
	 * Convert from rip-relative addressing to register-relative
	 * addressing via a scratch register.
	 *
	 * This is tricky since there are insns with modrm byte
	 * which also use registers not encoded in modrm byte:
	 * [i]div/[i]mul: implicitly use dx/ax
	 * shift ops: implicitly use cx
	 * cmpxchg: implicitly uses ax
	 * cmpxchg8/16b: implicitly uses dx/ax and bx/cx
	 * mulx: implicitly uses dx; it is vex-encoded, so bx and cx may
	 *   also be claimed via modrm.reg and vex.vvvv
	 * [v]pcmpistri/m and [v]pcmpestri/m: implicitly use ax, dx, cx, xmm0
	 * maskmovq/[v]maskmovdqu: implicitly use (ds:rdi) as destination,
	 *   but their modrm byte must have mode=11, so they never have a
	 *   rip-relative memory operand
	 *
	 * Summary: no insns with a modrm byte are known to use SI
	 * implicitly; DI is used implicitly only by string insns, which
	 * have no modrm byte at all.
	 */
	reg = MODRM_REG(insn);	/* Fetch modrm.reg */
	reg2 = 0xff;		/* Fetch vex.vvvv */
	if (insn->vex_prefix.nbytes)
		reg2 = insn->vex_prefix.bytes[2];
	/*
	 * TODO: add XOP vvvv reading.
	 *
	 * vex.vvvv field is in bits 6-3, bits are inverted.  The default
	 * of 0xff decodes to register 0 (ax), which never collides with
	 * the si/di/bx scratch candidates below.
	 */
	reg2 = ((reg2 >> 3) & 0x7) ^ 0x7;
	/*
	 * Register numbering is ax,cx,dx,bx, sp,bp,si,di, r8..r15.
	 * Pick a scratch register the insn does not already use,
	 * trying si, then di, then bx.
	 */
	if (reg != 6 && reg2 != 6) {
		reg2 = 6;	/* SI */
		auprobe->defparam.fixups |= UPROBE_FIX_RIP_SI;
	} else if (reg != 7 && reg2 != 7) {
		reg2 = 7;	/* DI */
		auprobe->defparam.fixups |= UPROBE_FIX_RIP_DI;
		/* TODO (paranoia): force maskmovq to not use di */
	} else {
		reg2 = 3;	/* BX */
		auprobe->defparam.fixups |= UPROBE_FIX_RIP_BX;
	}
	/*
	 * Point cursor at the modrm byte.  The next 4 bytes are the
	 * displacement.  Beyond the displacement, for some instructions,
	 * is the immediate operand.
	 */
	cursor = auprobe->insn + insn_offset_modrm(insn);
	/*
	 * Change modrm from "00 reg 101" to "10 reg reg2".  Example:
	 * 89 05 disp32  mov %eax,disp32(%rip) becomes
	 * 89 86 disp32  mov %eax,disp32(%rsi)
	 */
	*cursor = 0x80 | (reg << 3) | reg2;
}
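
/*
 * End-to-end example (illustrative): for "lea 0x10(%rip),%rax"
 * (48 8d 05 10 00 00 00), MODRM_REG() is 0 (ax), so SI is free:
 * riprel_analyze() sets UPROBE_FIX_RIP_SI and rewrites the copy to
 * "lea 0x10(%rsi),%rax" (48 8d 86 10 00 00 00).  riprel_pre_xol()
 * below then loads %rsi with vaddr + ilen, the value %rip would have
 * held, and riprel_post_xol() restores the task's original %rsi.
 */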

static inline unsigned long *
scratch_reg(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	if (auprobe->defparam.fixups & UPROBE_FIX_RIP_SI)
		return &regs->si;
	if (auprobe->defparam.fixups & UPROBE_FIX_RIP_DI)
		return &regs->di;
	return &regs->bx;
}

/*
 * If we're emulating a rip-relative instruction, save the contents of
 * the scratch register and store the target address in that register.
 */
static void riprel_pre_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	if (auprobe->defparam.fixups & UPROBE_FIX_RIP_MASK) {
		struct uprobe_task *utask = current->utask;
		unsigned long *sr = scratch_reg(auprobe, regs);

		utask->autask.saved_scratch_register = *sr;
		*sr = utask->vaddr + auprobe->defparam.ilen;
	}
}

static void riprel_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	if (auprobe->defparam.fixups & UPROBE_FIX_RIP_MASK) {
		struct uprobe_task *utask = current->utask;
		unsigned long *sr = scratch_reg(auprobe, regs);

		*sr = utask->autask.saved_scratch_register;
	}
}
#else /* 32-bit: */
/*
 * No RIP-relative addressing on 32-bit.
 */
static void riprel_analyze(struct arch_uprobe *auprobe, struct insn *insn)
{
}
static void riprel_pre_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
}
static void riprel_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
}
#endif /* CONFIG_X86_64 */

struct uprobe_xol_ops {
	bool	(*emulate)(struct arch_uprobe *, struct pt_regs *);
	int	(*pre_xol)(struct arch_uprobe *, struct pt_regs *);
	int	(*post_xol)(struct arch_uprobe *, struct pt_regs *);
	void	(*abort)(struct arch_uprobe *, struct pt_regs *);
};

static inline int sizeof_long(struct pt_regs *regs)
{
	/*
	 * Check registers for mode as in_xxx_syscall() does not apply here.
	 */
	return user_64bit_mode(regs) ? 8 : 4;
}

static int default_pre_xol_op(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	riprel_pre_xol(auprobe, regs);
	return 0;
}

static int emulate_push_stack(struct pt_regs *regs, unsigned long val)
{
	unsigned long new_sp = regs->sp - sizeof_long(regs);

	if (copy_to_user((void __user *)new_sp, &val, sizeof_long(regs)))
		return -EFAULT;

	regs->sp = new_sp;
	return 0;
}
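
/*
 * Note: on a 64-bit task sizeof_long() is 8, so emulating a push drops
 * ->sp by 8 and writes the value there; if copy_to_user() fails, ->sp
 * is left untouched and -EFAULT is returned.
 */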

/*
 * We have to fix things up as follows:
 *
 * Typically, the new ip is relative to the copied instruction.  We need
 * to make it relative to the original instruction (FIX_IP).  Exceptions
 * are return instructions and absolute or indirect jump or call instructions.
 *
 * If the single-stepped instruction was a call, the return address that
 * is atop the stack is the address following the copied instruction.  We
 * need to make it the address following the original instruction (FIX_CALL).
 *
 * If the original instruction was a rip-relative instruction such as
 * "movl %edx,0xnnnn(%rip)", we have instead executed an equivalent
 * instruction using a scratch register -- e.g., "movl %edx,0xnnnn(%rsi)".
 * We need to restore the contents of the scratch register
 * (FIX_RIP_reg).
 */
static int default_post_xol_op(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	struct uprobe_task *utask = current->utask;

	riprel_post_xol(auprobe, regs);
	if (auprobe->defparam.fixups & UPROBE_FIX_IP) {
		long correction = utask->vaddr - utask->xol_vaddr;
		regs->ip += correction;
	} else if (auprobe->defparam.fixups & UPROBE_FIX_CALL) {
		regs->sp += sizeof_long(regs); /* Pop incorrect return address */
		if (emulate_push_stack(regs, utask->vaddr + auprobe->defparam.ilen))
			return -ERESTART;
	}
	/* popf; tell the caller to not touch TF */
	if (auprobe->defparam.fixups & UPROBE_FIX_SETF)
		utask->autask.saved_tf = true;

	return 0;
}

static void default_abort_op(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	riprel_post_xol(auprobe, regs);
}

static const struct uprobe_xol_ops default_xol_ops = {
	.pre_xol  = default_pre_xol_op,
	.post_xol = default_post_xol_op,
	.abort	  = default_abort_op,
};

static bool branch_is_call(struct arch_uprobe *auprobe)
{
	return auprobe->branch.opc1 == 0xe8;
}

#define CASE_COND					\
	COND(70, 71, XF(OF))				\
	COND(72, 73, XF(CF))				\
	COND(74, 75, XF(ZF))				\
	COND(78, 79, XF(SF))				\
	COND(7a, 7b, XF(PF))				\
	COND(76, 77, XF(CF) || XF(ZF))			\
	COND(7c, 7d, XF(SF) != XF(OF))			\
	COND(7e, 7f, XF(ZF) || XF(SF) != XF(OF))

#define COND(op_y, op_n, expr)				\
	case 0x ## op_y: DO((expr) != 0)		\
	case 0x ## op_n: DO((expr) == 0)

#define XF(xf)	(!!(flags & X86_EFLAGS_ ## xf))
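
/*
 * Expansion example: COND(74, 75, XF(ZF)) becomes
 *	case 0x74: DO(ZF-is-set != 0)
 *	case 0x75: DO(ZF-is-set == 0)
 * so is_cond_jmp_opcode() recognizes both "je" (0x74) and "jne" (0x75),
 * while check_jmp_cond() evaluates the actual flag from regs->flags.
 */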

static bool is_cond_jmp_opcode(u8 opcode)
{
	switch (opcode) {
	#define DO(expr)	\
		return true;
	CASE_COND
	#undef DO

	default:
		return false;
	}
}

static bool check_jmp_cond(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	unsigned long flags = regs->flags;

	switch (auprobe->branch.opc1) {
	#define DO(expr)	\
		return expr;
	CASE_COND
	#undef DO

	default:
		return true;
	}
}

#undef	XF
#undef	COND
#undef	CASE_COND

static bool branch_emulate_op(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	unsigned long new_ip = regs->ip += auprobe->branch.ilen;
	unsigned long offs = (long)auprobe->branch.offs;

	if (branch_is_call(auprobe)) {
		/*
		 * If it fails we execute this (mangled, see the comment in
		 * branch_clear_offset) insn out-of-line.  In the likely case
		 * this should trigger the trap, and the probed application
		 * should die or restart the same insn after it handles the
		 * signal, restart could be not too bad in some cases.
		 */
		if (emulate_push_stack(regs, new_ip))
			return false;
	} else if (!check_jmp_cond(auprobe, regs)) {
		offs = 0;
	}

	regs->ip = new_ip + offs;
	return true;
}
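
/*
 * E.g. a probed "je +5" (74 05, ilen == 2): ->ip is first advanced past
 * the insn; if ZF is set, the sign-extended offset 5 is then added, and
 * if ZF is clear, offs is forced to 0, leaving ->ip at the next insn.
 */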

static bool push_emulate_op(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	unsigned long *src_ptr = (void *)regs + auprobe->push.reg_offset;

	if (emulate_push_stack(regs, *src_ptr))
		return false;
	regs->ip += auprobe->push.ilen;
	return true;
}

static int branch_post_xol_op(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	BUG_ON(!branch_is_call(auprobe));
	/*
	 * We can only get here if branch_emulate_op() failed to push the ret
	 * address _and_ another thread expanded our stack before the (mangled)
	 * "call" insn was executed out-of-line.  Just restore ->sp and restart.
	 */
	regs->sp += sizeof_long(regs);
	return -ERESTART;
}

static void branch_clear_offset(struct arch_uprobe *auprobe, struct insn *insn)
{
	/*
	 * Turn this insn into "call 1f; 1:", this is what we will execute
	 * out-of-line if ->emulate() fails.  We only need this to generate
	 * a trap, so that the probed task receives the correct signal with
	 * the properly filled siginfo.
	 *
	 * But see the comment in ->post_xol(), in the unlikely case it can
	 * succeed.  So we need to ensure that the new ->ip can not fall into
	 * the non-canonical area and trigger #GP.
	 */
	memset(auprobe->insn + insn_offset_immediate(insn),
		0, insn->immediate.nbytes);
}

static const struct uprobe_xol_ops branch_xol_ops = {
	.emulate  = branch_emulate_op,
	.post_xol = branch_post_xol_op,
};

static const struct uprobe_xol_ops push_xol_ops = {
	.emulate  = push_emulate_op,
};

/* Returns -ENOSYS if branch_xol_ops doesn't handle this insn */
static int branch_setup_xol_ops(struct arch_uprobe *auprobe, struct insn *insn)
{
	u8 opc1 = OPCODE1(insn);
	insn_byte_t p;
	int i;

	switch (opc1) {
	case 0xeb:	/* jmp 8 */
	case 0xe9:	/* jmp 32 */
	case 0x90:	/* prefix* + nop; same as jmp with .offs = 0 */
		break;

	case 0xe8:	/* call relative */
		branch_clear_offset(auprobe, insn);
		break;

	case 0x0f:
		if (insn->opcode.nbytes != 2)
			return -ENOSYS;
		/*
		 * If it is a "near" conditional jmp, OPCODE2() - 0x10 matches
		 * OPCODE1() of the "short" jmp which checks the same condition.
		 */
		opc1 = OPCODE2(insn) - 0x10;
		fallthrough;
	default:
		if (!is_cond_jmp_opcode(opc1))
			return -ENOSYS;
	}

	/*
	 * 16-bit overrides such as CALLW (66 e8 nn nn) are not supported.
	 * Intel and AMD behavior differ in 64-bit mode: Intel ignores the 66
	 * prefix.  No one uses these insns, so reject any branch insn with
	 * such a prefix.
	 */
	for_each_insn_prefix(insn, i, p) {
		if (p == 0x66)
			return -ENOTSUPP;
	}

	auprobe->branch.opc1 = opc1;
	auprobe->branch.ilen = insn->length;
	auprobe->branch.offs = insn->immediate.value;

	auprobe->ops = &branch_xol_ops;
	return 0;
}
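
/*
 * E.g. near "je rel32" (0f 84 nn nn nn nn): OPCODE2() is 0x84, so opc1
 * becomes 0x74, the short-form "je", and one CASE_COND table drives the
 * emulation of both encodings.
 */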

/* Returns -ENOSYS if push_xol_ops doesn't handle this insn */
static int push_setup_xol_ops(struct arch_uprobe *auprobe, struct insn *insn)
{
	u8 opc1 = OPCODE1(insn), reg_offset = 0;

	if (opc1 < 0x50 || opc1 > 0x57)
		return -ENOSYS;

	if (insn->length > 2)
		return -ENOSYS;
	if (insn->length == 2) {
		/* only support rex_prefix 0x41 (x64 only) */
#ifdef CONFIG_X86_64
		if (insn->rex_prefix.nbytes != 1 ||
		    insn->rex_prefix.bytes[0] != 0x41)
			return -ENOSYS;

		switch (opc1) {
		case 0x50:
			reg_offset = offsetof(struct pt_regs, r8);
			break;
		case 0x51:
			reg_offset = offsetof(struct pt_regs, r9);
			break;
		case 0x52:
			reg_offset = offsetof(struct pt_regs, r10);
			break;
		case 0x53:
			reg_offset = offsetof(struct pt_regs, r11);
			break;
		case 0x54:
			reg_offset = offsetof(struct pt_regs, r12);
			break;
		case 0x55:
			reg_offset = offsetof(struct pt_regs, r13);
			break;
		case 0x56:
			reg_offset = offsetof(struct pt_regs, r14);
			break;
		case 0x57:
			reg_offset = offsetof(struct pt_regs, r15);
			break;
		}
#else
		return -ENOSYS;
#endif
	} else {
		switch (opc1) {
		case 0x50:
			reg_offset = offsetof(struct pt_regs, ax);
			break;
		case 0x51:
			reg_offset = offsetof(struct pt_regs, cx);
			break;
		case 0x52:
			reg_offset = offsetof(struct pt_regs, dx);
			break;
		case 0x53:
			reg_offset = offsetof(struct pt_regs, bx);
			break;
		case 0x54:
			reg_offset = offsetof(struct pt_regs, sp);
			break;
		case 0x55:
			reg_offset = offsetof(struct pt_regs, bp);
			break;
		case 0x56:
			reg_offset = offsetof(struct pt_regs, si);
			break;
		case 0x57:
			reg_offset = offsetof(struct pt_regs, di);
			break;
		}
	}

	auprobe->push.reg_offset = reg_offset;
	auprobe->push.ilen = insn->length;
	auprobe->ops = &push_xol_ops;
	return 0;
}
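
/*
 * E.g. "push %r12" (41 54): length 2 with REX 0x41, opc1 0x54, so
 * reg_offset = offsetof(struct pt_regs, r12); push_emulate_op() then
 * pushes regs->r12 and advances ->ip by 2, no single-stepping needed.
 */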

/**
 * arch_uprobe_analyze_insn - instruction analysis including validity and fixups.
 * @auprobe: the probepoint information.
 * @mm: the probed address space.
 * @addr: virtual address at which to install the probepoint
 * Return 0 on success or a -ve number on error.
 */
int arch_uprobe_analyze_insn(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned long addr)
{
	struct insn insn;
	u8 fix_ip_or_call = UPROBE_FIX_IP;
	int ret;

	ret = uprobe_init_insn(auprobe, &insn, is_64bit_mm(mm));
	if (ret)
		return ret;

	ret = branch_setup_xol_ops(auprobe, &insn);
	if (ret != -ENOSYS)
		return ret;

	ret = push_setup_xol_ops(auprobe, &insn);
	if (ret != -ENOSYS)
		return ret;

	/*
	 * Figure out the fixups this insn will need after it has been
	 * single-stepped out of line.
	 */
	switch (OPCODE1(&insn)) {
	case 0x9d:		/* popf */
		auprobe->defparam.fixups |= UPROBE_FIX_SETF;
		break;
	case 0xc3:		/* ret or lret -- ip is correct */
	case 0xcb:
	case 0xc2:
	case 0xca:
	case 0xea:		/* jmp absolute -- ip is correct */
		fix_ip_or_call = 0;
		break;
	case 0x9a:		/* call absolute - Fix return addr, not ip */
		fix_ip_or_call = UPROBE_FIX_CALL;
		break;
	case 0xff:
		switch (MODRM_REG(&insn)) {
		case 2: case 3:		/* call or lcall, indirect near or far */
			fix_ip_or_call = UPROBE_FIX_CALL;
			break;
		case 4: case 5:		/* jmp or ljmp, indirect near or far */
			fix_ip_or_call = 0;
			break;
		}
		fallthrough;
	default:
		riprel_analyze(auprobe, &insn);
	}

	auprobe->defparam.ilen = insn.length;
	auprobe->defparam.fixups |= fix_ip_or_call;

	auprobe->ops = &default_xol_ops;
	return 0;
}
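
/*
 * E.g. "ret" (c3) takes the default path with fix_ip_or_call == 0,
 * since the insn itself sets the correct ->ip; a near "call rel32"
 * (e8 ...) never gets here because branch_setup_xol_ops() claims it.
 */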

/*
 * arch_uprobe_pre_xol - prepare to execute out of line.
 * @auprobe: the probepoint information.
 * @regs: reflects the saved user state of current task.
 */
int arch_uprobe_pre_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	struct uprobe_task *utask = current->utask;

	if (auprobe->ops->pre_xol) {
		int err = auprobe->ops->pre_xol(auprobe, regs);
		if (err)
			return err;
	}

	regs->ip = utask->xol_vaddr;
	utask->autask.saved_trap_nr = current->thread.trap_nr;
	current->thread.trap_nr = UPROBE_TRAP_NR;

	utask->autask.saved_tf = !!(regs->flags & X86_EFLAGS_TF);
	regs->flags |= X86_EFLAGS_TF;
	if (test_tsk_thread_flag(current, TIF_BLOCKSTEP))
		set_task_blockstep(current, false);

	return 0;
}

/*
 * If the xol insn itself traps and generates a signal (SIGILL/SIGSEGV/etc),
 * the fault handlers (do_page_fault/do_trap/etc) will have set
 * thread.trap_nr to the real trap number.
 *
 * arch_uprobe_pre_xol()/arch_uprobe_post_xol() save/restore thread.trap_nr,
 * so arch_uprobe_xol_was_trapped() simply checks whether ->trap_nr still
 * holds the UPROBE_TRAP_NR sentinel set by arch_uprobe_pre_xol().
 */
bool arch_uprobe_xol_was_trapped(struct task_struct *t)
{
	if (t->thread.trap_nr != UPROBE_TRAP_NR)
		return true;

	return false;
}

/*
 * Called after single-stepping.  To avoid the SMP problems that can
 * occur when we temporarily put back the original opcode to
 * single-step, we single-stepped a copy of the instruction.
 *
 * This function prepares to resume execution after the single-step.
 */
int arch_uprobe_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	struct uprobe_task *utask = current->utask;
	bool send_sigtrap = utask->autask.saved_tf;
	int err = 0;

	WARN_ON_ONCE(current->thread.trap_nr != UPROBE_TRAP_NR);
	current->thread.trap_nr = utask->autask.saved_trap_nr;

	if (auprobe->ops->post_xol) {
		err = auprobe->ops->post_xol(auprobe, regs);
		if (err) {
			/*
			 * Restore ->ip for restart or post mortem analysis.
			 * ->post_xol() must not return -ERESTART unless this
			 * is really possible.
			 */
			regs->ip = utask->vaddr;
			if (err == -ERESTART)
				err = 0;
			send_sigtrap = false;
		}
	}
	/*
	 * arch_uprobe_pre_xol() doesn't save the state of TIF_BLOCKSTEP
	 * so we can get an extra SIGTRAP if we do not clear TF.  We need
	 * to examine the opcode to make it right.
	 */
	if (send_sigtrap)
		send_sig(SIGTRAP, current, 0);

	if (!utask->autask.saved_tf)
		regs->flags &= ~X86_EFLAGS_TF;

	return err;
}

/* callback routine for handling exceptions. */
int arch_uprobe_exception_notify(struct notifier_block *self, unsigned long val, void *data)
{
	struct die_args *args = data;
	struct pt_regs *regs = args->regs;
	int ret = NOTIFY_DONE;

	/* We are interested only in userspace traps */
	if (regs && !user_mode(regs))
		return NOTIFY_DONE;

	switch (val) {
	case DIE_INT3:
		if (uprobe_pre_sstep_notifier(regs))
			ret = NOTIFY_STOP;

		break;

	case DIE_DEBUG:
		if (uprobe_post_sstep_notifier(regs))
			ret = NOTIFY_STOP;

		break;

	default:
		break;
	}

	return ret;
}
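
/*
 * Flow sketch: the int3 that replaced the probed insn raises DIE_INT3
 * and uprobe_pre_sstep_notifier() sets up the out-of-line step; the
 * debug trap after that step raises DIE_DEBUG, where
 * uprobe_post_sstep_notifier() triggers the post-xol fixups above.
 */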

/*
 * This function gets called when the XOL instruction either gets trapped or
 * the thread has a fatal signal.  Reset the instruction pointer to its
 * probed address for the potential restart or for post mortem analysis.
 */
void arch_uprobe_abort_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	struct uprobe_task *utask = current->utask;

	if (auprobe->ops->abort)
		auprobe->ops->abort(auprobe, regs);

	current->thread.trap_nr = utask->autask.saved_trap_nr;
	regs->ip = utask->vaddr;
	/* clear TF if it was set by us in arch_uprobe_pre_xol() */
	if (!utask->autask.saved_tf)
		regs->flags &= ~X86_EFLAGS_TF;
}

static bool __skip_sstep(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	if (auprobe->ops->emulate)
		return auprobe->ops->emulate(auprobe, regs);
	return false;
}

bool arch_uprobe_skip_sstep(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	bool ret = __skip_sstep(auprobe, regs);
	if (ret && (regs->flags & X86_EFLAGS_TF))
		send_sig(SIGTRAP, current, 0);
	return ret;
}

unsigned long
arch_uretprobe_hijack_return_addr(unsigned long trampoline_vaddr, struct pt_regs *regs)
{
	int rasize = sizeof_long(regs), nleft;
	unsigned long orig_ret_vaddr = 0; /* clear high bits for 32-bit apps */

	if (copy_from_user(&orig_ret_vaddr, (void __user *)regs->sp, rasize))
		return -1;

	/* check whether address has been already hijacked */
	if (orig_ret_vaddr == trampoline_vaddr)
		return orig_ret_vaddr;

	nleft = copy_to_user((void __user *)regs->sp, &trampoline_vaddr, rasize);
	if (likely(!nleft))
		return orig_ret_vaddr;

	if (nleft != rasize) {
		pr_err("return address clobbered: pid=%d, %%sp=%#lx, %%ip=%#lx\n",
		       current->pid, regs->sp, regs->ip);

		force_sig(SIGSEGV);
	}

	return -1;
}

bool arch_uretprobe_is_alive(struct return_instance *ret, enum rp_check ctx,
				struct pt_regs *regs)
{
	if (ctx == RP_CHECK_CALL) /* sp was just decremented by "call" insn */
		return regs->sp < ret->stack;
	else
		return regs->sp <= ret->stack;
}