1
2
3
4
5
6
7
8
9
10#include <linux/kernel.h>
11#include <linux/sched.h>
12#include <linux/ptrace.h>
13#include <linux/uprobes.h>
14#include <linux/uaccess.h>
15
16#include <linux/kdebug.h>
17#include <asm/processor.h>
18#include <asm/insn.h>
19#include <asm/mmu_context.h>
20
21
22
23
/* Post-execution fixups, applied by default_post_xol_op() after the
 * instruction has been single-stepped out of line. */

/* Adjust IP back to vicinity of actual insn */
#define UPROBE_FIX_IP 0x01

/* Adjust the return address of a call insn */
#define UPROBE_FIX_CALL 0x02

/* Instruction itself modifies TF, don't clear TF afterwards */
#define UPROBE_FIX_SETF 0x04

/* Which register riprel_analyze() chose as scratch for a rewritten
 * RIP-relative operand (see riprel_pre_xol()/riprel_post_xol()). */
#define UPROBE_FIX_RIP_SI 0x08
#define UPROBE_FIX_RIP_DI 0x10
#define UPROBE_FIX_RIP_BX 0x20
#define UPROBE_FIX_RIP_MASK \
 (UPROBE_FIX_RIP_SI | UPROBE_FIX_RIP_DI | UPROBE_FIX_RIP_BX)

/* Sentinel stored in thread.trap_nr while single-stepping a uprobe;
 * arch_uprobe_xol_was_trapped() tests for it. */
#define UPROBE_TRAP_NR UINT_MAX

/* Accessors for the decoded instruction's opcode and ModRM bytes. */
#define OPCODE1(insn) ((insn)->opcode.bytes[0])
#define OPCODE2(insn) ((insn)->opcode.bytes[1])
#define OPCODE3(insn) ((insn)->opcode.bytes[2])
#define MODRM_REG(insn) X86_MODRM_REG((insn)->modrm.value)
45
/*
 * W(row, b0..bf) packs sixteen one-bit flags (one per opcode in "row")
 * into a u32 at bit offset (row % 32), so two adjacent rows (0x00/0x10,
 * 0x20/0x30, ...) are OR'ed into one array word.  Used to build the
 * good-instruction bitmaps below, which are queried with
 * test_bit(OPCODE1(insn), ...).
 */
#define W(row, b0, b1, b2, b3, b4, b5, b6, b7, b8, b9, ba, bb, bc, bd, be, bf)\
	(((b0##UL << 0x0)|(b1##UL << 0x1)|(b2##UL << 0x2)|(b3##UL << 0x3) |   \
	  (b4##UL << 0x4)|(b5##UL << 0x5)|(b6##UL << 0x6)|(b7##UL << 0x7) |   \
	  (b8##UL << 0x8)|(b9##UL << 0x9)|(ba##UL << 0xa)|(bb##UL << 0xb) |   \
	  (bc##UL << 0xc)|(bd##UL << 0xd)|(be##UL << 0xe)|(bf##UL << 0xf))    \
	 << (row % 32))
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
#if defined(CONFIG_X86_32) || defined(CONFIG_IA32_EMULATION)
/*
 * Good-instruction bitmap for 32-bit apps: bit N set means one-byte
 * opcode N may be probed and single-stepped out of line.
 *
 * The bit for 0x0f (the two-byte opcode escape) MUST be 0: if it were
 * set, uprobe_init_insn() would accept every 0x0f-prefixed instruction
 * immediately and good_2byte_insns[] would never be consulted.
 * (The previous table had this bit set — that was a bug.)
 *
 * NOTE(review): "volatile" is presumably here to keep the compiler from
 * const-folding the table out of test_bit() — confirm before removing.
 */
static volatile u32 good_insns_32[256 / 32] = {
	/*      0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f         */
	/*      ----------------------------------------------         */
	W(0x00, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0) | /* 00 */
	W(0x10, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0) , /* 10 */
	W(0x20, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 20 */
	W(0x30, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 30 */
	W(0x40, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 40 */
	W(0x50, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 50 */
	W(0x60, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0) | /* 60 */
	W(0x70, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 70 */
	W(0x80, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 80 */
	W(0x90, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 90 */
	W(0xa0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* a0 */
	W(0xb0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* b0 */
	W(0xc0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0) | /* c0 */
	W(0xd0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* d0 */
	W(0xe0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0) | /* e0 */
	W(0xf0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1)   /* f0 */
	/*      ----------------------------------------------         */
	/*      0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f         */
};
#else
#define good_insns_32	NULL
#endif
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
#if defined(CONFIG_X86_64)
/*
 * Good-instruction bitmap for 64-bit apps; same encoding and rules as
 * good_insns_32 above.
 *
 * As with the 32-bit table, bit 0x0f (two-byte escape) MUST be 0 so that
 * uprobe_init_insn() falls through to the good_2byte_insns[] check for
 * two-byte opcodes.  (The previous table had this bit set — that was a
 * bug which whitelisted every 0x0f xx instruction unconditionally.)
 */
static volatile u32 good_insns_64[256 / 32] = {
	/*      0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f         */
	/*      ----------------------------------------------         */
	W(0x00, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0) | /* 00 */
	W(0x10, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0) , /* 10 */
	W(0x20, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0) | /* 20 */
	W(0x30, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0) , /* 30 */
	W(0x40, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 40 */
	W(0x50, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 50 */
	W(0x60, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0) | /* 60 */
	W(0x70, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 70 */
	W(0x80, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 80 */
	W(0x90, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1) , /* 90 */
	W(0xa0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* a0 */
	W(0xb0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* b0 */
	W(0xc0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0) | /* c0 */
	W(0xd0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* d0 */
	W(0xe0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0) | /* e0 */
	W(0xf0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1)   /* f0 */
	/*      ----------------------------------------------         */
	/*      0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f         */
};
#else
#define good_insns_64	NULL
#endif
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
/*
 * Good-instruction bitmap for two-byte (0x0f-escaped) opcodes, indexed
 * by the second opcode byte.  Consulted by uprobe_init_insn() only when
 * the one-byte tables above rejected OPCODE1 and opcode.nbytes == 2.
 *
 * NOTE(review): individual bit values are data taken on trust; verify
 * any change against the instruction-set reference before editing.
 */
static volatile u32 good_2byte_insns[256 / 32] = {
	/*      0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f         */
	/*      ----------------------------------------------         */
	W(0x00, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1) | /* 00 */
	W(0x10, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 10 */
	W(0x20, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 20 */
	W(0x30, 0, 1, 1, 1, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1) , /* 30 */
	W(0x40, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 40 */
	W(0x50, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 50 */
	W(0x60, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 60 */
	W(0x70, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1) , /* 70 */
	W(0x80, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 80 */
	W(0x90, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 90 */
	W(0xa0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1) | /* a0 */
	W(0xb0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* b0 */
	W(0xc0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* c0 */
	W(0xd0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* d0 */
	W(0xe0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* e0 */
	W(0xf0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1)   /* f0 */
	/*      ----------------------------------------------         */
	/*      0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f         */
};
#undef W
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256static bool is_prefix_bad(struct insn *insn)
257{
258 int i;
259
260 for (i = 0; i < insn->prefixes.nbytes; i++) {
261 insn_attr_t attr;
262
263 attr = inat_get_opcode_attribute(insn->prefixes.bytes[i]);
264 switch (attr) {
265 case INAT_MAKE_PREFIX(INAT_PFX_ES):
266 case INAT_MAKE_PREFIX(INAT_PFX_CS):
267 case INAT_MAKE_PREFIX(INAT_PFX_DS):
268 case INAT_MAKE_PREFIX(INAT_PFX_SS):
269 case INAT_MAKE_PREFIX(INAT_PFX_LOCK):
270 return true;
271 }
272 }
273 return false;
274}
275
276static int uprobe_init_insn(struct arch_uprobe *auprobe, struct insn *insn, bool x86_64)
277{
278 u32 volatile *good_insns;
279
280 insn_init(insn, auprobe->insn, sizeof(auprobe->insn), x86_64);
281
282 insn_get_length(insn);
283 if (!insn_complete(insn))
284 return -ENOEXEC;
285
286 if (is_prefix_bad(insn))
287 return -ENOTSUPP;
288
289
290 if (insn_masking_exception(insn))
291 return -ENOTSUPP;
292
293 if (x86_64)
294 good_insns = good_insns_64;
295 else
296 good_insns = good_insns_32;
297
298 if (test_bit(OPCODE1(insn), (unsigned long *)good_insns))
299 return 0;
300
301 if (insn->opcode.nbytes == 2) {
302 if (test_bit(OPCODE2(insn), (unsigned long *)good_2byte_insns))
303 return 0;
304 }
305
306 return -ENOTSUPP;
307}
308
309#ifdef CONFIG_X86_64
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
/*
 * If auprobe->insn doesn't use rip-relative addressing, return
 * immediately.  Otherwise, rewrite the copied instruction in place so
 * that it addresses its memory operand through a scratch register
 * instead of RIP, and record which scratch register was chosen in
 * defparam.fixups (riprel_pre_xol() loads the register before the
 * single-step; riprel_post_xol() restores it afterwards).
 */
static void riprel_analyze(struct arch_uprobe *auprobe, struct insn *insn)
{
	u8 *cursor;
	u8 reg;
	u8 reg2;

	if (!insn_rip_relative(insn))
		return;

	/*
	 * insn_rip_relative() would have decoded rex_prefix and modrm.
	 * Clear REX.b (the low bit — REX is 0100wrxb): we want to encode
	 * a low-numbered register for modrm.rm, not r8..r15.
	 */
	if (insn->rex_prefix.nbytes) {
		cursor = auprobe->insn + insn_offset_rex_prefix(insn);
		/* clear REX.b */
		*cursor &= 0xfe;
	}
	/*
	 * Similar treatment for a 3-byte VEX prefix: set bits 5-6 of
	 * vex.bytes[1], which hold the inverted X and B extension bits,
	 * so that no extended register is selected.
	 * NOTE(review): byte layout is RXBmmmmm with R/X/B inverted per
	 * the instruction-format reference — confirm against the SDM.
	 * TODO: add XOP treatment when the insn decoder supports it.
	 */
	if (insn->vex_prefix.nbytes >= 3) {
		cursor = auprobe->insn + insn_offset_vex_prefix(insn) + 1;
		*cursor |= 0x60;
	}

	/*
	 * Pick a scratch register the instruction does not already use.
	 * Candidates that must be avoided: the register in modrm.reg and
	 * (for VEX-encoded insns) the one in vex.vvvv.
	 */
	reg = MODRM_REG(insn);	/* modrm.reg */
	reg2 = 0xff;		/* vex.vvvv, if present */
	if (insn->vex_prefix.nbytes)
		reg2 = insn->vex_prefix.bytes[2];
	/*
	 * vex.vvvv lives in bits 6-3 and is stored inverted; only the
	 * three low-order bits are considered here (the high bit may be
	 * ignored in 32-bit mode).
	 * TODO: add XOP vvvv reading.
	 */
	reg2 = ((reg2 >> 3) & 0x7) ^ 0x7;
	/*
	 * Register numbering is ax,cx,dx,bx, sp,bp,si,di (0..7).
	 * Prefer si (6), then di (7), falling back to bx (3).  The order
	 * matters: do not pick bx when si is free.
	 */
	if (reg != 6 && reg2 != 6) {
		reg2 = 6;
		auprobe->defparam.fixups |= UPROBE_FIX_RIP_SI;
	} else if (reg != 7 && reg2 != 7) {
		reg2 = 7;
		auprobe->defparam.fixups |= UPROBE_FIX_RIP_DI;
		/* TODO (paranoia): force maskmovq to not use di */
	} else {
		reg2 = 3;
		auprobe->defparam.fixups |= UPROBE_FIX_RIP_BX;
	}

	/*
	 * Point cursor at the modrm byte of the copied insn and rewrite
	 * it from mod=00, rm=101 (disp32(%rip)) to mod=10, rm=reg2
	 * (disp32(%reg2)), keeping the original modrm.reg field.
	 */
	cursor = auprobe->insn + insn_offset_modrm(insn);
	*cursor = 0x80 | (reg << 3) | reg2;
}
454
455static inline unsigned long *
456scratch_reg(struct arch_uprobe *auprobe, struct pt_regs *regs)
457{
458 if (auprobe->defparam.fixups & UPROBE_FIX_RIP_SI)
459 return ®s->si;
460 if (auprobe->defparam.fixups & UPROBE_FIX_RIP_DI)
461 return ®s->di;
462 return ®s->bx;
463}
464
465
466
467
468
469static void riprel_pre_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
470{
471 if (auprobe->defparam.fixups & UPROBE_FIX_RIP_MASK) {
472 struct uprobe_task *utask = current->utask;
473 unsigned long *sr = scratch_reg(auprobe, regs);
474
475 utask->autask.saved_scratch_register = *sr;
476 *sr = utask->vaddr + auprobe->defparam.ilen;
477 }
478}
479
480static void riprel_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
481{
482 if (auprobe->defparam.fixups & UPROBE_FIX_RIP_MASK) {
483 struct uprobe_task *utask = current->utask;
484 unsigned long *sr = scratch_reg(auprobe, regs);
485
486 *sr = utask->autask.saved_scratch_register;
487 }
488}
#else /* !CONFIG_X86_64 */
/*
 * 32-bit has no RIP-relative addressing: provide no-op stubs so the
 * common paths can call these hooks unconditionally.
 */
static void riprel_analyze(struct arch_uprobe *auprobe, struct insn *insn)
{
}
static void riprel_pre_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
}
static void riprel_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
}
#endif
503
/*
 * Per-instruction execution strategy, selected by
 * arch_uprobe_analyze_insn():
 *  - emulate:  try to emulate the insn without executing it; returns
 *              true if fully handled.
 *  - pre_xol:  prepare registers before the out-of-line single-step.
 *  - post_xol: fix up state afterwards; -ERESTART asks the caller to
 *              restart the probed insn (see arch_uprobe_post_xol()).
 *  - abort:    clean up when the single-step is abandoned.
 */
struct uprobe_xol_ops {
	bool (*emulate)(struct arch_uprobe *, struct pt_regs *);
	int (*pre_xol)(struct arch_uprobe *, struct pt_regs *);
	int (*post_xol)(struct arch_uprobe *, struct pt_regs *);
	void (*abort)(struct arch_uprobe *, struct pt_regs *);
};
510
/*
 * Width in bytes of a "long" (stack slot / return address) for the task
 * whose saved registers are given: 8 in 64-bit user mode, otherwise 4.
 */
static inline int sizeof_long(struct pt_regs *regs)
{
	if (user_64bit_mode(regs))
		return 8;

	return 4;
}
518
/* Default pre-step hook: set up the rip-relative scratch register, if any. */
static int default_pre_xol_op(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	riprel_pre_xol(auprobe, regs);
	return 0;
}
524
525static int emulate_push_stack(struct pt_regs *regs, unsigned long val)
526{
527 unsigned long new_sp = regs->sp - sizeof_long(regs);
528
529 if (copy_to_user((void __user *)new_sp, &val, sizeof_long(regs)))
530 return -EFAULT;
531
532 regs->sp = new_sp;
533 return 0;
534}
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
/*
 * Default fixups after the copied insn was single-stepped out of line:
 *  - UPROBE_FIX_IP:   shift regs->ip from the xol slot back by the
 *                     slot-to-original-vaddr distance.
 *  - UPROBE_FIX_CALL: the call pushed a return address relative to the
 *                     xol slot; pop it and push the correct one (address
 *                     just past the original insn).  A failed push asks
 *                     the caller to restart with -ERESTART.
 *  - UPROBE_FIX_SETF: insn (popf, see arch_uprobe_analyze_insn) sets TF
 *                     itself; pretend the task already had TF so the
 *                     caller leaves it alone.
 */
static int default_post_xol_op(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	struct uprobe_task *utask = current->utask;

	riprel_post_xol(auprobe, regs);
	if (auprobe->defparam.fixups & UPROBE_FIX_IP) {
		long correction = utask->vaddr - utask->xol_vaddr;
		regs->ip += correction;
	} else if (auprobe->defparam.fixups & UPROBE_FIX_CALL) {
		regs->sp += sizeof_long(regs); /* Pop incorrect return address */
		if (emulate_push_stack(regs, utask->vaddr + auprobe->defparam.ilen))
			return -ERESTART;
	}
	/* popf; tell the caller to not touch TF */
	if (auprobe->defparam.fixups & UPROBE_FIX_SETF)
		utask->autask.saved_tf = true;

	return 0;
}
572
/* Single-step abandoned: just restore the scratch register, if any. */
static void default_abort_op(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	riprel_post_xol(auprobe, regs);
}
577
/* Fallback strategy: single-step the (possibly rewritten) copy out of line. */
static const struct uprobe_xol_ops default_xol_ops = {
	.pre_xol  = default_pre_xol_op,
	.post_xol = default_post_xol_op,
	.abort	  = default_abort_op,
};
583
/* 0xe8 is "call relative" — see branch_setup_xol_ops(). */
static bool branch_is_call(struct arch_uprobe *auprobe)
{
	return auprobe->branch.opc1 == 0xe8;
}
588
/*
 * Condition table for the short conditional jumps 0x70..0x7f.  Each
 * COND(op_y, op_n, expr) pairs the "jump if expr" opcode with its
 * complement "jump if !expr".  CASE_COND is expanded twice below with
 * different DO() definitions: once to recognize the opcodes
 * (is_cond_jmp_opcode) and once to evaluate the condition against
 * saved eflags (check_jmp_cond).
 */
#define CASE_COND					\
	COND(70, 71, XF(OF))				\
	COND(72, 73, XF(CF))				\
	COND(74, 75, XF(ZF))				\
	COND(78, 79, XF(SF))				\
	COND(7a, 7b, XF(PF))				\
	COND(76, 77, XF(CF) || XF(ZF))			\
	COND(7c, 7d, XF(SF) != XF(OF))			\
	COND(7e, 7f, XF(ZF) || XF(SF) != XF(OF))

/* Expands to a pair of case labels, "condition true" then "false". */
#define COND(op_y, op_n, expr)				\
	case 0x ## op_y: DO((expr) != 0)		\
	case 0x ## op_n: DO((expr) == 0)

/* Test a single flag bit in the local "flags" variable. */
#define XF(xf)	(!!(flags & X86_EFLAGS_ ## xf))
604
/* True if @opcode is one of the short conditional jumps 0x70..0x7f. */
static bool is_cond_jmp_opcode(u8 opcode)
{
	switch (opcode) {
	/* each CASE_COND entry becomes "case 0x7?: return true;" */
	#define DO(expr)	\
		return true;
	CASE_COND
	#undef	DO

	default:
		return false;
	}
}
617
/*
 * Evaluate the condition of the (conditional) jump in auprobe->branch
 * against the task's saved eflags.  Unconditional branches (opc1 not in
 * the table) always "take" the jump — the default returns true.
 */
static bool check_jmp_cond(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	unsigned long flags = regs->flags;	/* read by the XF() macro */

	switch (auprobe->branch.opc1) {
	/* each CASE_COND entry becomes "case 0x7?: return <cond>;" */
	#define DO(expr)	\
		return expr;
	CASE_COND
	#undef	DO

	default:
		return true;
	}
}

#undef	XF
#undef	COND
#undef	CASE_COND
636
/*
 * Emulate a relative branch (jmp/call/jcc) without single-stepping it.
 * Returns true if handled; false makes the caller fall back to
 * executing the (mangled, see branch_clear_offset) copy out of line.
 */
static bool branch_emulate_op(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	/* NB: deliberately advances regs->ip past the insn as a side effect */
	unsigned long new_ip = regs->ip += auprobe->branch.ilen;
	unsigned long offs = (long)auprobe->branch.offs;	/* sign-extend */

	if (branch_is_call(auprobe)) {
		/*
		 * If the push fails we execute this (mangled, see the comment
		 * in branch_clear_offset) insn out-of-line.  In the likely
		 * case this will trigger the trap, and the probed task should
		 * die or restart the same insn after it handles the signal;
		 * restart won't be harmful.
		 */
		if (emulate_push_stack(regs, new_ip))
			return false;
	} else if (!check_jmp_cond(auprobe, regs)) {
		offs = 0;	/* condition not met: fall through */
	}

	regs->ip = new_ip + offs;
	return true;
}
661
662static bool push_emulate_op(struct arch_uprobe *auprobe, struct pt_regs *regs)
663{
664 unsigned long *src_ptr = (void *)regs + auprobe->push.reg_offset;
665
666 if (emulate_push_stack(regs, *src_ptr))
667 return false;
668 regs->ip += auprobe->push.ilen;
669 return true;
670}
671
static int branch_post_xol_op(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	/* only "call" sets ->post_xol via branch_xol_ops + emulate failure */
	BUG_ON(!branch_is_call(auprobe));
	/*
	 * We can only get here if branch_emulate_op() failed to push the
	 * return address _and_ another thread expanded our stack before the
	 * (mangled) "call" insn was executed out-of-line.  Just restore ->sp
	 * and restart, so the insn is retried from scratch.
	 */
	regs->sp += sizeof_long(regs);
	return -ERESTART;
}
684
static void branch_clear_offset(struct arch_uprobe *auprobe, struct insn *insn)
{
	/*
	 * Turn this "call rel32" into "call 1f; 1:" by zeroing its immediate
	 * (the relative offset).  This is what will execute out-of-line if
	 * ->emulate() fails; we only need it to generate a trap so the probed
	 * task receives the correct signal with properly filled siginfo, and
	 * zeroing the offset keeps the fallback target inside the xol slot
	 * (no jump into a non-canonical/unmapped area).
	 *
	 * Note: only the immediate bytes are cleared; the first byte of
	 * ->insn[] must stay intact for set_orig_insn().
	 */
	memset(auprobe->insn + insn_offset_immediate(insn),
		0, insn->immediate.nbytes);
}
704
/* Relative branches: emulate when possible, mangled-call fallback otherwise. */
static const struct uprobe_xol_ops branch_xol_ops = {
	.emulate  = branch_emulate_op,
	.post_xol = branch_post_xol_op,
};

/* "push reg" insns: always fully emulated, no out-of-line step needed. */
static const struct uprobe_xol_ops push_xol_ops = {
	.emulate  = push_emulate_op,
};
713
714
/* Returns -ENOSYS if branch_xol_ops doesn't handle this insn */
static int branch_setup_xol_ops(struct arch_uprobe *auprobe, struct insn *insn)
{
	u8 opc1 = OPCODE1(insn);
	int i;

	switch (opc1) {
	case 0xeb:	/* jmp 8 */
	case 0xe9:	/* jmp 32 */
	case 0x90:	/* nop; emulated as jmp with .offs = 0 */
		break;

	case 0xe8:	/* call relative */
		branch_clear_offset(auprobe, insn);
		break;

	case 0x0f:
		if (insn->opcode.nbytes != 2)
			return -ENOSYS;
		/*
		 * If it is a "near" conditional jmp, OPCODE2() - 0x10 matches
		 * OPCODE1() of the "short" jmp which checks the same condition.
		 */
		opc1 = OPCODE2(insn) - 0x10;
		/* fall through */
	default:
		if (!is_cond_jmp_opcode(opc1))
			return -ENOSYS;
	}

	/*
	 * Reject any branch with an operand-size (0x66) prefix: 16-bit
	 * branch forms are not supported here (behavior differs between
	 * implementations and practically nothing uses them).
	 */
	for (i = 0; i < insn->prefixes.nbytes; i++) {
		if (insn->prefixes.bytes[i] == 0x66)
			return -ENOTSUPP;
	}

	auprobe->branch.opc1 = opc1;
	auprobe->branch.ilen = insn->length;
	auprobe->branch.offs = insn->immediate.value;

	auprobe->ops = &branch_xol_ops;
	return 0;
}
761
762
/* Returns -ENOSYS if push_xol_ops doesn't handle this insn */
static int push_setup_xol_ops(struct arch_uprobe *auprobe, struct insn *insn)
{
	u8 opc1 = OPCODE1(insn), reg_offset = 0;

	/* only the one-byte "push reg" opcodes 0x50..0x57 */
	if (opc1 < 0x50 || opc1 > 0x57)
		return -ENOSYS;

	if (insn->length > 2)
		return -ENOSYS;
	if (insn->length == 2) {
		/* only a single REX.B prefix (0x41) is accepted: push r8..r15 */
#ifdef CONFIG_X86_64
		if (insn->rex_prefix.nbytes != 1 ||
		    insn->rex_prefix.bytes[0] != 0x41)
			return -ENOSYS;

		switch (opc1) {
		case 0x50:
			reg_offset = offsetof(struct pt_regs, r8);
			break;
		case 0x51:
			reg_offset = offsetof(struct pt_regs, r9);
			break;
		case 0x52:
			reg_offset = offsetof(struct pt_regs, r10);
			break;
		case 0x53:
			reg_offset = offsetof(struct pt_regs, r11);
			break;
		case 0x54:
			reg_offset = offsetof(struct pt_regs, r12);
			break;
		case 0x55:
			reg_offset = offsetof(struct pt_regs, r13);
			break;
		case 0x56:
			reg_offset = offsetof(struct pt_regs, r14);
			break;
		case 0x57:
			reg_offset = offsetof(struct pt_regs, r15);
			break;
		}
#else
		/* 32-bit has no REX prefixes and no r8..r15 */
		return -ENOSYS;
#endif
	} else {
		/* no prefix: push ax..di */
		switch (opc1) {
		case 0x50:
			reg_offset = offsetof(struct pt_regs, ax);
			break;
		case 0x51:
			reg_offset = offsetof(struct pt_regs, cx);
			break;
		case 0x52:
			reg_offset = offsetof(struct pt_regs, dx);
			break;
		case 0x53:
			reg_offset = offsetof(struct pt_regs, bx);
			break;
		case 0x54:
			reg_offset = offsetof(struct pt_regs, sp);
			break;
		case 0x55:
			reg_offset = offsetof(struct pt_regs, bp);
			break;
		case 0x56:
			reg_offset = offsetof(struct pt_regs, si);
			break;
		case 0x57:
			reg_offset = offsetof(struct pt_regs, di);
			break;
		}
	}

	auprobe->push.reg_offset = reg_offset;
	auprobe->push.ilen = insn->length;
	auprobe->ops = &push_xol_ops;
	return 0;
}
842
843
844
845
846
847
848
849
/**
 * arch_uprobe_analyze_insn - instruction analysis including validity and fixups.
 * @auprobe: the probepoint information.
 * @mm: the probed address space.
 * @addr: virtual address at which to install the probepoint
 * Return 0 on success or a -ve number on error.
 */
int arch_uprobe_analyze_insn(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned long addr)
{
	struct insn insn;
	u8 fix_ip_or_call = UPROBE_FIX_IP;
	int ret;

	ret = uprobe_init_insn(auprobe, &insn, is_64bit_mm(mm));
	if (ret)
		return ret;

	/* Prefer full emulation where available (branches, push reg). */
	ret = branch_setup_xol_ops(auprobe, &insn);
	if (ret != -ENOSYS)
		return ret;

	ret = push_setup_xol_ops(auprobe, &insn);
	if (ret != -ENOSYS)
		return ret;

	/*
	 * Otherwise single-step out of line with default_xol_ops; figure
	 * out which fixups default_post_xol_op() will need.
	 */
	switch (OPCODE1(&insn)) {
	case 0x9d:		/* popf */
		auprobe->defparam.fixups |= UPROBE_FIX_SETF;
		break;
	case 0xc3:		/* ret / lret / ret n / lret n -- ip is correct */
	case 0xcb:
	case 0xc2:
	case 0xca:
	case 0xea:		/* jmp absolute -- ip is correct */
		fix_ip_or_call = 0;
		break;
	case 0x9a:		/* call absolute - fix return addr, not ip */
		fix_ip_or_call = UPROBE_FIX_CALL;
		break;
	case 0xff:
		switch (MODRM_REG(&insn)) {
		case 2: case 3:			/* call/lcall, indirect */
			fix_ip_or_call = UPROBE_FIX_CALL;
			break;
		case 4: case 5:			/* jmp/ljmp, indirect */
			fix_ip_or_call = 0;
			break;
		}
		/* fall through: 0xff insns may also be rip-relative */
	default:
		riprel_analyze(auprobe, &insn);
	}

	auprobe->defparam.ilen = insn.length;
	auprobe->defparam.fixups |= fix_ip_or_call;

	auprobe->ops = &default_xol_ops;
	return 0;
}
906
907
908
909
910
911
/*
 * arch_uprobe_pre_xol - prepare to execute out of line.
 * @auprobe: the probepoint information.
 * @regs: reflects the saved user state of current task.
 */
int arch_uprobe_pre_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	struct uprobe_task *utask = current->utask;

	if (auprobe->ops->pre_xol) {
		int err = auprobe->ops->pre_xol(auprobe, regs);
		if (err)
			return err;
	}

	/* Redirect execution to the xol slot. */
	regs->ip = utask->xol_vaddr;
	/* Mark the task as single-stepping a uprobe; see UPROBE_TRAP_NR. */
	utask->autask.saved_trap_nr = current->thread.trap_nr;
	current->thread.trap_nr = UPROBE_TRAP_NR;

	/* Force a single-step trap; remember whether TF was already set. */
	utask->autask.saved_tf = !!(regs->flags & X86_EFLAGS_TF);
	regs->flags |= X86_EFLAGS_TF;
	if (test_tsk_thread_flag(current, TIF_BLOCKSTEP))
		set_task_blockstep(current, false);

	return 0;
}
933
934
935
936
937
938
939
940
941
942
943
944bool arch_uprobe_xol_was_trapped(struct task_struct *t)
945{
946 if (t->thread.trap_nr != UPROBE_TRAP_NR)
947 return true;
948
949 return false;
950}
951
952
953
954
955
956
957
958
/*
 * Called after single-stepping the copied insn out of line: restore
 * trap_nr, run the strategy's post_xol fixups, deliver a pending
 * SIGTRAP if the task was itself single-stepping, and clear TF unless
 * the task had it set before arch_uprobe_pre_xol().
 */
int arch_uprobe_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	struct uprobe_task *utask = current->utask;
	bool send_sigtrap = utask->autask.saved_tf;
	int err = 0;

	WARN_ON_ONCE(current->thread.trap_nr != UPROBE_TRAP_NR);
	current->thread.trap_nr = utask->autask.saved_trap_nr;

	if (auprobe->ops->post_xol) {
		err = auprobe->ops->post_xol(auprobe, regs);
		if (err) {
			/*
			 * Fixup failed: re-execute the probed insn from its
			 * original address.  -ERESTART means this restart is
			 * the intended recovery, so it isn't an error.
			 */
			regs->ip = utask->vaddr;
			if (err == -ERESTART)
				err = 0;
			/* restarting: don't report a spurious single-step trap */
			send_sigtrap = false;
		}
	}

	/*
	 * The task was single-stepping (TF set by the user/debugger, not by
	 * us): deliver the SIGTRAP it would have gotten for the original insn.
	 */
	if (send_sigtrap)
		send_sig(SIGTRAP, current, 0);

	if (!utask->autask.saved_tf)
		regs->flags &= ~X86_EFLAGS_TF;

	return err;
}
995
996
/* notifier-chain callback for int3/debug exceptions */
int arch_uprobe_exception_notify(struct notifier_block *self, unsigned long val, void *data)
{
	struct die_args *args = data;
	struct pt_regs *regs = args->regs;
	int ret = NOTIFY_DONE;

	/* We are only interested in userspace traps */
	if (regs && !user_mode(regs))
		return NOTIFY_DONE;

	switch (val) {
	case DIE_INT3:		/* breakpoint hit */
		if (uprobe_pre_sstep_notifier(regs))
			ret = NOTIFY_STOP;

		break;

	case DIE_DEBUG:		/* single-step completed */
		if (uprobe_post_sstep_notifier(regs))
			ret = NOTIFY_STOP;

		/* fall through */
	default:
		break;
	}

	return ret;
}
1024
1025
1026
1027
1028
1029
/*
 * Called when the out-of-line step is abandoned (the xol insn trapped,
 * or the thread has a fatal signal).  Reset the instruction pointer to
 * the probed address for a potential restart or post-mortem analysis,
 * and undo the state changes made by arch_uprobe_pre_xol().
 */
void arch_uprobe_abort_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	struct uprobe_task *utask = current->utask;

	if (auprobe->ops->abort)
		auprobe->ops->abort(auprobe, regs);

	current->thread.trap_nr = utask->autask.saved_trap_nr;
	regs->ip = utask->vaddr;

	/* clear TF unless the task had it set before we forced it */
	if (!utask->autask.saved_tf)
		regs->flags &= ~X86_EFLAGS_TF;
}
1043
1044static bool __skip_sstep(struct arch_uprobe *auprobe, struct pt_regs *regs)
1045{
1046 if (auprobe->ops->emulate)
1047 return auprobe->ops->emulate(auprobe, regs);
1048 return false;
1049}
1050
1051bool arch_uprobe_skip_sstep(struct arch_uprobe *auprobe, struct pt_regs *regs)
1052{
1053 bool ret = __skip_sstep(auprobe, regs);
1054 if (ret && (regs->flags & X86_EFLAGS_TF))
1055 send_sig(SIGTRAP, current, 0);
1056 return ret;
1057}
1058
/*
 * Replace the return address on the probed task's stack with
 * @trampoline_vaddr, returning the original return address so it can be
 * restored later, or -1 on failure.
 */
unsigned long
arch_uretprobe_hijack_return_addr(unsigned long trampoline_vaddr, struct pt_regs *regs)
{
	int rasize = sizeof_long(regs), nleft;
	unsigned long orig_ret_vaddr = 0; /* clear high bits for 32-bit apps */

	if (copy_from_user(&orig_ret_vaddr, (void __user *)regs->sp, rasize))
		return -1;

	/* check whether the address has already been hijacked */
	if (orig_ret_vaddr == trampoline_vaddr)
		return orig_ret_vaddr;

	nleft = copy_to_user((void __user *)regs->sp, &trampoline_vaddr, rasize);
	if (likely(!nleft))
		return orig_ret_vaddr;

	if (nleft != rasize) {
		/*
		 * Partial write: the return address slot is now corrupt and
		 * unrecoverable — kill the task.
		 */
		pr_err("return address clobbered: pid=%d, %%sp=%#lx, %%ip=%#lx\n",
			current->pid, regs->sp, regs->ip);
		force_sig(SIGSEGV);
	}

	return -1;
}
1085
1086bool arch_uretprobe_is_alive(struct return_instance *ret, enum rp_check ctx,
1087 struct pt_regs *regs)
1088{
1089 if (ctx == RP_CHECK_CALL)
1090 return regs->sp < ret->stack;
1091 else
1092 return regs->sp <= ret->stack;
1093}
1094