/*
 *  Kernel Probes Jump Optimization (Optprobes)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2002, 2004
 * Copyright (C) Hitachi Ltd., 2012
 */
#include <linux/kprobes.h>
#include <linux/ptrace.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/hardirq.h>
#include <linux/preempt.h>
#include <linux/module.h>
#include <linux/kdebug.h>
#include <linux/kallsyms.h>
#include <linux/ftrace.h>

#include <asm/cacheflush.h>
#include <asm/desc.h>
#include <asm/pgtable.h>
#include <asm/uaccess.h>
#include <asm/alternative.h>
#include <asm/insn.h>
#include <asm/debugreg.h>

#include "common.h"

unsigned long __recover_optprobed_insn(kprobe_opcode_t *buf, unsigned long addr)
{
	struct optimized_kprobe *op;
	struct kprobe *kp;
	long offs;
	int i;

	for (i = 0; i < RELATIVEJUMP_SIZE; i++) {
		kp = get_kprobe((void *)addr - i);
		/* This function only handles jump-optimized kprobes */
		if (kp && kprobe_optimized(kp)) {
			op = container_of(kp, struct optimized_kprobe, kp);
			/* If op->list is not empty, op is under optimizing */
			if (list_empty(&op->list))
				goto found;
		}
	}

	return addr;
found:
	/*
	 * If the kprobe is optimized, the original bytes are overwritten
	 * by the jump destination address. In that case, the original
	 * bytes must be recovered from op->optinsn.copied_insn buffer.
	 */
	memcpy(buf, (void *)addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
	if (addr == (unsigned long)kp->addr) {
		buf[0] = kp->opcode;
		memcpy(buf + 1, op->optinsn.copied_insn, RELATIVE_ADDR_SIZE);
	} else {
		offs = addr - (unsigned long)kp->addr - 1;
		memcpy(buf, op->optinsn.copied_insn + offs, RELATIVE_ADDR_SIZE - offs);
	}

	return (unsigned long)buf;
}
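
/* Insert a move instruction which sets a pointer to eax/rdi (1st arg). */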
static void synthesize_set_arg1(kprobe_opcode_t *addr, unsigned long val)
{
#ifdef CONFIG_X86_64
	/* movabsq $val, %rdi (REX.W prefix + opcode 0xbf) */
	*addr++ = 0x48;
	*addr++ = 0xbf;
#else
	/* movl $val, %eax */
	*addr++ = 0xb8;
#endif
	*(unsigned long *)addr = val;
}

asm (
	".global optprobe_template_entry\n"
	"optprobe_template_entry:\n"
#ifdef CONFIG_X86_64
	/* We don't bother saving the ss register */
	"	pushq %rsp\n"
	"	pushfq\n"
	SAVE_REGS_STRING
	"	movq %rsp, %rsi\n"
	".global optprobe_template_val\n"
	"optprobe_template_val:\n"
	ASM_NOP5
	ASM_NOP5
	".global optprobe_template_call\n"
	"optprobe_template_call:\n"
	ASM_NOP5
	/* Move flags to rsp */
	"	movq 144(%rsp), %rdx\n"
	"	movq %rdx, 152(%rsp)\n"
	RESTORE_REGS_STRING
	/* Skip flags entry */
	"	addq $8, %rsp\n"
	"	popfq\n"
#else /* CONFIG_X86_32 */
	"	pushf\n"
	SAVE_REGS_STRING
	"	movl %esp, %edx\n"
	".global optprobe_template_val\n"
	"optprobe_template_val:\n"
	ASM_NOP5
	".global optprobe_template_call\n"
	"optprobe_template_call:\n"
	ASM_NOP5
	RESTORE_REGS_STRING
	"	addl $4, %esp\n"	/* skip cs */
	"	popf\n"
#endif
	".global optprobe_template_end\n"
	"optprobe_template_end:\n");

#define TMPL_MOVE_IDX \
	((long)&optprobe_template_val - (long)&optprobe_template_entry)
#define TMPL_CALL_IDX \
	((long)&optprobe_template_call - (long)&optprobe_template_entry)
#define TMPL_END_IDX \
	((long)&optprobe_template_end - (long)&optprobe_template_entry)

#define INT3_SIZE sizeof(kprobe_opcode_t)
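
/* Optimized kprobe call back function: called from optinsn */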
static void
optimized_callback(struct optimized_kprobe *op, struct pt_regs *regs)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
	unsigned long flags;

	/* This is possible if op is under delayed unoptimizing */
	if (kprobe_disabled(&op->kp))
		return;

	local_irq_save(flags);
	if (kprobe_running()) {
		kprobes_inc_nmissed_count(&op->kp);
	} else {
		/* Save skipped registers */
#ifdef CONFIG_X86_64
		regs->cs = __KERNEL_CS;
#else
		regs->cs = __KERNEL_CS | get_kernel_rpl();
		regs->gs = 0;
#endif
		regs->ip = (unsigned long)op->kp.addr + INT3_SIZE;
		regs->orig_ax = ~0UL;

		__this_cpu_write(current_kprobe, &op->kp);
		kcb->kprobe_status = KPROBE_HIT_ACTIVE;
		opt_pre_handler(&op->kp, regs);
		__this_cpu_write(current_kprobe, NULL);
	}
	local_irq_restore(flags);
}
NOKPROBE_SYMBOL(optimized_callback);

static int copy_optimized_instructions(u8 *dest, u8 *src)
{
	int len = 0, ret;

	while (len < RELATIVEJUMP_SIZE) {
		ret = __copy_instruction(dest + len, src + len);
		if (!ret || !can_boost(dest + len))
			return -EINVAL;
		len += ret;
	}
	/* Check whether the address range is reserved */
	if (ftrace_text_reserved(src, src + len - 1) ||
	    alternatives_text_reserved(src, src + len - 1) ||
	    jump_label_text_reserved(src, src + len - 1))
		return -EBUSY;

	return len;
}
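
/* Check whether insn is indirect jump */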
static int insn_is_indirect_jump(struct insn *insn)
{
	return ((insn->opcode.bytes[0] == 0xff &&
		(X86_MODRM_REG(insn->modrm.value) & 6) == 4) || /* Jump */
		insn->opcode.bytes[0] == 0xea);	/* Segment based jump */
}
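
/* Check whether insn jumps into the specified address range */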
static int insn_jump_into_range(struct insn *insn, unsigned long start, int len)
{
	unsigned long target = 0;

	switch (insn->opcode.bytes[0]) {
	case 0xe0:	/* loopne */
	case 0xe1:	/* loope */
	case 0xe2:	/* loop */
	case 0xe3:	/* jcxz */
	case 0xe9:	/* near relative jump */
	case 0xeb:	/* short relative jump */
		break;
	case 0x0f:
		if ((insn->opcode.bytes[1] & 0xf0) == 0x80) /* jcc near */
			break;
		return 0;
	default:
		if ((insn->opcode.bytes[0] & 0xf0) == 0x70) /* jcc short */
			break;
		return 0;
	}
	target = (unsigned long)insn->next_byte + insn->immediate.value;

	return (start <= target && target <= start + len);
}
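
/* Decode whole function to ensure any instructions don't jump into target */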
static int can_optimize(unsigned long paddr)
{
	unsigned long addr, size = 0, offset = 0;
	struct insn insn;
	kprobe_opcode_t buf[MAX_INSN_SIZE];

	/* Lookup symbol including addr */
	if (!kallsyms_lookup_size_offset(paddr, &size, &offset))
		return 0;

	/*
	 * Do not optimize in the entry code due to the unstable
	 * stack handling.
	 */
	if ((paddr >= (unsigned long)__entry_text_start) &&
	    (paddr < (unsigned long)__entry_text_end))
		return 0;

	/* Check there is enough space for a relative jump. */
	if (size - offset < RELATIVEJUMP_SIZE)
		return 0;

	/* Decode instructions */
	addr = paddr - offset;
	while (addr < paddr - offset + size) { /* Decode until function end */
		unsigned long recovered_insn;
		if (search_exception_tables(addr))
			/*
			 * Since some fixup code will jump into this function,
			 * we can't optimize kprobes in this function.
			 */
			return 0;
		recovered_insn = recover_probed_instruction(buf, addr);
		if (!recovered_insn)
			return 0;
		kernel_insn_init(&insn, (void *)recovered_insn, MAX_INSN_SIZE);
		insn_get_length(&insn);
		/* Another subsystem puts a breakpoint */
		if (insn.opcode.bytes[0] == BREAKPOINT_INSTRUCTION)
			return 0;
		/* Recover address */
		insn.kaddr = (void *)addr;
		insn.next_byte = (void *)(addr + insn.length);
		/* Check any instructions don't jump into target */
		if (insn_is_indirect_jump(&insn) ||
		    insn_jump_into_range(&insn, paddr + INT3_SIZE,
					 RELATIVE_ADDR_SIZE))
			return 0;
		addr += insn.length;
	}

	return 1;
}
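
/* Check optimized_kprobe can actually be optimized. */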
int arch_check_optimized_kprobe(struct optimized_kprobe *op)
{
	int i;
	struct kprobe *p;

	for (i = 1; i < op->optinsn.size; i++) {
		p = get_kprobe(op->kp.addr + i);
		if (p && !kprobe_disabled(p))
			return -EEXIST;
	}

	return 0;
}
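
/* Check the addr is within the optimized instructions. */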
int arch_within_optimized_kprobe(struct optimized_kprobe *op,
				 unsigned long addr)
{
	return ((unsigned long)op->kp.addr <= addr &&
		(unsigned long)op->kp.addr + op->optinsn.size > addr);
}
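
/* Free optimized instruction slot */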
static
void __arch_remove_optimized_kprobe(struct optimized_kprobe *op, int dirty)
{
	if (op->optinsn.insn) {
		free_optinsn_slot(op->optinsn.insn, dirty);
		op->optinsn.insn = NULL;
		op->optinsn.size = 0;
	}
}

void arch_remove_optimized_kprobe(struct optimized_kprobe *op)
{
	__arch_remove_optimized_kprobe(op, 1);
}
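
/*
 * Copy replacing target instructions
 * Target instructions MUST be relocatable (checked inside)
 * This is called when new aggr(opt)probe is allocated or reused.
 */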
int arch_prepare_optimized_kprobe(struct optimized_kprobe *op,
				  struct kprobe *__unused)
{
	u8 *buf;
	int ret;
	long rel;

	if (!can_optimize((unsigned long)op->kp.addr))
		return -EILSEQ;

	op->optinsn.insn = get_optinsn_slot();
	if (!op->optinsn.insn)
		return -ENOMEM;

	/*
	 * Verify if the address gap is in 2GB range, because this uses
	 * a relative jump.
	 */
	rel = (long)op->optinsn.insn - (long)op->kp.addr + RELATIVEJUMP_SIZE;
	if (abs(rel) > 0x7fffffff) {
		__arch_remove_optimized_kprobe(op, 0);
		return -ERANGE;
	}

	buf = (u8 *)op->optinsn.insn;

	/* Copy instructions into the out-of-line buffer */
	ret = copy_optimized_instructions(buf + TMPL_END_IDX, op->kp.addr);
	if (ret < 0) {
		__arch_remove_optimized_kprobe(op, 0);
		return ret;
	}
	op->optinsn.size = ret;

	/* Copy arch-dep-instance from template */
	memcpy(buf, &optprobe_template_entry, TMPL_END_IDX);

	/* Set probe information */
	synthesize_set_arg1(buf + TMPL_MOVE_IDX, (unsigned long)op);

	/* Set probe function call */
	synthesize_relcall(buf + TMPL_CALL_IDX, optimized_callback);

	/* Set returning jmp instruction at the tail of out-of-line buffer */
	synthesize_reljump(buf + TMPL_END_IDX + op->optinsn.size,
			   (u8 *)op->kp.addr + op->optinsn.size);

	flush_icache_range((unsigned long) buf,
			   (unsigned long) buf + TMPL_END_IDX +
			   op->optinsn.size + RELATIVEJUMP_SIZE);
	return 0;
}
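
/*
 * Replace breakpoints (int3) with relative jumps.
 * Caller must call with locking kprobe_mutex and text_mutex.
 */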
void arch_optimize_kprobes(struct list_head *oplist)
{
	struct optimized_kprobe *op, *tmp;
	u8 insn_buf[RELATIVEJUMP_SIZE];

	list_for_each_entry_safe(op, tmp, oplist, list) {
		s32 rel = (s32)((long)op->optinsn.insn -
			((long)op->kp.addr + RELATIVEJUMP_SIZE));

		WARN_ON(kprobe_disabled(&op->kp));

		/* Backup instructions which will be replaced by jump address */
		memcpy(op->optinsn.copied_insn, op->kp.addr + INT3_SIZE,
		       RELATIVE_ADDR_SIZE);

		insn_buf[0] = RELATIVEJUMP_OPCODE;
		*(s32 *)(&insn_buf[1]) = rel;

		text_poke_bp(op->kp.addr, insn_buf, RELATIVEJUMP_SIZE,
			     op->optinsn.insn);

		list_del_init(&op->list);
	}
}
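
/* Replace a relative jump with a breakpoint (int3). */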
void arch_unoptimize_kprobe(struct optimized_kprobe *op)
{
	u8 insn_buf[RELATIVEJUMP_SIZE];

	/* Set int3 to first byte for kprobes */
	insn_buf[0] = BREAKPOINT_INSTRUCTION;
	memcpy(insn_buf + 1, op->optinsn.copied_insn, RELATIVE_ADDR_SIZE);
	text_poke_bp(op->kp.addr, insn_buf, RELATIVEJUMP_SIZE,
		     op->optinsn.insn);
}
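
/*
 * Recover original instructions and breakpoints from relative jumps.
 * Caller must call with locking kprobe_mutex.
 */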
extern void arch_unoptimize_kprobes(struct list_head *oplist,
				    struct list_head *done_list)
{
	struct optimized_kprobe *op, *tmp;

	list_for_each_entry_safe(op, tmp, oplist, list) {
		arch_unoptimize_kprobe(op);
		list_move(&op->list, done_list);
	}
}

int setup_detour_execution(struct kprobe *p, struct pt_regs *regs, int reenter)
{
	struct optimized_kprobe *op;

	if (p->flags & KPROBE_FLAG_OPTIMIZED) {
		/* This kprobe is really able to run optimized path. */
		op = container_of(p, struct optimized_kprobe, kp);
		/* Detour through copied instructions */
		regs->ip = (unsigned long)op->optinsn.insn + TMPL_END_IDX;
		if (!reenter)
			reset_current_kprobe();
		preempt_enable_no_resched();
		return 1;
	}
	return 0;
}
NOKPROBE_SYMBOL(setup_detour_execution);