/*
 * Code for Kernel probes Jump optimization.
 *
 * Copyright 2017, Anju T, IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/kprobes.h>
#include <linux/jump_label.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <asm/kprobes.h>
#include <asm/ptrace.h>
#include <asm/cacheflush.h>
#include <asm/code-patching.h>
#include <asm/sstep.h>
#include <asm/ppc-opcode.h>

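/*
 * Offsets (in kprobe_opcode_t units) of the patchable slots within the
 * optprobe detour-buffer template, measured from optprobe_template_entry.
 */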
#define TMPL_CALL_HDLR_IDX \
        (optprobe_template_call_handler - optprobe_template_entry)
#define TMPL_EMULATE_IDX \
        (optprobe_template_call_emulate - optprobe_template_entry)
#define TMPL_RET_IDX \
        (optprobe_template_ret - optprobe_template_entry)
#define TMPL_OP_IDX \
        (optprobe_template_op_address - optprobe_template_entry)
#define TMPL_INSN_IDX \
        (optprobe_template_insn - optprobe_template_entry)
#define TMPL_END_IDX \
        (optprobe_template_end - optprobe_template_entry)

DEFINE_INSN_CACHE_OPS(ppc_optinsn);

static bool insn_page_in_use;

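/*
 * Only one optprobe detour buffer is supported at a time: it lives in the
 * reserved optinsn_slot area, and insn_page_in_use tracks whether it is taken.
 */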
static void *__ppc_alloc_insn_page(void)
{
        if (insn_page_in_use)
                return NULL;
        insn_page_in_use = true;
        return &optinsn_slot;
}

static void __ppc_free_insn_page(void *page __maybe_unused)
{
        insn_page_in_use = false;
}

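/*
 * Insn cache backing the detour buffer. DEFINE_INSN_CACHE_OPS(ppc_optinsn)
 * above generates the get_ppc_optinsn_slot()/free_ppc_optinsn_slot() helpers
 * used below.
 */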
struct kprobe_insn_cache kprobe_ppc_optinsn_slots = {
        .mutex = __MUTEX_INITIALIZER(kprobe_ppc_optinsn_slots.mutex),
        .pages = LIST_HEAD_INIT(kprobe_ppc_optinsn_slots.pages),
        /* insn_size is initialized later, in arch_prepare_optimized_kprobe() */
        .alloc = __ppc_alloc_insn_page,
        .free = __ppc_free_insn_page,
        .nr_garbage = 0,
};

/*
 * Check if we can optimize this probe. Returns the post-emulation NIP if the
 * probe can be optimized, 0 otherwise.
 */
static unsigned long can_optimize(struct kprobe *p)
{
        struct pt_regs regs;
        struct instruction_op op;
        unsigned long nip = 0;

        /*
         * A kprobe placed on the kretprobe trampoline covers a 'nop'
         * instruction, which can always be emulated, so no further
         * checks are needed.
         */
        if (p->addr == (kprobe_opcode_t *)&kretprobe_trampoline)
                return (unsigned long)p->addr + sizeof(kprobe_opcode_t);

        /*
         * We only support optimizing kernel addresses, not module
         * addresses.
         *
         * FIXME: Optimize kprobes placed in module addresses.
         */
        if (!is_kernel_addr((unsigned long)p->addr))
                return 0;

        memset(&regs, 0, sizeof(struct pt_regs));
        regs.nip = (unsigned long)p->addr;
        regs.trap = 0x0;
        regs.msr = MSR_KERNEL;

        /*
         * Kprobes placed on conditional branches are not optimized: with a
         * dummy pt_regs we cannot predict the resulting nip, and so cannot
         * ensure that the return branch from the detour buffer stays within
         * branch range (+/- 32MB).
         *
         * Otherwise, analyse_instr() emulates the instruction and leaves the
         * post-emulation nip in regs.nip; the detour buffer branches back to
         * that address.
         */
        if (!is_conditional_branch(*p->ainsn.insn) &&
            analyse_instr(&op, &regs, *p->ainsn.insn))
                nip = regs.nip;

        return nip;
}

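/*
 * Called from the detour buffer in place of the regular kprobe trap: mimic a
 * kprobe hit, point regs->nip back at the probed address and run the
 * pre-handler.
 */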
static void optimized_callback(struct optimized_kprobe *op,
                               struct pt_regs *regs)
{
        struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
        unsigned long flags;

        /* This is possible if op is under delayed unoptimizing */
        if (kprobe_disabled(&op->kp))
                return;

        local_irq_save(flags);
        hard_irq_disable();

        if (kprobe_running()) {
                kprobes_inc_nmissed_count(&op->kp);
        } else {
                __this_cpu_write(current_kprobe, &op->kp);
                regs->nip = (unsigned long)op->kp.addr;
                kcb->kprobe_status = KPROBE_HIT_ACTIVE;
                opt_pre_handler(&op->kp, regs);
                __this_cpu_write(current_kprobe, NULL);
        }

        /*
         * No need for an explicit __hard_irq_enable() here:
         * local_irq_restore() will re-enable interrupts if they
         * were hard disabled.
         */
        local_irq_restore(flags);
}
NOKPROBE_SYMBOL(optimized_callback);

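/* Return the detour buffer to the insn cache when the optprobe is removed. */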
void arch_remove_optimized_kprobe(struct optimized_kprobe *op)
{
        if (op->optinsn.insn) {
                free_ppc_optinsn_slot(op->optinsn.insn, 1);
                op->optinsn.insn = NULL;
        }
}

/*
 * emulate_step() requires the instruction to be emulated as its second
 * parameter. Load register 'r4' with that instruction.
 */
void patch_imm32_load_insns(unsigned int val, kprobe_opcode_t *addr)
{
        /* addis r4,0,(insn)@h */
        patch_instruction(addr, PPC_INST_ADDIS | ___PPC_RT(4) |
                          ((val >> 16) & 0xffff));
        addr++;

        /* ori r4,r4,(insn)@l */
        patch_instruction(addr, PPC_INST_ORI | ___PPC_RA(4) |
                          ___PPC_RS(4) | (val & 0xffff));
}

/*
 * Generate instructions to load the provided immediate 64-bit value
 * into register 'r3' and patch these instructions at 'addr'.
 */
void patch_imm64_load_insns(unsigned long val, kprobe_opcode_t *addr)
{
        /* lis r3,(op)@highest */
        patch_instruction(addr, PPC_INST_ADDIS | ___PPC_RT(3) |
                          ((val >> 48) & 0xffff));
        addr++;

        /* ori r3,r3,(op)@higher */
        patch_instruction(addr, PPC_INST_ORI | ___PPC_RA(3) |
                          ___PPC_RS(3) | ((val >> 32) & 0xffff));
        addr++;

        /* rldicr r3,r3,32,31 */
        patch_instruction(addr, PPC_INST_RLDICR | ___PPC_RA(3) |
                          ___PPC_RS(3) | __PPC_SH64(32) | __PPC_ME64(31));
        addr++;

        /* oris r3,r3,(op)@h */
        patch_instruction(addr, PPC_INST_ORIS | ___PPC_RA(3) |
                          ___PPC_RS(3) | ((val >> 16) & 0xffff));
        addr++;

        /* ori r3,r3,(op)@l */
        patch_instruction(addr, PPC_INST_ORI | ___PPC_RA(3) |
                          ___PPC_RS(3) | (val & 0xffff));
}

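/*
 * Build the detour buffer for this probe: copy the template, then patch in
 * the optimized_kprobe address, the calls to optimized_callback() and
 * emulate_step(), the instruction to be emulated, and the branch back to the
 * instruction following the probe point.
 */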
int arch_prepare_optimized_kprobe(struct optimized_kprobe *op, struct kprobe *p)
{
        kprobe_opcode_t *buff, branch_op_callback, branch_emulate_step;
        kprobe_opcode_t *op_callback_addr, *emulate_step_addr;
        long b_offset;
        unsigned long nip, size;
        int rc, i;

        kprobe_ppc_optinsn_slots.insn_size = MAX_OPTINSN_SIZE;

        nip = can_optimize(p);
        if (!nip)
                return -EILSEQ;

        /* Allocate instruction slot for detour buffer */
        buff = get_ppc_optinsn_slot();
        if (!buff)
                return -ENOMEM;

        /*
         * OPTPROBE uses a 'b' instruction to branch to optinsn.insn.
         *
         * The target address has to be relatively close, because the branch
         * target is encoded in a 24-bit immediate field of the instruction
         * itself, i.e. the detour buffer must be within 32MB on either side
         * of the probed instruction.
         */
        b_offset = (unsigned long)buff - (unsigned long)p->addr;
        if (!is_offset_in_branch_range(b_offset))
                goto error;

        /* Check if the return address is also within 32MB range */
        b_offset = (unsigned long)(buff + TMPL_RET_IDX) -
                        (unsigned long)nip;
        if (!is_offset_in_branch_range(b_offset))
                goto error;

        /* Setup template */
        /* Copy the template into the detour buffer, one instruction at a time */
        size = (TMPL_END_IDX * sizeof(kprobe_opcode_t)) / sizeof(int);
        pr_devel("Copying template to %p, size %lu\n", buff, size);
        for (i = 0; i < size; i++) {
                rc = patch_instruction(buff + i, *(optprobe_template_entry + i));
                if (rc < 0)
                        goto error;
        }

        /*
         * Fixup the template with instructions to:
         * 1. load the address of the actual probepoint
         */
        patch_imm64_load_insns((unsigned long)op, buff + TMPL_OP_IDX);

        /*
         * 2. branch to optimized_callback() and emulate_step()
         */
        op_callback_addr = (kprobe_opcode_t *)ppc_kallsyms_lookup_name("optimized_callback");
        emulate_step_addr = (kprobe_opcode_t *)ppc_kallsyms_lookup_name("emulate_step");
        if (!op_callback_addr || !emulate_step_addr) {
                WARN(1, "Unable to lookup optimized_callback()/emulate_step()\n");
                goto error;
        }

        branch_op_callback = create_branch((unsigned int *)buff + TMPL_CALL_HDLR_IDX,
                                           (unsigned long)op_callback_addr,
                                           BRANCH_SET_LINK);

        branch_emulate_step = create_branch((unsigned int *)buff + TMPL_EMULATE_IDX,
                                            (unsigned long)emulate_step_addr,
                                            BRANCH_SET_LINK);

        if (!branch_op_callback || !branch_emulate_step)
                goto error;

        patch_instruction(buff + TMPL_CALL_HDLR_IDX, branch_op_callback);
        patch_instruction(buff + TMPL_EMULATE_IDX, branch_emulate_step);

        /*
         * 3. load the instruction to be emulated into the relevant register
         */
        patch_imm32_load_insns(*p->ainsn.insn, buff + TMPL_INSN_IDX);

        /*
         * 4. branch back from the trampoline to the instruction after the
         *    probed one
         */
        patch_branch(buff + TMPL_RET_IDX, (unsigned long)nip, 0);

        flush_icache_range((unsigned long)buff,
                           (unsigned long)(&buff[TMPL_END_IDX]));

        op->optinsn.insn = buff;

        return 0;

error:
        free_ppc_optinsn_slot(buff, 0);
        return -ERANGE;
}

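/* Non-zero once the detour buffer has been prepared for this probe. */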
int arch_prepared_optinsn(struct arch_optimized_insn *optinsn)
{
        return optinsn->insn != NULL;
}

/*
 * On powerpc, an optprobe always replaces a single, 4-byte aligned, 4-byte
 * instruction, so no other kprobe can lie within the replaced range and no
 * further checking is needed here.
 */
int arch_check_optimized_kprobe(struct optimized_kprobe *op)
{
        return 0;
}

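/*
 * Commit the optimization: save the original instruction at each probe point
 * and replace it with a branch into that probe's detour buffer.
 */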
void arch_optimize_kprobes(struct list_head *oplist)
{
        struct optimized_kprobe *op;
        struct optimized_kprobe *tmp;

        list_for_each_entry_safe(op, tmp, oplist, list) {
                /*
                 * Backup the instruction that is about to be replaced
                 * by the branch to the detour buffer.
                 */
                memcpy(op->optinsn.copied_insn, op->kp.addr,
                       RELATIVEJUMP_SIZE);
                patch_instruction(op->kp.addr,
                        create_branch((unsigned int *)op->kp.addr,
                                      (unsigned long)op->optinsn.insn, 0));
                list_del_init(&op->list);
        }
}

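/*
 * Fall back to a regular trap-based kprobe: re-arming the kprobe puts the
 * breakpoint instruction back at the probe address.
 */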
void arch_unoptimize_kprobe(struct optimized_kprobe *op)
{
        arch_arm_kprobe(&op->kp);
}

void arch_unoptimize_kprobes(struct list_head *oplist,
                             struct list_head *done_list)
{
        struct optimized_kprobe *op;
        struct optimized_kprobe *tmp;

        list_for_each_entry_safe(op, tmp, oplist, list) {
                arch_unoptimize_kprobe(op);
                list_move(&op->list, done_list);
        }
}

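/*
 * An optprobe replaces exactly RELATIVEJUMP_SIZE bytes at kp.addr, so 'addr'
 * lies within this probe only if it falls inside that range.
 */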
int arch_within_optimized_kprobe(struct optimized_kprobe *op,
                                 unsigned long addr)
{
        return ((unsigned long)op->kp.addr <= addr &&
                (unsigned long)op->kp.addr + RELATIVEJUMP_SIZE > addr);
}