/*
 * Code for Kernel probes Jump optimization.
 *
 * Copyright 2017, Anju T, IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/kprobes.h>
#include <linux/jump_label.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <asm/kprobes.h>
#include <asm/ptrace.h>
#include <asm/cacheflush.h>
#include <asm/code-patching.h>
#include <asm/sstep.h>
#include <asm/ppc-opcode.h>

#define TMPL_CALL_HDLR_IDX	\
	(optprobe_template_call_handler - optprobe_template_entry)
#define TMPL_EMULATE_IDX	\
	(optprobe_template_call_emulate - optprobe_template_entry)
#define TMPL_RET_IDX	\
	(optprobe_template_ret - optprobe_template_entry)
#define TMPL_OP_IDX	\
	(optprobe_template_op_address - optprobe_template_entry)
#define TMPL_INSN_IDX	\
	(optprobe_template_insn - optprobe_template_entry)
#define TMPL_END_IDX	\
	(optprobe_template_end - optprobe_template_entry)

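/*
 * Defines get_ppc_optinsn_slot()/free_ppc_optinsn_slot(), which hand out
 * detour-buffer slots from the insn cache declared below.
 */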
DEFINE_INSN_CACHE_OPS(ppc_optinsn);

static bool insn_page_in_use;

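/*
 * optinsn_slot is a single page reserved in the kernel text for detour
 * buffers; never hand out more than that one page.
 */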
static void *__ppc_alloc_insn_page(void)
{
	if (insn_page_in_use)
		return NULL;
	insn_page_in_use = true;
	return &optinsn_slot;
}

static void __ppc_free_insn_page(void *page __maybe_unused)
{
	insn_page_in_use = false;
}

struct kprobe_insn_cache kprobe_ppc_optinsn_slots = {
	.mutex = __MUTEX_INITIALIZER(kprobe_ppc_optinsn_slots.mutex),
	.pages = LIST_HEAD_INIT(kprobe_ppc_optinsn_slots.pages),
	/* insn_size initialized later */
	.alloc = __ppc_alloc_insn_page,
	.free = __ppc_free_insn_page,
	.nr_garbage = 0,
};

/*
 * Check if we can optimize this probe. Returns NIP post-emulation if
 * this can be optimized and 0 otherwise.
 */
static unsigned long can_optimize(struct kprobe *p)
{
	struct pt_regs regs;
	struct instruction_op op;
	unsigned long nip = 0;

	/*
	 * kprobe placed for kretprobe during boot time
	 * has a 'nop' instruction, which can be emulated.
	 * So further checks can be skipped.
	 */
	if (p->addr == (kprobe_opcode_t *)&kretprobe_trampoline)
		return (unsigned long)p->addr + sizeof(kprobe_opcode_t);

	/*
	 * We only support optimizing kernel addresses, but not
	 * module addresses.
	 *
	 * FIXME: Optimize kprobes placed in module addresses.
	 */
	if (!is_kernel_addr((unsigned long)p->addr))
		return 0;

	memset(&regs, 0, sizeof(struct pt_regs));
	regs.nip = (unsigned long)p->addr;
	regs.trap = 0x0;
	regs.msr = MSR_KERNEL;

	/*
	 * Kprobes placed on conditional branch instructions are not
	 * optimized, as we can't predict the nip ahead of time with a
	 * dummy pt_regs, and so can't ensure that the branch back from
	 * the detour buffer stays within branch range (i.e. +/-32MB).
	 * A branch back to the nip returned by analyse_instr() here is
	 * set up in the detour buffer.
	 *
	 * Ensure that the instruction is not a conditional branch, and
	 * that it can be emulated.
	 */
	if (!is_conditional_branch(*p->ainsn.insn) &&
	    analyse_instr(&op, &regs, *p->ainsn.insn) == 1) {
		emulate_update_regs(&regs, &op);
		nip = regs.nip;
	}

	return nip;
}

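/*
 * Invoked from the detour buffer in place of the trap: run the kprobe
 * pre-handler (or count a miss if another kprobe is already running).
 */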
static void optimized_callback(struct optimized_kprobe *op,
			       struct pt_regs *regs)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
	unsigned long flags;

	/* This is possible if op is under delayed unoptimizing */
	if (kprobe_disabled(&op->kp))
		return;

	local_irq_save(flags);
	hard_irq_disable();

	if (kprobe_running()) {
		kprobes_inc_nmissed_count(&op->kp);
	} else {
		__this_cpu_write(current_kprobe, &op->kp);
		regs->nip = (unsigned long)op->kp.addr;
		kcb->kprobe_status = KPROBE_HIT_ACTIVE;
		opt_pre_handler(&op->kp, regs);
		__this_cpu_write(current_kprobe, NULL);
	}

	/*
	 * No need for an explicit __hard_irq_enable() here.
	 * local_irq_restore() will re-enable interrupts and
	 * also synthesize soft-pending interrupts, if any.
	 */
	local_irq_restore(flags);
}
NOKPROBE_SYMBOL(optimized_callback);

void arch_remove_optimized_kprobe(struct optimized_kprobe *op)
{
	if (op->optinsn.insn) {
		free_ppc_optinsn_slot(op->optinsn.insn, 1);
		op->optinsn.insn = NULL;
	}
}

/*
 * emulate_step() requires the instruction to be emulated as its second
 * parameter. Load register 'r4' with the instruction.
 */
void patch_imm32_load_insns(unsigned int val, kprobe_opcode_t *addr)
{
	/* addis r4,0,(insn)@h */
	patch_instruction(addr, PPC_INST_ADDIS | ___PPC_RT(4) |
			  ((val >> 16) & 0xffff));
	addr++;

	/* ori r4,r4,(insn)@l */
	patch_instruction(addr, PPC_INST_ORI | ___PPC_RA(4) |
			  ___PPC_RS(4) | (val & 0xffff));
}

/*
 * Generate instructions to load the provided immediate 64-bit value
 * into register 'r3', and patch these instructions at 'addr'.
 */
void patch_imm64_load_insns(unsigned long val, kprobe_opcode_t *addr)
{
	/* lis r3,(op)@highest */
	patch_instruction(addr, PPC_INST_ADDIS | ___PPC_RT(3) |
			  ((val >> 48) & 0xffff));
	addr++;

	/* ori r3,r3,(op)@higher */
	patch_instruction(addr, PPC_INST_ORI | ___PPC_RA(3) |
			  ___PPC_RS(3) | ((val >> 32) & 0xffff));
	addr++;

	/* rldicr r3,r3,32,31 */
	patch_instruction(addr, PPC_INST_RLDICR | ___PPC_RA(3) |
			  ___PPC_RS(3) | __PPC_SH64(32) | __PPC_ME64(31));
	addr++;

	/* oris r3,r3,(op)@h */
	patch_instruction(addr, PPC_INST_ORIS | ___PPC_RA(3) |
			  ___PPC_RS(3) | ((val >> 16) & 0xffff));
	addr++;

	/* ori r3,r3,(op)@l */
	patch_instruction(addr, PPC_INST_ORI | ___PPC_RA(3) |
			  ___PPC_RS(3) | (val & 0xffff));
}

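/*
 * Set up a detour buffer for 'p'. The template (see optprobes_head.S)
 * saves register state, loads &op into r3 and calls optimized_callback(),
 * then loads the probed instruction into r4 and calls emulate_step() on
 * it, restores state, and branches back past the probepoint.
 */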
int arch_prepare_optimized_kprobe(struct optimized_kprobe *op, struct kprobe *p)
{
	kprobe_opcode_t *buff, branch_op_callback, branch_emulate_step;
	kprobe_opcode_t *op_callback_addr, *emulate_step_addr;
	long b_offset;
	unsigned long nip, size;
	int rc, i;

	kprobe_ppc_optinsn_slots.insn_size = MAX_OPTINSN_SIZE;

	nip = can_optimize(p);
	if (!nip)
		return -EILSEQ;

	/* Allocate instruction slot for detour buffer */
	buff = get_ppc_optinsn_slot();
	if (!buff)
		return -ENOMEM;

	/*
	 * OPTPROBE uses the 'b' instruction to branch to optinsn.insn.
	 *
	 * The target address has to be relatively close, because the
	 * branch target is encoded in an immediate field of the opcode
	 * itself: 24 bits, i.e. a reach of roughly +/-32MB.
	 */
	b_offset = (unsigned long)buff - (unsigned long)p->addr;
	if (!is_offset_in_branch_range(b_offset))
		goto error;

	/* Check if the return address is also within 32MB range */
	b_offset = (unsigned long)(buff + TMPL_RET_IDX) -
			(unsigned long)nip;
	if (!is_offset_in_branch_range(b_offset))
		goto error;

	/* Setup template */
	/* We can optimize this via patch_instruction_window later */
	size = (TMPL_END_IDX * sizeof(kprobe_opcode_t)) / sizeof(int);
	pr_devel("Copying template to %p, size %lu\n", buff, size);
	for (i = 0; i < size; i++) {
		rc = patch_instruction(buff + i, *(optprobe_template_entry + i));
		if (rc < 0)
			goto error;
	}

	/*
	 * Fixup the template with instructions to:
	 * 1. load the address of the actual probepoint
	 */
	patch_imm64_load_insns((unsigned long)op, buff + TMPL_OP_IDX);

	/*
	 * 2. branch to optimized_callback() and emulate_step()
	 */
	op_callback_addr = (kprobe_opcode_t *)ppc_kallsyms_lookup_name("optimized_callback");
	emulate_step_addr = (kprobe_opcode_t *)ppc_kallsyms_lookup_name("emulate_step");
	if (!op_callback_addr || !emulate_step_addr) {
		WARN(1, "Unable to lookup optimized_callback()/emulate_step()\n");
		goto error;
	}

	branch_op_callback = create_branch((unsigned int *)buff + TMPL_CALL_HDLR_IDX,
					   (unsigned long)op_callback_addr,
					   BRANCH_SET_LINK);

	branch_emulate_step = create_branch((unsigned int *)buff + TMPL_EMULATE_IDX,
					    (unsigned long)emulate_step_addr,
					    BRANCH_SET_LINK);

	if (!branch_op_callback || !branch_emulate_step)
		goto error;

	patch_instruction(buff + TMPL_CALL_HDLR_IDX, branch_op_callback);
	patch_instruction(buff + TMPL_EMULATE_IDX, branch_emulate_step);

	/*
	 * 3. load instruction to be emulated into relevant register, and
	 */
	patch_imm32_load_insns(*p->ainsn.insn, buff + TMPL_INSN_IDX);

	/*
	 * 4. branch back from trampoline
	 */
	patch_branch(buff + TMPL_RET_IDX, (unsigned long)nip, 0);

	flush_icache_range((unsigned long)buff,
			   (unsigned long)(&buff[TMPL_END_IDX]));

	op->optinsn.insn = buff;

	return 0;

error:
	free_ppc_optinsn_slot(buff, 0);
	return -ERANGE;
}

int arch_prepared_optinsn(struct arch_optimized_insn *optinsn)
{
	return optinsn->insn != NULL;
}

/*
 * On powerpc, optprobes always replace one instruction (4 bytes
 * aligned and 4 bytes long). It is impossible to encounter another
 * kprobe in this address range. So always return 0.
 */
int arch_check_optimized_kprobe(struct optimized_kprobe *op)
{
	return 0;
}

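/*
 * Flip each probepoint on the list from a trap to a direct branch into
 * its detour buffer.
 */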
void arch_optimize_kprobes(struct list_head *oplist)
{
	struct optimized_kprobe *op;
	struct optimized_kprobe *tmp;

	list_for_each_entry_safe(op, tmp, oplist, list) {
		/*
		 * Backup instructions which will be replaced
		 * by jump address
		 */
		memcpy(op->optinsn.copied_insn, op->kp.addr,
		       RELATIVEJUMP_SIZE);
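		/* ... then patch an unconditional branch to the detour buffer */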
		patch_instruction(op->kp.addr,
				  create_branch((unsigned int *)op->kp.addr,
						(unsigned long)op->optinsn.insn, 0));
		list_del_init(&op->list);
	}
}

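/*
 * Re-arm the kprobe: put the trap instruction back at the probepoint.
 * The detour buffer itself is freed by arch_remove_optimized_kprobe().
 */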
void arch_unoptimize_kprobe(struct optimized_kprobe *op)
{
	arch_arm_kprobe(&op->kp);
}

void arch_unoptimize_kprobes(struct list_head *oplist,
			     struct list_head *done_list)
{
	struct optimized_kprobe *op;
	struct optimized_kprobe *tmp;

	list_for_each_entry_safe(op, tmp, oplist, list) {
		arch_unoptimize_kprobe(op);
		list_move(&op->list, done_list);
	}
}

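/*
 * 'addr' falls within this optprobe if it lies inside the
 * RELATIVEJUMP_SIZE bytes that were replaced at kp.addr.
 */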
int arch_within_optimized_kprobe(struct optimized_kprobe *op,
				 unsigned long addr)
{
	return ((unsigned long)op->kp.addr <= addr &&
		(unsigned long)op->kp.addr + RELATIVEJUMP_SIZE > addr);
}