linux/arch/csky/kernel/probes/kprobes.c
// SPDX-License-Identifier: GPL-2.0+

#define pr_fmt(fmt) "kprobes: " fmt

#include <linux/kprobes.h>
#include <linux/extable.h>
#include <linux/slab.h>
#include <linux/stop_machine.h>
#include <linux/uaccess.h>
#include <asm/ptrace.h>
#include <asm/sections.h>
#include <asm/cacheflush.h>

#include "decode-insn.h"

DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);

static void __kprobes
post_kprobe_handler(struct kprobe_ctlblk *, struct pt_regs *);

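/*
 * Instruction patching is serialized through stop_machine(): every online
 * CPU runs patch_text_cb(), the last CPU to arrive performs the store and
 * the D-cache write-back, and each CPU then invalidates its own I-cache
 * before the machine resumes.
 */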
struct csky_insn_patch {
	kprobe_opcode_t *addr;
	u32		opcode;
	atomic_t	cpu_count;
};

static int __kprobes patch_text_cb(void *priv)
{
	struct csky_insn_patch *param = priv;
	unsigned int addr = (unsigned int)param->addr;

	/*
	 * The last CPU to enter acts as the master: only once every other
	 * CPU is known to be spinning below may the instruction be swapped.
	 */
	if (atomic_inc_return(&param->cpu_count) == num_online_cpus()) {
		*(u16 *) addr = cpu_to_le16(param->opcode);
		dcache_wb_range(addr, addr + 2);
		atomic_inc(&param->cpu_count);
	} else {
		while (atomic_read(&param->cpu_count) <= num_online_cpus())
			cpu_relax();
	}

	/* Every CPU must invalidate its own I-cache for the patched range. */
	icache_inv_range(addr, addr + 2);

	return 0;
}

static int __kprobes patch_text(kprobe_opcode_t *addr, u32 opcode)
{
	struct csky_insn_patch param = { addr, opcode, ATOMIC_INIT(0) };

	return stop_machine_cpuslocked(patch_text_cb, &param, cpu_online_mask);
}

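/*
 * Prepare the out-of-line single-step slot: copy the probed instruction
 * into the slot and record in api.restore where execution resumes
 * afterwards (probe address plus instruction length, 2 or 4 bytes).
 */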
static void __kprobes arch_prepare_ss_slot(struct kprobe *p)
{
	unsigned long offset = is_insn32(p->opcode) ? 4 : 2;

	p->ainsn.api.restore = (unsigned long)p->addr + offset;

	patch_text(p->ainsn.api.insn, p->opcode);
}

static void __kprobes arch_prepare_simulate(struct kprobe *p)
{
	/* restore == 0: the simulation handler fixes up the PC itself */
	p->ainsn.api.restore = 0;
}

static void __kprobes arch_simulate_insn(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	if (p->ainsn.api.handler)
		p->ainsn.api.handler((u32)p->opcode, (long)p->addr, regs);

	post_kprobe_handler(kcb, regs);
}

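/*
 * Validate and decode the probed instruction.  C-SKY instructions are
 * 2-byte aligned, so an odd address can never be probed.  Decoding either
 * rejects the instruction, marks it for simulation (no slot), or copies
 * it into an execution slot for out-of-line single-stepping.
 */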
int __kprobes arch_prepare_kprobe(struct kprobe *p)
{
	unsigned long probe_addr = (unsigned long)p->addr;

	if (probe_addr & 0x1)
		return -EILSEQ;

	/* copy instruction */
	p->opcode = le32_to_cpu(*p->addr);

	/* decode instruction */
	switch (csky_probe_decode_insn(p->addr, &p->ainsn.api)) {
	case INSN_REJECTED:	/* insn not supported */
		return -EINVAL;

	case INSN_GOOD_NO_SLOT:	/* insn needs simulation */
		p->ainsn.api.insn = NULL;
		break;

	case INSN_GOOD:	/* instruction uses slot */
		p->ainsn.api.insn = get_insn_slot();
		if (!p->ainsn.api.insn)
			return -ENOMEM;
		break;
	}

	/* prepare the instruction */
	if (p->ainsn.api.insn)
		arch_prepare_ss_slot(p);
	else
		arch_prepare_simulate(p);

	return 0;
}

/* install breakpoint in text */
void __kprobes arch_arm_kprobe(struct kprobe *p)
{
	patch_text(p->addr, USR_BKPT);
}

/* remove breakpoint from text */
void __kprobes arch_disarm_kprobe(struct kprobe *p)
{
	patch_text(p->addr, p->opcode);
}

void __kprobes arch_remove_kprobe(struct kprobe *p)
{
}

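/*
 * Nested kprobe bookkeeping: when a handler itself hits a probe, the
 * current kprobe and its status are stashed so they can be reinstated
 * once the inner probe has been serviced.
 */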
static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	kcb->prev_kprobe.kp = kprobe_running();
	kcb->prev_kprobe.status = kcb->kprobe_status;
}

static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	__this_cpu_write(current_kprobe, kcb->prev_kprobe.kp);
	kcb->kprobe_status = kcb->prev_kprobe.status;
}

static void __kprobes set_current_kprobe(struct kprobe *p)
{
	__this_cpu_write(current_kprobe, p);
}

/*
 * Interrupts need to be disabled before single-step mode is set, and not
 * re-enabled until after single-step mode ends.  If interrupts stayed
 * enabled on the local CPU, one could arrive between the exception return
 * and the start of the out-of-line single-step, and we would wrongly
 * single-step into the interrupt handler.
 */
static void __kprobes kprobes_save_local_irqflag(struct kprobe_ctlblk *kcb,
						struct pt_regs *regs)
{
	kcb->saved_sr = regs->sr;
	/* mask local interrupts: PSR bit 6 is the interrupt-enable bit */
	regs->sr &= ~BIT(6);
}

static void __kprobes kprobes_restore_local_irqflag(struct kprobe_ctlblk *kcb,
						struct pt_regs *regs)
{
	regs->sr = kcb->saved_sr;
}

static void __kprobes
set_ss_context(struct kprobe_ctlblk *kcb, unsigned long addr, struct kprobe *p)
{
	unsigned long offset = is_insn32(p->opcode) ? 4 : 2;

	kcb->ss_ctx.ss_pending = true;
	kcb->ss_ctx.match_addr = addr + offset;
}

static void __kprobes clear_ss_context(struct kprobe_ctlblk *kcb)
{
	kcb->ss_ctx.ss_pending = false;
	kcb->ss_ctx.match_addr = 0;
}

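/*
 * PSR[15:14] is the trace-mode field: TRACE_MODE_SI selects
 * single-instruction trace mode, TRACE_MODE_RUN normal execution.
 */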
#define TRACE_MODE_SI		BIT(14)
#define TRACE_MODE_MASK		~(0x3 << 14)
#define TRACE_MODE_RUN		0

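/*
 * Arrange for the probed instruction to be executed: either jump to the
 * out-of-line slot in single-instruction trace mode, or simulate the
 * instruction entirely in software.
 */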
static void __kprobes setup_singlestep(struct kprobe *p,
				       struct pt_regs *regs,
				       struct kprobe_ctlblk *kcb, int reenter)
{
	unsigned long slot;

	if (reenter) {
		save_previous_kprobe(kcb);
		set_current_kprobe(p);
		kcb->kprobe_status = KPROBE_REENTER;
	} else {
		kcb->kprobe_status = KPROBE_HIT_SS;
	}

	if (p->ainsn.api.insn) {
		/* prepare for single stepping */
		slot = (unsigned long)p->ainsn.api.insn;

		set_ss_context(kcb, slot, p);	/* mark pending ss */

		/* IRQs and single stepping do not mix well. */
		kprobes_save_local_irqflag(kcb, regs);
		regs->sr = (regs->sr & TRACE_MODE_MASK) | TRACE_MODE_SI;
		instruction_pointer_set(regs, slot);
	} else {
		/* insn simulation */
		arch_simulate_insn(p, regs);
	}
}

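/*
 * A probe fired while another kprobe was being handled.  Recursion from
 * a pre-/post-handler is legitimate and handled by stacking the state;
 * recursion out of single-step mode means the state machine is broken.
 */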
static int __kprobes reenter_kprobe(struct kprobe *p,
				    struct pt_regs *regs,
				    struct kprobe_ctlblk *kcb)
{
	switch (kcb->kprobe_status) {
	case KPROBE_HIT_SSDONE:
	case KPROBE_HIT_ACTIVE:
		kprobes_inc_nmissed_count(p);
		setup_singlestep(p, regs, kcb, 1);
		break;
	case KPROBE_HIT_SS:
	case KPROBE_REENTER:
		pr_warn("Failed to recover from reentered kprobes.\n");
		dump_kprobe(p);
		BUG();
		break;
	default:
		WARN_ON(1);
		return 0;
	}

	return 1;
}

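/*
 * Common tail after the probed instruction has run (or been simulated):
 * restore the PC, pop any stacked kprobe, and invoke the post-handler.
 */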
static void __kprobes
post_kprobe_handler(struct kprobe_ctlblk *kcb, struct pt_regs *regs)
{
	struct kprobe *cur = kprobe_running();

	if (!cur)
		return;

	/* restore the return address if the insn was non-branching */
	if (cur->ainsn.api.restore != 0)
		regs->pc = cur->ainsn.api.restore;

	/* restore the previously saved kprobe state and continue */
	if (kcb->kprobe_status == KPROBE_REENTER) {
		restore_previous_kprobe(kcb);
		return;
	}

	/* call post handler */
	kcb->kprobe_status = KPROBE_HIT_SSDONE;
	if (cur->post_handler) {
		/*
		 * The post_handler may itself hit a breakpoint and single
		 * step again; reenter_kprobe() copes with that recursion.
		 */
		cur->post_handler(cur, regs, 0);
	}

	reset_current_kprobe();
}

int __kprobes kprobe_fault_handler(struct pt_regs *regs, unsigned int trapnr)
{
	struct kprobe *cur = kprobe_running();
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	switch (kcb->kprobe_status) {
	case KPROBE_HIT_SS:
	case KPROBE_REENTER:
		/*
		 * We are here because the instruction being single-stepped
		 * caused a page fault.  Reset the current kprobe, point the
		 * PC back at the probe address, and let the page fault
		 * handler continue as for a normal fault.
		 */
		regs->pc = (unsigned long) cur->addr;
		BUG_ON(!instruction_pointer(regs));

		if (kcb->kprobe_status == KPROBE_REENTER)
			restore_previous_kprobe(kcb);
		else
			reset_current_kprobe();

		break;
	case KPROBE_HIT_ACTIVE:
	case KPROBE_HIT_SSDONE:
		/*
		 * In case the user-specified fault handler returned
		 * zero, try to fix up.
		 */
		if (fixup_exception(regs))
			return 1;
	}
	return 0;
}

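/*
 * Entry point from the breakpoint (USR_BKPT) exception.  Returns 1 when
 * the trap belonged to kprobes, 0 to hand it back to the generic trap
 * handling.
 */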
int __kprobes
kprobe_breakpoint_handler(struct pt_regs *regs)
{
	struct kprobe *p, *cur_kprobe;
	struct kprobe_ctlblk *kcb;
	unsigned long addr = instruction_pointer(regs);

	kcb = get_kprobe_ctlblk();
	cur_kprobe = kprobe_running();

	p = get_kprobe((kprobe_opcode_t *) addr);

	if (p) {
		if (cur_kprobe) {
			if (reenter_kprobe(p, regs, kcb))
				return 1;
		} else {
			/* Probe hit */
			set_current_kprobe(p);
			kcb->kprobe_status = KPROBE_HIT_ACTIVE;

			/*
			 * If we have no pre-handler or it returned 0, we
			 * continue with normal processing.  If we have a
			 * pre-handler and it returned non-zero, it has
			 * modified the execution path and there is no need
			 * to single-step; just reset the current kprobe
			 * and exit.
			 *
			 * The pre_handler may itself hit a breakpoint and
			 * single-step before returning.
			 */
			if (!p->pre_handler || !p->pre_handler(p, regs))
				setup_singlestep(p, regs, kcb, 0);
			else
				reset_current_kprobe();
		}
		return 1;
	}

	/*
	 * The breakpoint instruction was removed right
	 * after we hit it.  Another cpu has removed
	 * either a probepoint or a debugger breakpoint
	 * at this address.  In either case, no further
	 * handling of this interrupt is appropriate.
	 * Return to the original instruction and continue.
	 */
	return 0;
}

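/*
 * Entry point from the trace exception raised after the slot instruction
 * has executed.  A match on ss_ctx.match_addr confirms the step armed in
 * setup_singlestep() has completed; drop back to normal run mode and
 * finish the probe.
 */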
int __kprobes
kprobe_single_step_handler(struct pt_regs *regs)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	if (kcb->ss_ctx.ss_pending &&
	    kcb->ss_ctx.match_addr == instruction_pointer(regs)) {
		clear_ss_context(kcb);	/* clear pending ss */

		kprobes_restore_local_irqflag(kcb, regs);
		regs->sr = (regs->sr & TRACE_MODE_MASK) | TRACE_MODE_RUN;

		post_kprobe_handler(kcb, regs);
		return 1;
	}
	return 0;
}

/*
 * Provide a blacklist of symbols identifying ranges which cannot be kprobed.
 * This blacklist is exposed to userspace via debugfs (kprobes/blacklist).
 */
int __init arch_populate_kprobe_blacklist(void)
{
	return kprobe_add_area_blacklist((unsigned long)__irqentry_text_start,
					 (unsigned long)__irqentry_text_end);
}

void __kprobes __used *trampoline_probe_handler(struct pt_regs *regs)
{
	return (void *)kretprobe_trampoline_handler(regs, NULL);
}

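/*
 * Divert the probed function's return: remember the real return address
 * and point the link register at __kretprobe_trampoline so the return
 * handler runs when the function exits.
 */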
void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
				      struct pt_regs *regs)
{
	ri->ret_addr = (kprobe_opcode_t *)regs->lr;
	ri->fp = NULL;
	regs->lr = (unsigned long) &__kretprobe_trampoline;
}

int __kprobes arch_trampoline_kprobe(struct kprobe *p)
{
	return 0;
}

int __init arch_init_kprobes(void)
{
	return 0;
}