linux/arch/arc/kernel/kprobes.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 */

#include <linux/types.h>
#include <linux/kprobes.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/kdebug.h>
#include <linux/sched.h>
#include <linux/uaccess.h>
#include <asm/cacheflush.h>
#include <asm/current.h>
#include <asm/disasm.h>

#define MIN_STACK_SIZE(addr)    min((unsigned long)MAX_STACK_SIZE, \
                (unsigned long)current_thread_info() + THREAD_SIZE - (addr))

DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);

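/*
 * Called when a kprobe is registered: reject probes at unaligned addresses
 * (ARC instructions are halfword aligned), note whether the probed
 * instruction is a 16-bit "short" encoding, and save the original opcode so
 * it can be restored for single stepping and on removal.
 */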
int __kprobes arch_prepare_kprobe(struct kprobe *p)
{
        /* Attempt to probe at unaligned address */
        if ((unsigned long)p->addr & 0x01)
                return -EINVAL;

        /* Address should not be in exception handling code */

        p->ainsn.is_short = is_short_instr((unsigned long)p->addr);
        p->opcode = *p->addr;

        return 0;
}

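/*
 * Arming replaces the probed instruction with UNIMP_S so that hitting the
 * probe raises an instruction error exception (see kprobe_exceptions_notify
 * below); disarming puts the saved opcode back. Both flush the I-cache so
 * the core does not keep executing the stale instruction.
 */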
void __kprobes arch_arm_kprobe(struct kprobe *p)
{
        *p->addr = UNIMP_S_INSTRUCTION;

        flush_icache_range((unsigned long)p->addr,
                           (unsigned long)p->addr + sizeof(kprobe_opcode_t));
}

void __kprobes arch_disarm_kprobe(struct kprobe *p)
{
        *p->addr = p->opcode;

        flush_icache_range((unsigned long)p->addr,
                           (unsigned long)p->addr + sizeof(kprobe_opcode_t));
}

void __kprobes arch_remove_kprobe(struct kprobe *p)
{
        arch_disarm_kprobe(p);

        /* Can we remove the kprobe in the middle of kprobe handling? */
        if (p->ainsn.t1_addr) {
                *(p->ainsn.t1_addr) = p->ainsn.t1_opcode;

                flush_icache_range((unsigned long)p->ainsn.t1_addr,
                                   (unsigned long)p->ainsn.t1_addr +
                                   sizeof(kprobe_opcode_t));

                p->ainsn.t1_addr = NULL;
        }

        if (p->ainsn.t2_addr) {
                *(p->ainsn.t2_addr) = p->ainsn.t2_opcode;

                flush_icache_range((unsigned long)p->ainsn.t2_addr,
                                   (unsigned long)p->ainsn.t2_addr +
                                   sizeof(kprobe_opcode_t));

                p->ainsn.t2_addr = NULL;
        }
}

static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
{
        kcb->prev_kprobe.kp = kprobe_running();
        kcb->prev_kprobe.status = kcb->kprobe_status;
}

static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
{
        __this_cpu_write(current_kprobe, kcb->prev_kprobe.kp);
        kcb->kprobe_status = kcb->prev_kprobe.status;
}

static inline void __kprobes set_current_kprobe(struct kprobe *p)
{
        __this_cpu_write(current_kprobe, p);
}

static void __kprobes resume_execution(struct kprobe *p, unsigned long addr,
                                       struct pt_regs *regs)
{
        /* Remove the trap instructions inserted for single stepping and
         * restore the original instructions
         */
        if (p->ainsn.t1_addr) {
                *(p->ainsn.t1_addr) = p->ainsn.t1_opcode;

                flush_icache_range((unsigned long)p->ainsn.t1_addr,
                                   (unsigned long)p->ainsn.t1_addr +
                                   sizeof(kprobe_opcode_t));

                p->ainsn.t1_addr = NULL;
        }

        if (p->ainsn.t2_addr) {
                *(p->ainsn.t2_addr) = p->ainsn.t2_opcode;

                flush_icache_range((unsigned long)p->ainsn.t2_addr,
                                   (unsigned long)p->ainsn.t2_addr +
                                   sizeof(kprobe_opcode_t));

                p->ainsn.t2_addr = NULL;
        }
}

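/*
 * Single stepping is emulated in software: the original instruction is
 * copied back over the probe site and TRAP_S 2 breakpoints are planted at
 * the instruction(s) that can execute next - the fall-through address, and
 * the branch target if the probed instruction is a branch. The special case
 * below handles a probe sitting in a delay slot, where the next PC comes
 * from BTA (the branch target address register) when the branch is taken.
 */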
static void __kprobes setup_singlestep(struct kprobe *p, struct pt_regs *regs)
{
        unsigned long next_pc;
        unsigned long tgt_if_br = 0;
        int is_branch;
        unsigned long bta;

        /* Copy the original opcode back to the kprobe location so the
         * instruction can be executed. Because of this we cannot hit the
         * same kprobe again until this one is done
         */
        *(p->addr) = p->opcode;

        flush_icache_range((unsigned long)p->addr,
                           (unsigned long)p->addr + sizeof(kprobe_opcode_t));

        /* Now insert the trap at the location following this instruction so
         * we can single step. If it is a branch, also insert a trap at the
         * possible branch target
         */

        bta = regs->bta;

        if (regs->status32 & 0x40) {
                /* We are in a delay slot with the branch taken */

                next_pc = bta & ~0x01;

                if (!p->ainsn.is_short) {
                        if (bta & 0x01)
                                regs->blink += 2;
                        else {
                                /* Branch not taken */
                                next_pc += 2;

                                /* next pc is taken from bta after executing the
                                 * delay slot instruction
                                 */
                                regs->bta += 2;
                        }
                }

                is_branch = 0;
        } else
                is_branch =
                    disasm_next_pc((unsigned long)p->addr, regs,
                        (struct callee_regs *) current->thread.callee_reg,
                        &next_pc, &tgt_if_br);

        p->ainsn.t1_addr = (kprobe_opcode_t *) next_pc;
        p->ainsn.t1_opcode = *(p->ainsn.t1_addr);
        *(p->ainsn.t1_addr) = TRAP_S_2_INSTRUCTION;

        flush_icache_range((unsigned long)p->ainsn.t1_addr,
                           (unsigned long)p->ainsn.t1_addr +
                           sizeof(kprobe_opcode_t));

        if (is_branch) {
                p->ainsn.t2_addr = (kprobe_opcode_t *) tgt_if_br;
                p->ainsn.t2_opcode = *(p->ainsn.t2_addr);
                *(p->ainsn.t2_addr) = TRAP_S_2_INSTRUCTION;

                flush_icache_range((unsigned long)p->ainsn.t2_addr,
                                   (unsigned long)p->ainsn.t2_addr +
                                   sizeof(kprobe_opcode_t));
        }
}

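/*
 * Entered (via the DIE_IERR notifier below) when the UNIMP_S planted by
 * arch_arm_kprobe() is hit. If a kprobe is already being handled on this
 * CPU we are re-entering: the previous kprobe is saved and the new probe is
 * single stepped without running its user handlers.
 */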
int __kprobes arc_kprobe_handler(unsigned long addr, struct pt_regs *regs)
{
        struct kprobe *p;
        struct kprobe_ctlblk *kcb;

        preempt_disable();

        kcb = get_kprobe_ctlblk();
        p = get_kprobe((unsigned long *)addr);

        if (p) {
                /*
                 * We have re-entered the kprobe_handler: another kprobe was
                 * hit while within a handler. Save the original kprobe and
                 * single step the instruction of the new probe without
                 * calling any user handlers, to avoid recursive kprobes.
                 */
                if (kprobe_running()) {
                        save_previous_kprobe(kcb);
                        set_current_kprobe(p);
                        kprobes_inc_nmissed_count(p);
                        setup_singlestep(p, regs);
                        kcb->kprobe_status = KPROBE_REENTER;
                        return 1;
                }

                set_current_kprobe(p);
                kcb->kprobe_status = KPROBE_HIT_ACTIVE;

                /* If we have no pre-handler or it returned 0, continue with
                 * normal processing. If we have a pre-handler and it returned
                 * non-zero, the user handler has set up the registers to
                 * resume at another instruction, so skip the single stepping.
                 */
                if (!p->pre_handler || !p->pre_handler(p, regs)) {
                        setup_singlestep(p, regs);
                        kcb->kprobe_status = KPROBE_HIT_SS;
                } else {
                        reset_current_kprobe();
                        preempt_enable_no_resched();
                }

                return 1;
        }

        /* no_kprobe: */
        preempt_enable_no_resched();
        return 0;
}

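/*
 * Entered (via the DIE_TRAP notifier below) when one of the TRAP_S
 * breakpoints planted by setup_singlestep() is hit, i.e. after the probed
 * instruction has been single stepped: remove the step breakpoints, re-arm
 * the original probe and resume at the trap address.
 */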
static int __kprobes arc_post_kprobe_handler(unsigned long addr,
                                         struct pt_regs *regs)
{
        struct kprobe *cur = kprobe_running();
        struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

        if (!cur)
                return 0;

        resume_execution(cur, addr, regs);

        /* Rearm the kprobe */
        arch_arm_kprobe(cur);

        /*
         * When we return from the trap instruction we go to the next
         * instruction. We restored the actual instruction in
         * resume_execution() and we need to return to the same address and
         * execute it
         */
        regs->ret = addr;

        if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) {
                kcb->kprobe_status = KPROBE_HIT_SSDONE;
                cur->post_handler(cur, regs, 0);
        }

        if (kcb->kprobe_status == KPROBE_REENTER) {
                restore_previous_kprobe(kcb);
                goto out;
        }

        reset_current_kprobe();

out:
        preempt_enable_no_resched();
        return 1;
}

/*
 * The fault can be caused either by the instruction being single stepped or
 * by the pre/post handlers in the module.
 * This also applies to use cases like user probes, where the probe lives in
 * user space and the handlers live in the kernel
 */

int __kprobes kprobe_fault_handler(struct pt_regs *regs, unsigned long trapnr)
{
        struct kprobe *cur = kprobe_running();
        struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

        switch (kcb->kprobe_status) {
        case KPROBE_HIT_SS:
        case KPROBE_REENTER:
                /*
                 * We are here because the instruction being single stepped
                 * caused the fault. Reset the current kprobe and let the
                 * exception handler run as if this were a regular exception.
                 * In our case it hardly matters, because the system is going
                 * to be halted anyway
                 */
                resume_execution(cur, (unsigned long)cur->addr, regs);

                if (kcb->kprobe_status == KPROBE_REENTER)
                        restore_previous_kprobe(kcb);
                else
                        reset_current_kprobe();

                preempt_enable_no_resched();
                break;

        case KPROBE_HIT_ACTIVE:
        case KPROBE_HIT_SSDONE:
                /*
                 * We are here because the instructions in the pre/post handler
                 * caused the fault.
                 */

                /*
                 * In case the user-specified fault handler returned zero,
                 * try to fix up.
                 */
                if (fixup_exception(regs))
                        return 1;

                /*
                 * fixup_exception() could not handle it;
                 * let do_page_fault() fix it.
                 */
                break;

        default:
                break;
        }
        return 0;
}

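/*
 * kprobes entry point from the ARC die notifier chain: DIE_IERR is raised
 * when the planted UNIMP_S is executed (breakpoint hit), DIE_TRAP when the
 * TRAP_S used for single stepping is executed. args->err carries the
 * faulting address.
 */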
int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
                                       unsigned long val, void *data)
{
        struct die_args *args = data;
        unsigned long addr = args->err;
        int ret = NOTIFY_DONE;

        switch (val) {
        case DIE_IERR:
                if (arc_kprobe_handler(addr, args->regs))
                        return NOTIFY_STOP;
                break;

        case DIE_TRAP:
                if (arc_post_kprobe_handler(addr, args->regs))
                        return NOTIFY_STOP;
                break;

        default:
                break;
        }

        return ret;
}

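/*
 * Emits the kretprobe_trampoline symbol (a single nop). The return address
 * of a kretprobed function is redirected here via blink, and the kprobe
 * registered on this address (trampoline_p below) intercepts the return.
 */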
static void __used kretprobe_trampoline_holder(void)
{
        __asm__ __volatile__(".global kretprobe_trampoline\n"
                             "kretprobe_trampoline:\n" "nop\n");
}

void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
                                      struct pt_regs *regs)
{
        /* Save the original return address (blink) */
        ri->ret_addr = (kprobe_opcode_t *) regs->blink;
        ri->fp = NULL;

        /* Replace the return addr with trampoline addr */
        regs->blink = (unsigned long)&kretprobe_trampoline;
}

static int __kprobes trampoline_probe_handler(struct kprobe *p,
                                              struct pt_regs *regs)
{
        regs->ret = __kretprobe_trampoline_handler(regs, &kretprobe_trampoline, NULL);

        /* By returning a non-zero value, we are telling the kprobe handler
         * that we don't want the post_handler to run
         */
        return 1;
}

static struct kprobe trampoline_p = {
        .addr = (kprobe_opcode_t *) &kretprobe_trampoline,
        .pre_handler = trampoline_probe_handler
};

int __init arch_init_kprobes(void)
{
        /* Register the trampoline kprobe used by kretprobes */
        return register_kprobe(&trampoline_p);
}

int __kprobes arch_trampoline_kprobe(struct kprobe *p)
{
        if (p->addr == (kprobe_opcode_t *) &kretprobe_trampoline)
                return 1;

        return 0;
}

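/*
 * Called from the ARC trap handling code when a TRAP_S is hit, so that a
 * single-step breakpoint planted by setup_singlestep() reaches
 * arc_post_kprobe_handler() via the DIE_TRAP notification above.
 */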
void trap_is_kprobe(unsigned long address, struct pt_regs *regs)
{
        notify_die(DIE_TRAP, "kprobe_trap", regs, address, 0, SIGTRAP);
}
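
/*
 * Usage sketch (illustrative only, not part of this file): the arch hooks
 * above back the generic kprobes API. A minimal client, assuming an
 * arbitrary probed symbol name, would look roughly like:
 *
 *      static int pre(struct kprobe *p, struct pt_regs *regs)
 *      {
 *              pr_info("hit %pS\n", (void *)instruction_pointer(regs));
 *              return 0;       // let setup_singlestep() run
 *      }
 *
 *      static struct kprobe kp = {
 *              .symbol_name = "kernel_clone",  // example symbol only
 *              .pre_handler = pre,
 *      };
 *
 *      register_kprobe(&kp);   // arms via arch_arm_kprobe()
 *      ...
 *      unregister_kprobe(&kp); // disarms; arch_remove_kprobe() cleans up
 */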