linux/arch/riscv/kernel/probes/kprobes.c
// SPDX-License-Identifier: GPL-2.0+

#define pr_fmt(fmt) "kprobes: " fmt

#include <linux/kprobes.h>
#include <linux/extable.h>
#include <linux/slab.h>
#include <linux/stop_machine.h>
#include <asm/ptrace.h>
#include <linux/uaccess.h>
#include <asm/sections.h>
#include <asm/cacheflush.h>
#include <asm/bug.h>
#include <asm/patch.h>

#include "decode-insn.h"

DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);

static void __kprobes
post_kprobe_handler(struct kprobe *, struct kprobe_ctlblk *, struct pt_regs *);

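/*
 * Prepare the out-of-line single-step slot: copy the probed instruction
 * into the slot and follow it with a 32-bit EBREAK so that execution
 * traps back into the kernel once the instruction has been stepped.
 * The PC is then restored to the instruction following the probe.
 */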
static void __kprobes arch_prepare_ss_slot(struct kprobe *p)
{
        unsigned long offset = GET_INSN_LENGTH(p->opcode);

        p->ainsn.api.restore = (unsigned long)p->addr + offset;

        patch_text(p->ainsn.api.insn, p->opcode);
        patch_text((void *)((unsigned long)(p->ainsn.api.insn) + offset),
                   __BUG_INSN_32);
}

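/*
 * Simulated instructions have no out-of-line slot; the simulation
 * handler computes the resulting PC itself, so there is no saved
 * address to restore afterwards.
 */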
static void __kprobes arch_prepare_simulate(struct kprobe *p)
{
        p->ainsn.api.restore = 0;
}

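/*
 * Emulate the probed instruction via its decode-time handler and then
 * run the normal post-probe processing, since no single-step trap will
 * follow.
 */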
static void __kprobes arch_simulate_insn(struct kprobe *p, struct pt_regs *regs)
{
        struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

        if (p->ainsn.api.handler)
                p->ainsn.api.handler((u32)p->opcode,
                                        (unsigned long)p->addr, regs);

        post_kprobe_handler(p, kcb, regs);
}

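/*
 * Called when a kprobe is registered: reject misaligned addresses, save
 * the original instruction and decide, based on the decoder, whether it
 * can be single-stepped out of line or must be simulated.
 */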
int __kprobes arch_prepare_kprobe(struct kprobe *p)
{
        unsigned long probe_addr = (unsigned long)p->addr;

        if (probe_addr & 0x1)
                return -EILSEQ;

        /* copy instruction */
        p->opcode = *p->addr;

        /* decode instruction */
        switch (riscv_probe_decode_insn(p->addr, &p->ainsn.api)) {
        case INSN_REJECTED:     /* insn not supported */
                return -EINVAL;

        case INSN_GOOD_NO_SLOT: /* insn needs simulation */
                p->ainsn.api.insn = NULL;
                break;

        case INSN_GOOD: /* instruction uses slot */
                p->ainsn.api.insn = get_insn_slot();
                if (!p->ainsn.api.insn)
                        return -ENOMEM;
                break;
        }

        /* prepare the instruction */
        if (p->ainsn.api.insn)
                arch_prepare_ss_slot(p);
        else
                arch_prepare_simulate(p);

        return 0;
}

#ifdef CONFIG_MMU
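/*
 * Allocate the page used for out-of-line single-step slots from the
 * vmalloc area with execute permission; permissions are reset when the
 * mapping is freed (VM_FLUSH_RESET_PERMS).
 */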
void *alloc_insn_page(void)
{
        return __vmalloc_node_range(PAGE_SIZE, 1, VMALLOC_START, VMALLOC_END,
                                    GFP_KERNEL, PAGE_KERNEL_READ_EXEC,
                                    VM_FLUSH_RESET_PERMS, NUMA_NO_NODE,
                                    __builtin_return_address(0));
}
#endif

/* install breakpoint in text */
void __kprobes arch_arm_kprobe(struct kprobe *p)
{
        if ((p->opcode & __INSN_LENGTH_MASK) == __INSN_LENGTH_32)
                patch_text(p->addr, __BUG_INSN_32);
        else
                patch_text(p->addr, __BUG_INSN_16);
}

/* remove breakpoint from text */
void __kprobes arch_disarm_kprobe(struct kprobe *p)
{
        patch_text(p->addr, p->opcode);
}

void __kprobes arch_remove_kprobe(struct kprobe *p)
{
}

static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
{
        kcb->prev_kprobe.kp = kprobe_running();
        kcb->prev_kprobe.status = kcb->kprobe_status;
}

static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
{
        __this_cpu_write(current_kprobe, kcb->prev_kprobe.kp);
        kcb->kprobe_status = kcb->prev_kprobe.status;
}

static void __kprobes set_current_kprobe(struct kprobe *p)
{
        __this_cpu_write(current_kprobe, p);
}

/*
 * Interrupts need to be disabled before single-step mode is set, and not
 * reenabled until after single-step mode ends.
 * Without disabling interrupts on the local CPU, there is a chance of an
 * interrupt occurring between the exception return and the start of the
 * out-of-line single-step, which would result in wrongly single-stepping
 * into the interrupt handler.
 */
static void __kprobes kprobes_save_local_irqflag(struct kprobe_ctlblk *kcb,
                                                struct pt_regs *regs)
{
        kcb->saved_status = regs->status;
        regs->status &= ~SR_SPIE;
}

static void __kprobes kprobes_restore_local_irqflag(struct kprobe_ctlblk *kcb,
                                                struct pt_regs *regs)
{
        regs->status = kcb->saved_status;
}

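/*
 * Start executing the probed instruction: either jump to the
 * out-of-line slot with interrupts masked, or simulate it in place.
 */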
static void __kprobes setup_singlestep(struct kprobe *p,
                                       struct pt_regs *regs,
                                       struct kprobe_ctlblk *kcb, int reenter)
{
        unsigned long slot;

        if (reenter) {
                save_previous_kprobe(kcb);
                set_current_kprobe(p);
                kcb->kprobe_status = KPROBE_REENTER;
        } else {
                kcb->kprobe_status = KPROBE_HIT_SS;
        }

        if (p->ainsn.api.insn) {
                /* prepare for single stepping */
                slot = (unsigned long)p->ainsn.api.insn;

                /* IRQs and single stepping do not mix well. */
                kprobes_save_local_irqflag(kcb, regs);

                instruction_pointer_set(regs, slot);
        } else {
                /* insn simulation */
                arch_simulate_insn(p, regs);
        }
}

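/*
 * Handle a breakpoint hit while another kprobe is already active,
 * e.g. from within a pre/post handler. Recoverable cases are counted
 * as missed and single-stepped with the previous kprobe state saved.
 */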
static int __kprobes reenter_kprobe(struct kprobe *p,
                                    struct pt_regs *regs,
                                    struct kprobe_ctlblk *kcb)
{
        switch (kcb->kprobe_status) {
        case KPROBE_HIT_SSDONE:
        case KPROBE_HIT_ACTIVE:
                kprobes_inc_nmissed_count(p);
                setup_singlestep(p, regs, kcb, 1);
                break;
        case KPROBE_HIT_SS:
        case KPROBE_REENTER:
                pr_warn("Failed to recover from reentered kprobes.\n");
                dump_kprobe(p);
                BUG();
                break;
        default:
                WARN_ON(1);
                return 0;
        }

        return 1;
}

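/*
 * Finish a probe hit after the single-step (or simulation) completed:
 * fix up the PC, run the post handler and clear the current kprobe,
 * or fall back to the previous kprobe when reentered.
 */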
static void __kprobes
post_kprobe_handler(struct kprobe *cur, struct kprobe_ctlblk *kcb, struct pt_regs *regs)
{
        /* restore the PC to the insn following the probe, unless it was simulated */
        if (cur->ainsn.api.restore != 0)
                regs->epc = cur->ainsn.api.restore;

        /* restore back original saved kprobe variables and continue */
        if (kcb->kprobe_status == KPROBE_REENTER) {
                restore_previous_kprobe(kcb);
                return;
        }

        /* call post handler */
        kcb->kprobe_status = KPROBE_HIT_SSDONE;
        if (cur->post_handler) {
                /*
                 * The post_handler can hit a breakpoint and single step
                 * again; reenter_kprobe() handles that recursion.
                 */
                cur->post_handler(cur, regs, 0);
        }

        reset_current_kprobe();
}

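/*
 * Called from the fault handling code when a fault is taken while a
 * kprobe is active, so the probe state can be unwound or the fault
 * fixed up via the exception tables.
 */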
int __kprobes kprobe_fault_handler(struct pt_regs *regs, unsigned int trapnr)
{
        struct kprobe *cur = kprobe_running();
        struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

        switch (kcb->kprobe_status) {
        case KPROBE_HIT_SS:
        case KPROBE_REENTER:
                /*
                 * We are here because the instruction being single
                 * stepped caused a page fault. We reset the current
                 * kprobe, point the ip back to the probe address
                 * and allow the page fault handler to continue as a
                 * normal page fault.
                 */
                regs->epc = (unsigned long) cur->addr;
                BUG_ON(!instruction_pointer(regs));

                if (kcb->kprobe_status == KPROBE_REENTER)
                        restore_previous_kprobe(kcb);
                else {
                        kprobes_restore_local_irqflag(kcb, regs);
                        reset_current_kprobe();
                }

                break;
        case KPROBE_HIT_ACTIVE:
        case KPROBE_HIT_SSDONE:
                /*
                 * Try to fix up the fault, e.g. when a handler
                 * triggered a fault on a user access.
                 */
                if (fixup_exception(regs))
                        return 1;
        }
        return 0;
}

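/*
 * Entry point from the trap handler for the EBREAK planted by
 * arch_arm_kprobe(). Returns true if the break belonged to a kprobe.
 */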
bool __kprobes
kprobe_breakpoint_handler(struct pt_regs *regs)
{
        struct kprobe *p, *cur_kprobe;
        struct kprobe_ctlblk *kcb;
        unsigned long addr = instruction_pointer(regs);

        kcb = get_kprobe_ctlblk();
        cur_kprobe = kprobe_running();

        p = get_kprobe((kprobe_opcode_t *) addr);

        if (p) {
                if (cur_kprobe) {
                        if (reenter_kprobe(p, regs, kcb))
                                return true;
                } else {
                        /* Probe hit */
                        set_current_kprobe(p);
                        kcb->kprobe_status = KPROBE_HIT_ACTIVE;

                        /*
                         * If we have no pre-handler or it returned 0, we
                         * continue with normal processing.  If we have a
                         * pre-handler and it returned non-zero, it has
                         * already modified the execution path, so there is
                         * no need to single-step; just reset the current
                         * kprobe and exit.
                         *
                         * A pre-handler can itself hit a breakpoint and
                         * single-step before returning.
                         */
                        if (!p->pre_handler || !p->pre_handler(p, regs))
                                setup_singlestep(p, regs, kcb, 0);
                        else
                                reset_current_kprobe();
                }
                return true;
        }

        /*
         * The breakpoint instruction was removed right
         * after we hit it.  Another cpu has removed
         * either a probepoint or a debugger breakpoint
         * at this address.  In either case, no further
         * handling of this interrupt is appropriate.
         * Return to the original instruction and continue.
         */
        return false;
}

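/*
 * Entry point from the trap handler for the EBREAK that terminates the
 * single-step slot; it only claims the trap if the PC sits right after
 * the instruction currently being stepped.
 */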
bool __kprobes
kprobe_single_step_handler(struct pt_regs *regs)
{
        struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
        unsigned long addr = instruction_pointer(regs);
        struct kprobe *cur = kprobe_running();

        if (cur && (kcb->kprobe_status & (KPROBE_HIT_SS | KPROBE_REENTER)) &&
            ((unsigned long)&cur->ainsn.api.insn[0] + GET_INSN_LENGTH(cur->opcode) == addr)) {
                kprobes_restore_local_irqflag(kcb, regs);
                post_kprobe_handler(cur, kcb, regs);
                return true;
        }
        /* not ours, kprobes should ignore it */
        return false;
}

/*
 * Provide a blacklist of symbols identifying ranges which cannot be kprobed.
 * This blacklist is exposed to userspace via debugfs (kprobes/blacklist).
 */
int __init arch_populate_kprobe_blacklist(void)
{
        int ret;

        ret = kprobe_add_area_blacklist((unsigned long)__irqentry_text_start,
                                        (unsigned long)__irqentry_text_end);
        return ret;
}

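/*
 * Called from __kretprobe_trampoline to find and return the original
 * return address of the probed function.
 */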
void __kprobes __used *trampoline_probe_handler(struct pt_regs *regs)
{
        return (void *)kretprobe_trampoline_handler(regs, NULL);
}

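/*
 * Hijack the return address of the probed function: remember the real
 * ra and point it at __kretprobe_trampoline instead.
 */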
void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
                                      struct pt_regs *regs)
{
        ri->ret_addr = (kprobe_opcode_t *)regs->ra;
        ri->fp = NULL;
        regs->ra = (unsigned long) &__kretprobe_trampoline;
}

int __kprobes arch_trampoline_kprobe(struct kprobe *p)
{
        return 0;
}

int __init arch_init_kprobes(void)
{
        return 0;
}
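
/*
 * Usage sketch (not part of this file): the handlers above back the
 * generic kprobes API, so a module on RISC-V would typically exercise
 * this code roughly as follows. "kp", "handler_pre" and the probed
 * symbol name are illustrative, not symbols defined here.
 *
 *	#include <linux/kprobes.h>
 *
 *	static int handler_pre(struct kprobe *p, struct pt_regs *regs)
 *	{
 *		pr_info("pre: epc=%lx\n", regs->epc);
 *		return 0;	// 0: continue with single-step/simulation
 *	}
 *
 *	static struct kprobe kp = {
 *		.symbol_name = "kernel_clone",
 *		.pre_handler = handler_pre,
 *	};
 *
 *	// register_kprobe() ends up in arch_prepare_kprobe() and
 *	// arch_arm_kprobe() above; unregister_kprobe() disarms the probe.
 *	// (Error handling omitted for brevity.)
 *	register_kprobe(&kp);
 *	...
 *	unregister_kprobe(&kp);
 */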