/*
 * arch/sh/kernel/hw_breakpoint.c
 *
 * Unified kernel/user-space hardware breakpoint facility for the on-chip UBC.
 *
 * Copyright (C) 2009 - 2010  Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/init.h>
#include <linux/perf_event.h>
#include <linux/sched/signal.h>
#include <linux/hw_breakpoint.h>
#include <linux/percpu.h>
#include <linux/kallsyms.h>
#include <linux/notifier.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <linux/io.h>
#include <linux/clk.h>
#include <asm/hw_breakpoint.h>
#include <asm/mmu_context.h>
#include <asm/ptrace.h>
#include <asm/traps.h>

/*
 * Stores the breakpoint currently in use in each breakpoint address
 * register on each CPU.
 */
static DEFINE_PER_CPU(struct perf_event *, bp_per_reg[HBP_NUM]);

/*
 * A dummy placeholder for early accesses until the CPUs get a chance to
 * register their UBCs later in the boot process.
 */
static struct sh_ubc ubc_dummy = { .num_events = 0 };

static struct sh_ubc *sh_ubc __read_mostly = &ubc_dummy;
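
/*
 * The parts of struct sh_ubc (defined in <asm/hw_breakpoint.h>) that this
 * file relies on: num_events, trap_nr, clk and name, plus the per-channel
 * enable()/disable() callbacks and the global enable_all(), disable_all(),
 * active_mask(), triggered_mask() and clear_triggered_mask() hooks.
 */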

/*
 * Install a perf counter breakpoint.
 *
 * We seek a free UBC channel and use it for this breakpoint.
 *
 * Atomic: we hold the counter->ctx->lock and we only handle variables
 * and registers local to this cpu.
 */
int arch_install_hw_breakpoint(struct perf_event *bp)
{
        struct arch_hw_breakpoint *info = counter_arch_bp(bp);
        int i;

        for (i = 0; i < sh_ubc->num_events; i++) {
                struct perf_event **slot = this_cpu_ptr(&bp_per_reg[i]);

                if (!*slot) {
                        *slot = bp;
                        break;
                }
        }

        if (WARN_ONCE(i == sh_ubc->num_events, "Can't find any breakpoint slot"))
                return -EBUSY;

        clk_enable(sh_ubc->clk);
        sh_ubc->enable(info, i);

        return 0;
}

/*
 * Uninstall the breakpoint contained in the given counter.
 *
 * First we search for the debug address register it uses, then we
 * disable it.
 *
 * Atomic: we hold the counter->ctx->lock and we only handle variables
 * and registers local to this cpu.
 */
void arch_uninstall_hw_breakpoint(struct perf_event *bp)
{
        struct arch_hw_breakpoint *info = counter_arch_bp(bp);
        int i;

        for (i = 0; i < sh_ubc->num_events; i++) {
                struct perf_event **slot = this_cpu_ptr(&bp_per_reg[i]);

                if (*slot == bp) {
                        *slot = NULL;
                        break;
                }
        }

        if (WARN_ONCE(i == sh_ubc->num_events, "Can't find any breakpoint slot"))
                return;

        sh_ubc->disable(info, i);
        clk_disable(sh_ubc->clk);
}

static int get_hbp_len(u16 hbp_len)
{
        unsigned int len_in_bytes = 0;

        switch (hbp_len) {
        case SH_BREAKPOINT_LEN_1:
                len_in_bytes = 1;
                break;
        case SH_BREAKPOINT_LEN_2:
                len_in_bytes = 2;
                break;
        case SH_BREAKPOINT_LEN_4:
                len_in_bytes = 4;
                break;
        case SH_BREAKPOINT_LEN_8:
                len_in_bytes = 8;
                break;
        }
        return len_in_bytes;
}

/*
 * Check for virtual address in kernel space.
 */
int arch_check_bp_in_kernelspace(struct perf_event *bp)
{
        unsigned int len;
        unsigned long va;
        struct arch_hw_breakpoint *info = counter_arch_bp(bp);

        va = info->address;
        len = get_hbp_len(info->len);

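        /*
         * The breakpoint counts as a kernel one only if both its first
         * and its last byte lie at or above TASK_SIZE; checking the end
         * as well also guards against a range that wraps around the top
         * of the address space.
         */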
        return (va >= TASK_SIZE) && ((va + len - 1) >= TASK_SIZE);
}

int arch_bp_generic_fields(int sh_len, int sh_type,
                           int *gen_len, int *gen_type)
{
        /* Len */
        switch (sh_len) {
        case SH_BREAKPOINT_LEN_1:
                *gen_len = HW_BREAKPOINT_LEN_1;
                break;
        case SH_BREAKPOINT_LEN_2:
                *gen_len = HW_BREAKPOINT_LEN_2;
                break;
        case SH_BREAKPOINT_LEN_4:
                *gen_len = HW_BREAKPOINT_LEN_4;
                break;
        case SH_BREAKPOINT_LEN_8:
                *gen_len = HW_BREAKPOINT_LEN_8;
                break;
        default:
                return -EINVAL;
        }

        /* Type */
        switch (sh_type) {
        case SH_BREAKPOINT_READ:
                *gen_type = HW_BREAKPOINT_R;
                break;
        case SH_BREAKPOINT_WRITE:
                *gen_type = HW_BREAKPOINT_W;
                break;
        case SH_BREAKPOINT_RW:
                *gen_type = HW_BREAKPOINT_W | HW_BREAKPOINT_R;
                break;
        default:
                return -EINVAL;
        }

        return 0;
}

static int arch_build_bp_info(struct perf_event *bp)
{
        struct arch_hw_breakpoint *info = counter_arch_bp(bp);

        info->address = bp->attr.bp_addr;

        /* Len */
        switch (bp->attr.bp_len) {
        case HW_BREAKPOINT_LEN_1:
                info->len = SH_BREAKPOINT_LEN_1;
                break;
        case HW_BREAKPOINT_LEN_2:
                info->len = SH_BREAKPOINT_LEN_2;
                break;
        case HW_BREAKPOINT_LEN_4:
                info->len = SH_BREAKPOINT_LEN_4;
                break;
        case HW_BREAKPOINT_LEN_8:
                info->len = SH_BREAKPOINT_LEN_8;
                break;
        default:
                return -EINVAL;
        }

        /* Type */
        switch (bp->attr.bp_type) {
        case HW_BREAKPOINT_R:
                info->type = SH_BREAKPOINT_READ;
                break;
        case HW_BREAKPOINT_W:
                info->type = SH_BREAKPOINT_WRITE;
                break;
        case HW_BREAKPOINT_W | HW_BREAKPOINT_R:
                info->type = SH_BREAKPOINT_RW;
                break;
        default:
                return -EINVAL;
        }

        return 0;
}

/*
 * Validate the arch-specific HW Breakpoint register settings
 */
int arch_validate_hwbkpt_settings(struct perf_event *bp)
{
        struct arch_hw_breakpoint *info = counter_arch_bp(bp);
        unsigned int align;
        int ret;

        ret = arch_build_bp_info(bp);
        if (ret)
                return ret;

        ret = -EINVAL;

        switch (info->len) {
        case SH_BREAKPOINT_LEN_1:
                align = 0;
                break;
        case SH_BREAKPOINT_LEN_2:
                align = 1;
                break;
        case SH_BREAKPOINT_LEN_4:
                align = 3;
                break;
        case SH_BREAKPOINT_LEN_8:
                align = 7;
                break;
        default:
                return ret;
        }

        /*
         * For kernel-addresses, either the address or symbol name can be
         * specified.
         */
        if (info->name)
                info->address = (unsigned long)kallsyms_lookup_name(info->name);

        /*
         * Check that the low-order bits of the address are appropriate
         * for the alignment implied by len.
         */
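        /* e.g. SH_BREAKPOINT_LEN_4 yields align = 3, so address & 3 must be 0 */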
        if (info->address & align)
                return -EINVAL;

        return 0;
}
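
/*
 * For illustration, a user-space watchpoint that ends up in the
 * validation above is normally created through the generic
 * hw_breakpoint API (a minimal sketch; my_handler and tsk are
 * placeholders, not part of this file):
 *
 *	struct perf_event_attr attr;
 *	struct perf_event *wp;
 *
 *	hw_breakpoint_init(&attr);
 *	attr.bp_addr = addr;
 *	attr.bp_len  = HW_BREAKPOINT_LEN_2;
 *	attr.bp_type = HW_BREAKPOINT_W;
 *
 *	wp = register_user_hw_breakpoint(&attr, my_handler, NULL, tsk);
 */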

/*
 * Release the user breakpoints used by ptrace
 */
void flush_ptrace_hw_breakpoint(struct task_struct *tsk)
{
        int i;
        struct thread_struct *t = &tsk->thread;

        for (i = 0; i < sh_ubc->num_events; i++) {
                unregister_hw_breakpoint(t->ptrace_bps[i]);
                t->ptrace_bps[i] = NULL;
        }
}

static int __kprobes hw_breakpoint_handler(struct die_args *args)
{
        int cpu, i, rc = NOTIFY_STOP;
        struct perf_event *bp;
        unsigned int cmf, resume_mask;

        /*
         * Do an early return if none of the channels triggered.
         */
        cmf = sh_ubc->triggered_mask();
        if (unlikely(!cmf))
                return NOTIFY_DONE;

        /*
         * By default, resume all of the active channels.
         */
        resume_mask = sh_ubc->active_mask();

        /*
         * Disable breakpoints during exception handling.
         */
        sh_ubc->disable_all();

        cpu = get_cpu();
        for (i = 0; i < sh_ubc->num_events; i++) {
                unsigned long event_mask = (1 << i);

                if (likely(!(cmf & event_mask)))
                        continue;

                /*
                 * The counter may be concurrently released, but that can
                 * only occur from a call_rcu() path. We can then safely
                 * fetch the breakpoint, use its callback and touch its
                 * counter while we are in an rcu_read_lock() path.
                 */
                rcu_read_lock();

                bp = per_cpu(bp_per_reg[i], cpu);
                if (bp)
                        rc = NOTIFY_DONE;

                /*
                 * Reset the condition match flag to denote completion of
                 * exception handling.
                 */
                sh_ubc->clear_triggered_mask(event_mask);

                /*
                 * bp can be NULL due to a concurrent perf counter
                 * removal.
                 */
                if (!bp) {
                        rcu_read_unlock();
                        break;
                }

                /*
                 * Don't restore the channel if the breakpoint is from
                 * ptrace, as it always operates in one-shot mode.
                 */
                if (bp->overflow_handler == ptrace_triggered)
                        resume_mask &= ~(1 << i);

                perf_bp_event(bp, args->regs);

                /* Deliver the signal to userspace */
                if (!arch_check_bp_in_kernelspace(bp)) {
                        siginfo_t info;

                        info.si_signo = args->signr;
                        info.si_errno = notifier_to_errno(rc);
                        info.si_code = TRAP_HWBKPT;

                        force_sig_info(args->signr, &info, current);
                }

                rcu_read_unlock();
        }

        if (cmf == 0)
                rc = NOTIFY_DONE;

        sh_ubc->enable_all(resume_mask);

        put_cpu();

        return rc;
}

BUILD_TRAP_HANDLER(breakpoint)
{
        unsigned long ex = lookup_exception_vector();
        TRAP_HANDLER_DECL;

        notify_die(DIE_BREAKPOINT, "breakpoint", regs, 0, ex, SIGTRAP);
}

/*
 * Handle debug exception notifications.
 */
int __kprobes hw_breakpoint_exceptions_notify(struct notifier_block *unused,
                                    unsigned long val, void *data)
{
        struct die_args *args = data;

        if (val != DIE_BREAKPOINT)
                return NOTIFY_DONE;

        /*
         * If the breakpoint hasn't been triggered by the UBC, it's
         * probably from a debugger, so don't do anything more here.
         *
         * This also permits the UBC interface clock to remain off for
         * non-UBC breakpoints, as we don't need to check the triggered
         * or active channel masks.
         */
        if (args->trapnr != sh_ubc->trap_nr)
                return NOTIFY_DONE;

        return hw_breakpoint_handler(data);
}

void hw_breakpoint_pmu_read(struct perf_event *bp)
{
        /* TODO */
}

int register_sh_ubc(struct sh_ubc *ubc)
{
        /* Bail if it's already assigned */
        if (sh_ubc != &ubc_dummy)
                return -EBUSY;
        sh_ubc = ubc;

        pr_info("HW Breakpoints: %s UBC support registered\n", ubc->name);

        WARN_ON(ubc->num_events > HBP_NUM);

        return 0;
}
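
/*
 * For illustration, a CPU family's UBC driver would register itself
 * during boot along these lines (a hypothetical sketch; the callback
 * names, trap vector and clock lookup are placeholders, not an actual
 * implementation):
 *
 *	static struct sh_ubc my_ubc = {
 *		.name			= "my-ubc",
 *		.num_events		= 2,
 *		.trap_nr		= 0x1e0,
 *		.enable			= my_ubc_enable,
 *		.disable		= my_ubc_disable,
 *		.enable_all		= my_ubc_enable_all,
 *		.disable_all		= my_ubc_disable_all,
 *		.active_mask		= my_ubc_active_mask,
 *		.triggered_mask		= my_ubc_triggered_mask,
 *		.clear_triggered_mask	= my_ubc_clear_triggered_mask,
 *	};
 *
 *	my_ubc.clk = clk_get(NULL, "ubc0");
 *	return register_sh_ubc(&my_ubc);
 */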