linux/arch/sh/kernel/hw_breakpoint.c
/*
 * arch/sh/kernel/hw_breakpoint.c
 *
 * Unified kernel/user-space hardware breakpoint facility for the on-chip UBC.
 *
 * Copyright (C) 2009 - 2010  Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/init.h>
#include <linux/perf_event.h>
#include <linux/sched/signal.h>
#include <linux/hw_breakpoint.h>
#include <linux/percpu.h>
#include <linux/kallsyms.h>
#include <linux/notifier.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <linux/io.h>
#include <linux/clk.h>
#include <asm/hw_breakpoint.h>
#include <asm/mmu_context.h>
#include <asm/ptrace.h>
#include <asm/traps.h>

/*
 * Stores the breakpoints currently in use on each breakpoint address
 * register for each CPU.
 */
static DEFINE_PER_CPU(struct perf_event *, bp_per_reg[HBP_NUM]);

/*
 * A dummy placeholder for early accesses until the CPUs get a chance to
 * register their UBCs later in the boot process.
 */
static struct sh_ubc ubc_dummy = { .num_events = 0 };

static struct sh_ubc *sh_ubc __read_mostly = &ubc_dummy;

/*
 * Install a perf counter breakpoint.
 *
 * We seek a free UBC channel and use it for this breakpoint.
 *
 * Atomic: we hold the counter->ctx->lock and we only handle variables
 * and registers local to this cpu.
 */
int arch_install_hw_breakpoint(struct perf_event *bp)
{
	struct arch_hw_breakpoint *info = counter_arch_bp(bp);
	int i;

	for (i = 0; i < sh_ubc->num_events; i++) {
		struct perf_event **slot = this_cpu_ptr(&bp_per_reg[i]);

		if (!*slot) {
			*slot = bp;
			break;
		}
	}

	if (WARN_ONCE(i == sh_ubc->num_events, "Can't find any breakpoint slot"))
		return -EBUSY;

	clk_enable(sh_ubc->clk);
	sh_ubc->enable(info, i);

	return 0;
}

/*
 * Uninstall the breakpoint contained in the given counter.
 *
 * First we search for the debug address register it uses, and then we
 * disable it.
 *
 * Atomic: we hold the counter->ctx->lock and we only handle variables
 * and registers local to this cpu.
 */
void arch_uninstall_hw_breakpoint(struct perf_event *bp)
{
	struct arch_hw_breakpoint *info = counter_arch_bp(bp);
	int i;

	for (i = 0; i < sh_ubc->num_events; i++) {
		struct perf_event **slot = this_cpu_ptr(&bp_per_reg[i]);

		if (*slot == bp) {
			*slot = NULL;
			break;
		}
	}

	if (WARN_ONCE(i == sh_ubc->num_events, "Can't find any breakpoint slot"))
		return;

	sh_ubc->disable(info, i);
	clk_disable(sh_ubc->clk);
}

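/*
 * Translate the UBC length encoding into a size in bytes.
 */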
static int get_hbp_len(u16 hbp_len)
{
	unsigned int len_in_bytes = 0;

	switch (hbp_len) {
	case SH_BREAKPOINT_LEN_1:
		len_in_bytes = 1;
		break;
	case SH_BREAKPOINT_LEN_2:
		len_in_bytes = 2;
		break;
	case SH_BREAKPOINT_LEN_4:
		len_in_bytes = 4;
		break;
	case SH_BREAKPOINT_LEN_8:
		len_in_bytes = 8;
		break;
	}
	return len_in_bytes;
}

/*
 * Check for virtual address in kernel space.
 */
int arch_check_bp_in_kernelspace(struct arch_hw_breakpoint *hw)
{
	unsigned int len;
	unsigned long va;

	va = hw->address;
	len = get_hbp_len(hw->len);

	return (va >= TASK_SIZE) && ((va + len - 1) >= TASK_SIZE);
}

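/*
 * Translate the arch-specific breakpoint length and type encodings into
 * their generic hw_breakpoint equivalents.
 */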
int arch_bp_generic_fields(int sh_len, int sh_type,
			   int *gen_len, int *gen_type)
{
	/* Len */
	switch (sh_len) {
	case SH_BREAKPOINT_LEN_1:
		*gen_len = HW_BREAKPOINT_LEN_1;
		break;
	case SH_BREAKPOINT_LEN_2:
		*gen_len = HW_BREAKPOINT_LEN_2;
		break;
	case SH_BREAKPOINT_LEN_4:
		*gen_len = HW_BREAKPOINT_LEN_4;
		break;
	case SH_BREAKPOINT_LEN_8:
		*gen_len = HW_BREAKPOINT_LEN_8;
		break;
	default:
		return -EINVAL;
	}

	/* Type */
	switch (sh_type) {
	case SH_BREAKPOINT_READ:
		*gen_type = HW_BREAKPOINT_R;
		break;
	case SH_BREAKPOINT_WRITE:
		*gen_type = HW_BREAKPOINT_W;
		break;
	case SH_BREAKPOINT_RW:
		*gen_type = HW_BREAKPOINT_W | HW_BREAKPOINT_R;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

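/*
 * Construct the arch-specific breakpoint description from the generic
 * perf event attributes.
 */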
static int arch_build_bp_info(struct perf_event *bp)
{
	struct arch_hw_breakpoint *info = counter_arch_bp(bp);

	info->address = bp->attr.bp_addr;

	/* Len */
	switch (bp->attr.bp_len) {
	case HW_BREAKPOINT_LEN_1:
		info->len = SH_BREAKPOINT_LEN_1;
		break;
	case HW_BREAKPOINT_LEN_2:
		info->len = SH_BREAKPOINT_LEN_2;
		break;
	case HW_BREAKPOINT_LEN_4:
		info->len = SH_BREAKPOINT_LEN_4;
		break;
	case HW_BREAKPOINT_LEN_8:
		info->len = SH_BREAKPOINT_LEN_8;
		break;
	default:
		return -EINVAL;
	}

	/* Type */
	switch (bp->attr.bp_type) {
	case HW_BREAKPOINT_R:
		info->type = SH_BREAKPOINT_READ;
		break;
	case HW_BREAKPOINT_W:
		info->type = SH_BREAKPOINT_WRITE;
		break;
	case HW_BREAKPOINT_W | HW_BREAKPOINT_R:
		info->type = SH_BREAKPOINT_RW;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

/*
 * Validate the arch-specific HW Breakpoint register settings
 */
int arch_validate_hwbkpt_settings(struct perf_event *bp)
{
	struct arch_hw_breakpoint *info = counter_arch_bp(bp);
	unsigned int align;
	int ret;

	ret = arch_build_bp_info(bp);
	if (ret)
		return ret;

	ret = -EINVAL;

	switch (info->len) {
	case SH_BREAKPOINT_LEN_1:
		align = 0;
		break;
	case SH_BREAKPOINT_LEN_2:
		align = 1;
		break;
	case SH_BREAKPOINT_LEN_4:
		align = 3;
		break;
	case SH_BREAKPOINT_LEN_8:
		align = 7;
		break;
	default:
		return ret;
	}

	/*
	 * For kernel addresses, either the address or the symbol name can
	 * be specified.
	 */
	if (info->name)
		info->address = (unsigned long)kallsyms_lookup_name(info->name);

	/*
	 * Check that the low-order bits of the address are appropriate
	 * for the alignment implied by len.
	 */
	if (info->address & align)
		return -EINVAL;

	return 0;
}

/*
 * Release the user breakpoints used by ptrace
 */
void flush_ptrace_hw_breakpoint(struct task_struct *tsk)
{
	int i;
	struct thread_struct *t = &tsk->thread;

	for (i = 0; i < sh_ubc->num_events; i++) {
		unregister_hw_breakpoint(t->ptrace_bps[i]);
		t->ptrace_bps[i] = NULL;
	}
}

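/*
 * Breakpoint exception handler proper: walk the triggered channels,
 * dispatch the matching perf events, and restore whichever channels
 * should remain active afterwards.
 */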
static int __kprobes hw_breakpoint_handler(struct die_args *args)
{
	int cpu, i, rc = NOTIFY_STOP;
	struct perf_event *bp;
	unsigned int cmf, resume_mask;

	/*
	 * Do an early return if none of the channels triggered.
	 */
	cmf = sh_ubc->triggered_mask();
	if (unlikely(!cmf))
		return NOTIFY_DONE;

	/*
	 * By default, resume all of the active channels.
	 */
	resume_mask = sh_ubc->active_mask();

	/*
	 * Disable breakpoints during exception handling.
	 */
	sh_ubc->disable_all();

	cpu = get_cpu();
	for (i = 0; i < sh_ubc->num_events; i++) {
		unsigned long event_mask = (1 << i);

		if (likely(!(cmf & event_mask)))
			continue;

		/*
		 * The counter may be concurrently released, but that can
		 * only occur from a call_rcu() path, so we can safely fetch
		 * the breakpoint, use its callback and touch its counter
		 * while inside this rcu_read_lock() section.
		 */
		rcu_read_lock();

		bp = per_cpu(bp_per_reg[i], cpu);
		if (bp)
			rc = NOTIFY_STOP;

		/*
		 * Reset the condition match flag to denote completion of
		 * exception handling.
		 */
		sh_ubc->clear_triggered_mask(event_mask);

		/*
		 * bp can be NULL due to concurrent perf counter
		 * removing.
		 */
		if (!bp) {
			rcu_read_unlock();
			break;
		}

		/*
		 * Don't restore the channel if the breakpoint is from
		 * ptrace, as it always operates in one-shot mode.
		 */
		if (bp->overflow_handler == ptrace_triggered)
			resume_mask &= ~(1 << i);

		perf_bp_event(bp, args->regs);

		/* Deliver the signal to userspace */
		if (!arch_check_bp_in_kernelspace(&bp->hw.info)) {
			force_sig_fault(SIGTRAP, TRAP_HWBKPT,
					(void __user *)NULL, current);
		}

		rcu_read_unlock();
	}

	if (cmf == 0)
		rc = NOTIFY_DONE;

	sh_ubc->enable_all(resume_mask);

	put_cpu();

	return rc;
}

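/*
 * Breakpoint trap entry point. This simply forwards the exception to
 * the die chain, where hw_breakpoint_exceptions_notify() will pick it
 * up if the UBC was the trigger.
 */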
BUILD_TRAP_HANDLER(breakpoint)
{
	unsigned long ex = lookup_exception_vector();
	TRAP_HANDLER_DECL;

	notify_die(DIE_BREAKPOINT, "breakpoint", regs, 0, ex, SIGTRAP);
}

/*
 * Handle debug exception notifications.
 */
int __kprobes hw_breakpoint_exceptions_notify(struct notifier_block *unused,
				    unsigned long val, void *data)
{
	struct die_args *args = data;

	if (val != DIE_BREAKPOINT)
		return NOTIFY_DONE;

	/*
	 * If the breakpoint hasn't been triggered by the UBC, it's
	 * probably from a debugger, so don't do anything more here.
	 *
	 * This also permits the UBC interface clock to remain off for
	 * non-UBC breakpoints, as we don't need to check the triggered
	 * or active channel masks.
	 */
	if (args->trapnr != sh_ubc->trap_nr)
		return NOTIFY_DONE;

	return hw_breakpoint_handler(data);
}

void hw_breakpoint_pmu_read(struct perf_event *bp)
{
	/* TODO */
}

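/*
 * Register CPU-specific UBC support. This is expected to be called once
 * by the CPU's UBC driver early in boot; only the first registration
 * wins. As an illustrative sketch (driver and handler names hypothetical,
 * field values made up), a caller might look like:
 *
 *	static struct sh_ubc my_ubc = {
 *		.name			= "my-ubc",
 *		.num_events		= 2,
 *		.trap_nr		= 0x1e0,
 *		.enable			= my_ubc_enable,
 *		.disable		= my_ubc_disable,
 *		.enable_all		= my_ubc_enable_all,
 *		.disable_all		= my_ubc_disable_all,
 *		.active_mask		= my_ubc_active_mask,
 *		.triggered_mask		= my_ubc_triggered_mask,
 *		.clear_triggered_mask	= my_ubc_clear_triggered_mask,
 *	};
 *
 *	register_sh_ubc(&my_ubc);
 *
 * The driver would also acquire and assign ->clk before registering, as
 * the install/uninstall paths above gate the UBC interface clock with
 * clk_enable()/clk_disable().
 */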
int register_sh_ubc(struct sh_ubc *ubc)
{
	/* Bail if it's already assigned */
	if (sh_ubc != &ubc_dummy)
		return -EBUSY;
	sh_ubc = ubc;

	pr_info("HW Breakpoints: %s UBC support registered\n", ubc->name);

	WARN_ON(ubc->num_events > HBP_NUM);

	return 0;
}