linux/arch/arm/kernel/hw_breakpoint.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 *
 * Copyright (C) 2009, 2010 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 */

/*
 * HW_breakpoint: a unified kernel/user-space hardware breakpoint facility,
 * using the CPU's debug registers.
 */
#define pr_fmt(fmt) "hw-breakpoint: " fmt

#include <linux/errno.h>
#include <linux/hardirq.h>
#include <linux/perf_event.h>
#include <linux/hw_breakpoint.h>
#include <linux/smp.h>
#include <linux/cpu_pm.h>
#include <linux/coresight.h>

#include <asm/cacheflush.h>
#include <asm/cputype.h>
#include <asm/current.h>
#include <asm/hw_breakpoint.h>
#include <asm/traps.h>

/* Breakpoint currently in use for each BRP. */
static DEFINE_PER_CPU(struct perf_event *, bp_on_reg[ARM_MAX_BRP]);

/* Watchpoint currently in use for each WRP. */
static DEFINE_PER_CPU(struct perf_event *, wp_on_reg[ARM_MAX_WRP]);

/* Number of BRP/WRP registers on this CPU. */
static int core_num_brps __ro_after_init;
static int core_num_wrps __ro_after_init;

/* Debug architecture version. */
static u8 debug_arch __ro_after_init;

/* Does debug architecture support OS Save and Restore? */
static bool has_ossr __ro_after_init;

/* Maximum supported watchpoint length. */
static u8 max_watchpoint_len __ro_after_init;

#define READ_WB_REG_CASE(OP2, M, VAL)                   \
        case ((OP2 << 4) + M):                          \
                ARM_DBG_READ(c0, c ## M, OP2, VAL);     \
                break

#define WRITE_WB_REG_CASE(OP2, M, VAL)                  \
        case ((OP2 << 4) + M):                          \
                ARM_DBG_WRITE(c0, c ## M, OP2, VAL);    \
                break

#define GEN_READ_WB_REG_CASES(OP2, VAL)         \
        READ_WB_REG_CASE(OP2, 0, VAL);          \
        READ_WB_REG_CASE(OP2, 1, VAL);          \
        READ_WB_REG_CASE(OP2, 2, VAL);          \
        READ_WB_REG_CASE(OP2, 3, VAL);          \
        READ_WB_REG_CASE(OP2, 4, VAL);          \
        READ_WB_REG_CASE(OP2, 5, VAL);          \
        READ_WB_REG_CASE(OP2, 6, VAL);          \
        READ_WB_REG_CASE(OP2, 7, VAL);          \
        READ_WB_REG_CASE(OP2, 8, VAL);          \
        READ_WB_REG_CASE(OP2, 9, VAL);          \
        READ_WB_REG_CASE(OP2, 10, VAL);         \
        READ_WB_REG_CASE(OP2, 11, VAL);         \
        READ_WB_REG_CASE(OP2, 12, VAL);         \
        READ_WB_REG_CASE(OP2, 13, VAL);         \
        READ_WB_REG_CASE(OP2, 14, VAL);         \
        READ_WB_REG_CASE(OP2, 15, VAL)

#define GEN_WRITE_WB_REG_CASES(OP2, VAL)        \
        WRITE_WB_REG_CASE(OP2, 0, VAL);         \
        WRITE_WB_REG_CASE(OP2, 1, VAL);         \
        WRITE_WB_REG_CASE(OP2, 2, VAL);         \
        WRITE_WB_REG_CASE(OP2, 3, VAL);         \
        WRITE_WB_REG_CASE(OP2, 4, VAL);         \
        WRITE_WB_REG_CASE(OP2, 5, VAL);         \
        WRITE_WB_REG_CASE(OP2, 6, VAL);         \
        WRITE_WB_REG_CASE(OP2, 7, VAL);         \
        WRITE_WB_REG_CASE(OP2, 8, VAL);         \
        WRITE_WB_REG_CASE(OP2, 9, VAL);         \
        WRITE_WB_REG_CASE(OP2, 10, VAL);        \
        WRITE_WB_REG_CASE(OP2, 11, VAL);        \
        WRITE_WB_REG_CASE(OP2, 12, VAL);        \
        WRITE_WB_REG_CASE(OP2, 13, VAL);        \
        WRITE_WB_REG_CASE(OP2, 14, VAL);        \
        WRITE_WB_REG_CASE(OP2, 15, VAL)

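/*
 * read_wb_reg()/write_wb_reg() take a register index n that packs the
 * CP14 operands as (OP2 << 4) | CRm; ARM_DBG_READ/ARM_DBG_WRITE (see
 * asm/hw_breakpoint.h) then expand to MRC/MCR p14, 0, <Rt>, c0, c<CRm>,
 * <OP2>, so e.g. ARM_BASE_BVR + i accesses breakpoint value register i.
 */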
static u32 read_wb_reg(int n)
{
        u32 val = 0;

        switch (n) {
        GEN_READ_WB_REG_CASES(ARM_OP2_BVR, val);
        GEN_READ_WB_REG_CASES(ARM_OP2_BCR, val);
        GEN_READ_WB_REG_CASES(ARM_OP2_WVR, val);
        GEN_READ_WB_REG_CASES(ARM_OP2_WCR, val);
        default:
                pr_warn("attempt to read from unknown breakpoint register %d\n",
                        n);
        }

        return val;
}

static void write_wb_reg(int n, u32 val)
{
        switch (n) {
        GEN_WRITE_WB_REG_CASES(ARM_OP2_BVR, val);
        GEN_WRITE_WB_REG_CASES(ARM_OP2_BCR, val);
        GEN_WRITE_WB_REG_CASES(ARM_OP2_WVR, val);
        GEN_WRITE_WB_REG_CASES(ARM_OP2_WCR, val);
        default:
                pr_warn("attempt to write to unknown breakpoint register %d\n",
                        n);
        }
        isb();
}

/* Determine debug architecture. */
static u8 get_debug_arch(void)
{
        u32 didr;

        /* Do we implement the extended CPUID interface? */
        if (((read_cpuid_id() >> 16) & 0xf) != 0xf) {
                pr_warn_once("CPUID feature registers not supported. "
                             "Assuming v6 debug is present.\n");
                return ARM_DEBUG_ARCH_V6;
        }

        ARM_DBG_READ(c0, c0, 0, didr);
        return (didr >> 16) & 0xf;
}
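
/*
 * get_debug_arch() returns the debug architecture version field of
 * DBGDIDR (bits [19:16]); the ARM_DEBUG_ARCH_* values it is compared
 * against are defined in asm/hw_breakpoint.h.
 */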

u8 arch_get_debug_arch(void)
{
        return debug_arch;
}

static int debug_arch_supported(void)
{
        u8 arch = get_debug_arch();

        /* We don't support the memory-mapped interface. */
        return (arch >= ARM_DEBUG_ARCH_V6 && arch <= ARM_DEBUG_ARCH_V7_ECP14) ||
                arch >= ARM_DEBUG_ARCH_V7_1;
}

/* Can we determine the watchpoint access type from the fsr? */
static int debug_exception_updates_fsr(void)
{
        return get_debug_arch() >= ARM_DEBUG_ARCH_V8;
}

/* Determine number of WRP registers available. */
static int get_num_wrp_resources(void)
{
        u32 didr;
        ARM_DBG_READ(c0, c0, 0, didr);
        return ((didr >> 28) & 0xf) + 1;
}

/* Determine number of BRP registers available. */
static int get_num_brp_resources(void)
{
        u32 didr;
        ARM_DBG_READ(c0, c0, 0, didr);
        return ((didr >> 24) & 0xf) + 1;
}

/* Does this core support mismatch breakpoints? */
static int core_has_mismatch_brps(void)
{
        return (get_debug_arch() >= ARM_DEBUG_ARCH_V7_ECP14 &&
                get_num_brp_resources() > 1);
}

/* Determine number of usable WRPs available. */
static int get_num_wrps(void)
{
        /*
         * On debug architectures prior to 7.1, when a watchpoint fires, the
         * only way to work out which watchpoint it was is by disassembling
         * the faulting instruction and working out the address of the memory
         * access.
         *
         * Furthermore, we can only do this if the watchpoint was precise
         * since imprecise watchpoints prevent us from calculating register
         * based addresses.
         *
         * Providing we have more than 1 breakpoint register, we only report
         * a single watchpoint register for the time being. This way, we always
         * know which watchpoint fired. In the future we can either add a
         * disassembler and address generation emulator, or we can insert a
         * check to see if the DFAR is set on watchpoint exception entry
         * [the ARM ARM states that the DFAR is UNKNOWN, but experience shows
         * that it is set on some implementations].
         */
        if (get_debug_arch() < ARM_DEBUG_ARCH_V7_1)
                return 1;

        return get_num_wrp_resources();
}

/* Determine number of usable BRPs available. */
static int get_num_brps(void)
{
        int brps = get_num_brp_resources();
        return core_has_mismatch_brps() ? brps - 1 : brps;
}
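
/*
 * When mismatch breakpoints are present, get_num_brps() holds the last
 * BRP back so that it is always free for the single-step emulation in
 * enable_single_step(); arch_install_hw_breakpoint() addresses that
 * reserved slot as ARM_BASE_BCR/BVR + core_num_brps.
 */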

/*
 * In order to access the breakpoint/watchpoint control registers,
 * we must be running in debug monitor mode. Unfortunately, we can
 * be put into halting debug mode at any time by an external debugger
 * but there is nothing we can do to prevent that.
 */
static int monitor_mode_enabled(void)
{
        u32 dscr;
        ARM_DBG_READ(c0, c1, 0, dscr);
        return !!(dscr & ARM_DSCR_MDBGEN);
}

static int enable_monitor_mode(void)
{
        u32 dscr;
        ARM_DBG_READ(c0, c1, 0, dscr);

        /* If monitor mode is already enabled, just return. */
        if (dscr & ARM_DSCR_MDBGEN)
                goto out;

        /* Write to the corresponding DSCR. */
        switch (get_debug_arch()) {
        case ARM_DEBUG_ARCH_V6:
        case ARM_DEBUG_ARCH_V6_1:
                ARM_DBG_WRITE(c0, c1, 0, (dscr | ARM_DSCR_MDBGEN));
                break;
        case ARM_DEBUG_ARCH_V7_ECP14:
        case ARM_DEBUG_ARCH_V7_1:
        case ARM_DEBUG_ARCH_V8:
                ARM_DBG_WRITE(c0, c2, 2, (dscr | ARM_DSCR_MDBGEN));
                isb();
                break;
        default:
                return -ENODEV;
        }

        /* Check that the write made it through. */
        ARM_DBG_READ(c0, c1, 0, dscr);
        if (!(dscr & ARM_DSCR_MDBGEN)) {
                pr_warn_once("Failed to enable monitor mode on CPU %d.\n",
                                smp_processor_id());
                return -EPERM;
        }

out:
        return 0;
}
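
/*
 * Note that the DSCR write in enable_monitor_mode() goes through
 * c0, c2, 2 (DBGDSCRext) on v7 and later because the v7 internal view
 * of the DSCR (c0, c1, 0) is read-only; v6 has a single read/write
 * view, hence the two encodings above.
 */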

int hw_breakpoint_slots(int type)
{
        if (!debug_arch_supported())
                return 0;

        /*
         * We can be called early, so don't rely on
         * our static variables being initialised.
         */
        switch (type) {
        case TYPE_INST:
                return get_num_brps();
        case TYPE_DATA:
                return get_num_wrps();
        default:
                pr_warn("unknown slot type: %d\n", type);
                return 0;
        }
}

/*
 * Check if 8-bit byte-address select is available.
 * This clobbers WRP 0.
 */
static u8 get_max_wp_len(void)
{
        u32 ctrl_reg;
        struct arch_hw_breakpoint_ctrl ctrl;
        u8 size = 4;

        if (debug_arch < ARM_DEBUG_ARCH_V7_ECP14)
                goto out;

        memset(&ctrl, 0, sizeof(ctrl));
        ctrl.len = ARM_BREAKPOINT_LEN_8;
        ctrl_reg = encode_ctrl_reg(ctrl);

        write_wb_reg(ARM_BASE_WVR, 0);
        write_wb_reg(ARM_BASE_WCR, ctrl_reg);
        if ((read_wb_reg(ARM_BASE_WCR) & ctrl_reg) == ctrl_reg)
                size = 8;

out:
        return size;
}
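
/*
 * get_max_wp_len() is only called from arch_hw_breakpoint_init() in
 * this file, so the probe writes to WRP 0 above happen before any
 * watchpoints are expected to have been installed.
 */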

u8 arch_get_max_wp_len(void)
{
        return max_watchpoint_len;
}

/*
 * Install a perf counter breakpoint.
 */
int arch_install_hw_breakpoint(struct perf_event *bp)
{
        struct arch_hw_breakpoint *info = counter_arch_bp(bp);
        struct perf_event **slot, **slots;
        int i, max_slots, ctrl_base, val_base;
        u32 addr, ctrl;

        addr = info->address;
        ctrl = encode_ctrl_reg(info->ctrl) | 0x1;

        if (info->ctrl.type == ARM_BREAKPOINT_EXECUTE) {
                /* Breakpoint */
                ctrl_base = ARM_BASE_BCR;
                val_base = ARM_BASE_BVR;
                slots = this_cpu_ptr(bp_on_reg);
                max_slots = core_num_brps;
        } else {
                /* Watchpoint */
                ctrl_base = ARM_BASE_WCR;
                val_base = ARM_BASE_WVR;
                slots = this_cpu_ptr(wp_on_reg);
                max_slots = core_num_wrps;
        }

        for (i = 0; i < max_slots; ++i) {
                slot = &slots[i];

                if (!*slot) {
                        *slot = bp;
                        break;
                }
        }

        if (i == max_slots) {
                pr_warn("Can't find any breakpoint slot\n");
                return -EBUSY;
        }

        /* Override the breakpoint data with the step data. */
        if (info->step_ctrl.enabled) {
                addr = info->trigger & ~0x3;
                ctrl = encode_ctrl_reg(info->step_ctrl);
                if (info->ctrl.type != ARM_BREAKPOINT_EXECUTE) {
                        i = 0;
                        ctrl_base = ARM_BASE_BCR + core_num_brps;
                        val_base = ARM_BASE_BVR + core_num_brps;
                }
        }

        /* Setup the address register. */
        write_wb_reg(val_base + i, addr);

        /* Setup the control register. */
        write_wb_reg(ctrl_base + i, ctrl);
        return 0;
}
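
/*
 * The step_ctrl override in arch_install_hw_breakpoint() redirects a
 * watchpoint that is being single-stepped into the reserved mismatch
 * BRP (index core_num_brps), since a watchpoint register cannot be
 * used to step over the triggering access; see enable_single_step().
 */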

void arch_uninstall_hw_breakpoint(struct perf_event *bp)
{
        struct arch_hw_breakpoint *info = counter_arch_bp(bp);
        struct perf_event **slot, **slots;
        int i, max_slots, base;

        if (info->ctrl.type == ARM_BREAKPOINT_EXECUTE) {
                /* Breakpoint */
                base = ARM_BASE_BCR;
                slots = this_cpu_ptr(bp_on_reg);
                max_slots = core_num_brps;
        } else {
                /* Watchpoint */
                base = ARM_BASE_WCR;
                slots = this_cpu_ptr(wp_on_reg);
                max_slots = core_num_wrps;
        }

        /* Remove the breakpoint. */
        for (i = 0; i < max_slots; ++i) {
                slot = &slots[i];

                if (*slot == bp) {
                        *slot = NULL;
                        break;
                }
        }

        if (i == max_slots) {
                pr_warn("Can't find any breakpoint slot\n");
                return;
        }

        /* Ensure that we disable the mismatch breakpoint. */
        if (info->ctrl.type != ARM_BREAKPOINT_EXECUTE &&
            info->step_ctrl.enabled) {
                i = 0;
                base = ARM_BASE_BCR + core_num_brps;
        }

        /* Reset the control register. */
        write_wb_reg(base + i, 0);
}

static int get_hbp_len(u8 hbp_len)
{
        unsigned int len_in_bytes = 0;

        switch (hbp_len) {
        case ARM_BREAKPOINT_LEN_1:
                len_in_bytes = 1;
                break;
        case ARM_BREAKPOINT_LEN_2:
                len_in_bytes = 2;
                break;
        case ARM_BREAKPOINT_LEN_4:
                len_in_bytes = 4;
                break;
        case ARM_BREAKPOINT_LEN_8:
                len_in_bytes = 8;
                break;
        }

        return len_in_bytes;
}

/*
 * Check whether bp virtual address is in kernel space.
 */
int arch_check_bp_in_kernelspace(struct arch_hw_breakpoint *hw)
{
        unsigned int len;
        unsigned long va;

        va = hw->address;
        len = get_hbp_len(hw->ctrl.len);

        return (va >= TASK_SIZE) && ((va + len - 1) >= TASK_SIZE);
}

/*
 * Extract generic type and length encodings from an arch_hw_breakpoint_ctrl.
 * Hopefully this will disappear when ptrace can bypass the conversion
 * to generic breakpoint descriptions.
 */
int arch_bp_generic_fields(struct arch_hw_breakpoint_ctrl ctrl,
                           int *gen_len, int *gen_type)
{
        /* Type */
        switch (ctrl.type) {
        case ARM_BREAKPOINT_EXECUTE:
                *gen_type = HW_BREAKPOINT_X;
                break;
        case ARM_BREAKPOINT_LOAD:
                *gen_type = HW_BREAKPOINT_R;
                break;
        case ARM_BREAKPOINT_STORE:
                *gen_type = HW_BREAKPOINT_W;
                break;
        case ARM_BREAKPOINT_LOAD | ARM_BREAKPOINT_STORE:
                *gen_type = HW_BREAKPOINT_RW;
                break;
        default:
                return -EINVAL;
        }

        /* Len */
        switch (ctrl.len) {
        case ARM_BREAKPOINT_LEN_1:
                *gen_len = HW_BREAKPOINT_LEN_1;
                break;
        case ARM_BREAKPOINT_LEN_2:
                *gen_len = HW_BREAKPOINT_LEN_2;
                break;
        case ARM_BREAKPOINT_LEN_4:
                *gen_len = HW_BREAKPOINT_LEN_4;
                break;
        case ARM_BREAKPOINT_LEN_8:
                *gen_len = HW_BREAKPOINT_LEN_8;
                break;
        default:
                return -EINVAL;
        }

        return 0;
}

/*
 * Construct an arch_hw_breakpoint from a perf_event.
 */
static int arch_build_bp_info(struct perf_event *bp,
                              const struct perf_event_attr *attr,
                              struct arch_hw_breakpoint *hw)
{
        /* Type */
        switch (attr->bp_type) {
        case HW_BREAKPOINT_X:
                hw->ctrl.type = ARM_BREAKPOINT_EXECUTE;
                break;
        case HW_BREAKPOINT_R:
                hw->ctrl.type = ARM_BREAKPOINT_LOAD;
                break;
        case HW_BREAKPOINT_W:
                hw->ctrl.type = ARM_BREAKPOINT_STORE;
                break;
        case HW_BREAKPOINT_RW:
                hw->ctrl.type = ARM_BREAKPOINT_LOAD | ARM_BREAKPOINT_STORE;
                break;
        default:
                return -EINVAL;
        }

        /* Len */
        switch (attr->bp_len) {
        case HW_BREAKPOINT_LEN_1:
                hw->ctrl.len = ARM_BREAKPOINT_LEN_1;
                break;
        case HW_BREAKPOINT_LEN_2:
                hw->ctrl.len = ARM_BREAKPOINT_LEN_2;
                break;
        case HW_BREAKPOINT_LEN_4:
                hw->ctrl.len = ARM_BREAKPOINT_LEN_4;
                break;
        case HW_BREAKPOINT_LEN_8:
                hw->ctrl.len = ARM_BREAKPOINT_LEN_8;
                if ((hw->ctrl.type != ARM_BREAKPOINT_EXECUTE)
                        && max_watchpoint_len >= 8)
                        break;
                /* Else, fall through */
        default:
                return -EINVAL;
        }

        /*
         * Breakpoints must be of length 2 (thumb) or 4 (ARM) bytes.
         * Watchpoints can be of length 1, 2, 4 or 8 bytes if supported
         * by the hardware and must be aligned to the appropriate number of
         * bytes.
         */
        if (hw->ctrl.type == ARM_BREAKPOINT_EXECUTE &&
            hw->ctrl.len != ARM_BREAKPOINT_LEN_2 &&
            hw->ctrl.len != ARM_BREAKPOINT_LEN_4)
                return -EINVAL;

        /* Address */
        hw->address = attr->bp_addr;

        /* Privilege */
        hw->ctrl.privilege = ARM_BREAKPOINT_USER;
        if (arch_check_bp_in_kernelspace(hw))
                hw->ctrl.privilege |= ARM_BREAKPOINT_PRIV;

        /* Enabled? */
        hw->ctrl.enabled = !attr->disabled;

        /* Mismatch */
        hw->ctrl.mismatch = 0;

        return 0;
}

/*
 * Validate the arch-specific HW Breakpoint register settings.
 */
int hw_breakpoint_arch_parse(struct perf_event *bp,
                             const struct perf_event_attr *attr,
                             struct arch_hw_breakpoint *hw)
{
        int ret = 0;
        u32 offset, alignment_mask = 0x3;

        /* Ensure that we are in monitor debug mode. */
        if (!monitor_mode_enabled())
                return -ENODEV;

        /* Build the arch_hw_breakpoint. */
        ret = arch_build_bp_info(bp, attr, hw);
        if (ret)
                goto out;

        /* Check address alignment. */
        if (hw->ctrl.len == ARM_BREAKPOINT_LEN_8)
                alignment_mask = 0x7;
        offset = hw->address & alignment_mask;
        switch (offset) {
        case 0:
                /* Aligned */
                break;
        case 1:
        case 2:
                /* Allow halfword watchpoints and breakpoints. */
                if (hw->ctrl.len == ARM_BREAKPOINT_LEN_2)
                        break;
                /* Else, fall through */
        case 3:
                /* Allow single byte watchpoint. */
                if (hw->ctrl.len == ARM_BREAKPOINT_LEN_1)
                        break;
                /* Else, fall through */
        default:
                ret = -EINVAL;
                goto out;
        }

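        /*
         * The value register must hold an aligned address, so the low
         * address bits are folded into the byte-address-select mask by
         * shifting the length field: e.g. a 1-byte watchpoint at byte
         * offset 3 ends up selecting only byte lane 3.
         */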
        hw->address &= ~alignment_mask;
        hw->ctrl.len <<= offset;

        if (is_default_overflow_handler(bp)) {
                /*
                 * Mismatch breakpoints are required for single-stepping
                 * breakpoints.
                 */
                if (!core_has_mismatch_brps())
                        return -EINVAL;

                /* We don't allow mismatch breakpoints in kernel space. */
                if (arch_check_bp_in_kernelspace(hw))
                        return -EPERM;

                /*
                 * Per-cpu breakpoints are not supported by our stepping
                 * mechanism.
                 */
                if (!bp->hw.target)
                        return -EINVAL;

                /*
                 * We only support specific access types if the fsr
                 * reports them.
                 */
                if (!debug_exception_updates_fsr() &&
                    (hw->ctrl.type == ARM_BREAKPOINT_LOAD ||
                     hw->ctrl.type == ARM_BREAKPOINT_STORE))
                        return -EINVAL;
        }

out:
        return ret;
}

/*
 * Enable/disable single-stepping over the breakpoint bp at address addr.
 */
static void enable_single_step(struct perf_event *bp, u32 addr)
{
        struct arch_hw_breakpoint *info = counter_arch_bp(bp);

        arch_uninstall_hw_breakpoint(bp);
        info->step_ctrl.mismatch  = 1;
        info->step_ctrl.len       = ARM_BREAKPOINT_LEN_4;
        info->step_ctrl.type      = ARM_BREAKPOINT_EXECUTE;
        info->step_ctrl.privilege = info->ctrl.privilege;
        info->step_ctrl.enabled   = 1;
        info->trigger             = addr;
        arch_install_hw_breakpoint(bp);
}

static void disable_single_step(struct perf_event *bp)
{
        arch_uninstall_hw_breakpoint(bp);
        counter_arch_bp(bp)->step_ctrl.enabled = 0;
        arch_install_hw_breakpoint(bp);
}
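
/*
 * Single-stepping over a breakpoint or watchpoint works by swapping the
 * event's real control value for a mismatch breakpoint on the triggering
 * instruction: the mismatch fires as soon as the PC moves on to a
 * different instruction, and the handlers below then restore the
 * original breakpoint or watchpoint.
 */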

static void watchpoint_handler(unsigned long addr, unsigned int fsr,
                               struct pt_regs *regs)
{
        int i, access;
        u32 val, ctrl_reg, alignment_mask;
        struct perf_event *wp, **slots;
        struct arch_hw_breakpoint *info;
        struct arch_hw_breakpoint_ctrl ctrl;

        slots = this_cpu_ptr(wp_on_reg);

        for (i = 0; i < core_num_wrps; ++i) {
                rcu_read_lock();

                wp = slots[i];

                if (wp == NULL)
                        goto unlock;

                info = counter_arch_bp(wp);
                /*
                 * The DFAR is an unknown value on debug architectures prior
                 * to 7.1. Since we only allow a single watchpoint on these
                 * older CPUs, we can set the trigger to the lowest possible
                 * faulting address.
                 */
                if (debug_arch < ARM_DEBUG_ARCH_V7_1) {
                        BUG_ON(i > 0);
                        info->trigger = wp->attr.bp_addr;
                } else {
                        if (info->ctrl.len == ARM_BREAKPOINT_LEN_8)
                                alignment_mask = 0x7;
                        else
                                alignment_mask = 0x3;

                        /* Check if the watchpoint value matches. */
                        val = read_wb_reg(ARM_BASE_WVR + i);
                        if (val != (addr & ~alignment_mask))
                                goto unlock;

                        /* Possible match, check the byte address select. */
                        ctrl_reg = read_wb_reg(ARM_BASE_WCR + i);
                        decode_ctrl_reg(ctrl_reg, &ctrl);
                        if (!((1 << (addr & alignment_mask)) & ctrl.len))
                                goto unlock;

                        /* Check that the access type matches. */
                        if (debug_exception_updates_fsr()) {
                                access = (fsr & ARM_FSR_ACCESS_MASK) ?
                                          HW_BREAKPOINT_W : HW_BREAKPOINT_R;
                                if (!(access & hw_breakpoint_type(wp)))
                                        goto unlock;
                        }

                        /* We have a winner. */
                        info->trigger = addr;
                }

                pr_debug("watchpoint fired: address = 0x%x\n", info->trigger);
                perf_bp_event(wp, regs);

                /*
                 * If no overflow handler is present, insert a temporary
                 * mismatch breakpoint so we can single-step over the
                 * watchpoint trigger.
                 */
                if (is_default_overflow_handler(wp))
                        enable_single_step(wp, instruction_pointer(regs));

unlock:
                rcu_read_unlock();
        }
}

static void watchpoint_single_step_handler(unsigned long pc)
{
        int i;
        struct perf_event *wp, **slots;
        struct arch_hw_breakpoint *info;

        slots = this_cpu_ptr(wp_on_reg);

        for (i = 0; i < core_num_wrps; ++i) {
                rcu_read_lock();

                wp = slots[i];

                if (wp == NULL)
                        goto unlock;

                info = counter_arch_bp(wp);
                if (!info->step_ctrl.enabled)
                        goto unlock;

                /*
                 * Restore the original watchpoint if we've completed the
                 * single-step.
                 */
                if (info->trigger != pc)
                        disable_single_step(wp);

unlock:
                rcu_read_unlock();
        }
}

static void breakpoint_handler(unsigned long unknown, struct pt_regs *regs)
{
        int i;
        u32 ctrl_reg, val, addr;
        struct perf_event *bp, **slots;
        struct arch_hw_breakpoint *info;
        struct arch_hw_breakpoint_ctrl ctrl;

        slots = this_cpu_ptr(bp_on_reg);

        /* The exception entry code places the amended lr in the PC. */
        addr = regs->ARM_pc;

        /* Check the currently installed breakpoints first. */
        for (i = 0; i < core_num_brps; ++i) {
                rcu_read_lock();

                bp = slots[i];

                if (bp == NULL)
                        goto unlock;

                info = counter_arch_bp(bp);

                /* Check if the breakpoint value matches. */
                val = read_wb_reg(ARM_BASE_BVR + i);
                if (val != (addr & ~0x3))
                        goto mismatch;

                /* Possible match, check the byte address select to confirm. */
                ctrl_reg = read_wb_reg(ARM_BASE_BCR + i);
                decode_ctrl_reg(ctrl_reg, &ctrl);
                if ((1 << (addr & 0x3)) & ctrl.len) {
                        info->trigger = addr;
                        pr_debug("breakpoint fired: address = 0x%x\n", addr);
                        perf_bp_event(bp, regs);
                        if (!bp->overflow_handler)
                                enable_single_step(bp, addr);
                        goto unlock;
                }

mismatch:
                /* If we're stepping a breakpoint, it can now be restored. */
                if (info->step_ctrl.enabled)
                        disable_single_step(bp);
unlock:
                rcu_read_unlock();
        }

        /* Handle any pending watchpoint single-step breakpoints. */
        watchpoint_single_step_handler(addr);
}

/*
 * Called from either the Data Abort Handler [watchpoint] or the
 * Prefetch Abort Handler [breakpoint] with interrupts disabled.
 */
static int hw_breakpoint_pending(unsigned long addr, unsigned int fsr,
                                 struct pt_regs *regs)
{
        int ret = 0;
        u32 dscr;

        preempt_disable();

        if (interrupts_enabled(regs))
                local_irq_enable();

        /* We only handle watchpoints and hardware breakpoints. */
        ARM_DBG_READ(c0, c1, 0, dscr);

        /* Perform perf callbacks. */
        switch (ARM_DSCR_MOE(dscr)) {
        case ARM_ENTRY_BREAKPOINT:
                breakpoint_handler(addr, regs);
                break;
        case ARM_ENTRY_ASYNC_WATCHPOINT:
                WARN(1, "Asynchronous watchpoint exception taken. Debugging results may be unreliable\n");
                /* Fall through */
        case ARM_ENTRY_SYNC_WATCHPOINT:
                watchpoint_handler(addr, fsr, regs);
                break;
        default:
                ret = 1; /* Unhandled fault. */
        }

        preempt_enable();

        return ret;
}

/*
 * One-time initialisation.
 */
static cpumask_t debug_err_mask;

static int debug_reg_trap(struct pt_regs *regs, unsigned int instr)
{
        int cpu = smp_processor_id();

        pr_warn("Debug register access (0x%x) caused undefined instruction on CPU %d\n",
                instr, cpu);

        /* Set the error flag for this CPU and skip the faulting instruction. */
        cpumask_set_cpu(cpu, &debug_err_mask);
        instruction_pointer(regs) += 4;
        return 0;
}

static struct undef_hook debug_reg_hook = {
        .instr_mask     = 0x0fe80f10,
        .instr_val      = 0x0e000e10,
        .fn             = debug_reg_trap,
};
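
/*
 * The mask/value above match MRC/MCR accesses to CP14 (the debug
 * coprocessor), so a debug register access that UNDEFs (for example
 * because DBGSWENABLE is driven low or the debug logic is powered
 * down) is recorded in debug_err_mask by debug_reg_trap() instead of
 * being treated as a fatal undefined instruction.
 */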

/* Does this core support OS Save and Restore? */
static bool core_has_os_save_restore(void)
{
        u32 oslsr;

        switch (get_debug_arch()) {
        case ARM_DEBUG_ARCH_V7_1:
                return true;
        case ARM_DEBUG_ARCH_V7_ECP14:
                ARM_DBG_READ(c1, c1, 4, oslsr);
                if (oslsr & ARM_OSLSR_OSLM0)
                        return true;
                /* Else, fall through */
        default:
                return false;
        }
}

static void reset_ctrl_regs(unsigned int cpu)
{
        int i, raw_num_brps, err = 0;
        u32 val;

        /*
         * v7 debug contains save and restore registers so that debug state
         * can be maintained across low-power modes without leaving the debug
         * logic powered up. It is IMPLEMENTATION DEFINED whether we can access
         * the debug registers out of reset, so we must unlock the OS Lock
         * Access Register to avoid taking undefined instruction exceptions
         * later on.
         */
        switch (debug_arch) {
        case ARM_DEBUG_ARCH_V6:
        case ARM_DEBUG_ARCH_V6_1:
                /* ARMv6 cores clear the registers out of reset. */
                goto out_mdbgen;
        case ARM_DEBUG_ARCH_V7_ECP14:
                /*
                 * Ensure sticky power-down is clear (i.e. debug logic is
                 * powered up).
                 */
                ARM_DBG_READ(c1, c5, 4, val);
                if ((val & 0x1) == 0)
                        err = -EPERM;

                if (!has_ossr)
                        goto clear_vcr;
                break;
        case ARM_DEBUG_ARCH_V7_1:
                /*
                 * Ensure the OS double lock is clear.
                 */
                ARM_DBG_READ(c1, c3, 4, val);
                if ((val & 0x1) == 1)
                        err = -EPERM;
                break;
        }

        if (err) {
                pr_warn_once("CPU %d debug is powered down!\n", cpu);
                cpumask_or(&debug_err_mask, &debug_err_mask, cpumask_of(cpu));
                return;
        }

        /*
         * Unconditionally clear the OS lock by writing a value
         * other than CS_LAR_KEY to the access register.
         */
        ARM_DBG_WRITE(c1, c0, 4, ~CORESIGHT_UNLOCK);
        isb();

        /*
         * Clear any configured vector-catch events before
         * enabling monitor mode.
         */
clear_vcr:
        ARM_DBG_WRITE(c0, c7, 0, 0);
        isb();

        if (cpumask_intersects(&debug_err_mask, cpumask_of(cpu))) {
                pr_warn_once("CPU %d failed to disable vector catch\n", cpu);
                return;
        }

        /*
         * The control/value register pairs are UNKNOWN out of reset so
         * clear them to avoid spurious debug events.
         */
        raw_num_brps = get_num_brp_resources();
        for (i = 0; i < raw_num_brps; ++i) {
                write_wb_reg(ARM_BASE_BCR + i, 0UL);
                write_wb_reg(ARM_BASE_BVR + i, 0UL);
        }

        for (i = 0; i < core_num_wrps; ++i) {
                write_wb_reg(ARM_BASE_WCR + i, 0UL);
                write_wb_reg(ARM_BASE_WVR + i, 0UL);
        }

        if (cpumask_intersects(&debug_err_mask, cpumask_of(cpu))) {
                pr_warn_once("CPU %d failed to clear debug register pairs\n", cpu);
                return;
        }

        /*
         * Have a crack at enabling monitor mode. We don't actually need
         * it yet, but reporting an error early is useful if it fails.
         */
out_mdbgen:
        if (enable_monitor_mode())
                cpumask_or(&debug_err_mask, &debug_err_mask, cpumask_of(cpu));
}

static int dbg_reset_online(unsigned int cpu)
{
        local_irq_disable();
        reset_ctrl_regs(cpu);
        local_irq_enable();
        return 0;
}

#ifdef CONFIG_CPU_PM
static int dbg_cpu_pm_notify(struct notifier_block *self, unsigned long action,
                             void *v)
{
        if (action == CPU_PM_EXIT)
                reset_ctrl_regs(smp_processor_id());

        return NOTIFY_OK;
}

static struct notifier_block dbg_cpu_pm_nb = {
        .notifier_call = dbg_cpu_pm_notify,
};

static void __init pm_init(void)
{
        cpu_pm_register_notifier(&dbg_cpu_pm_nb);
}
#else
static inline void pm_init(void)
{
}
#endif

static int __init arch_hw_breakpoint_init(void)
{
        int ret;

        debug_arch = get_debug_arch();

        if (!debug_arch_supported()) {
                pr_info("debug architecture 0x%x unsupported.\n", debug_arch);
                return 0;
        }

        /*
         * Scorpion CPUs (at least those in APQ8060) seem to set DBGPRSR.SPD
         * whenever a WFI is issued, even if the core is not powered down, in
         * violation of the architecture.  When DBGPRSR.SPD is set, accesses to
         * breakpoint and watchpoint registers are treated as undefined, so
         * this results in boot time and runtime failures when these are
         * accessed and we unexpectedly take a trap.
         *
         * It's not clear if/how this can be worked around, so we blacklist
         * Scorpion CPUs to avoid these issues.
         */
        if (read_cpuid_part() == ARM_CPU_PART_SCORPION) {
                pr_info("Scorpion CPU detected. Hardware breakpoints and watchpoints disabled\n");
                return 0;
        }

        has_ossr = core_has_os_save_restore();

        /* Determine how many BRPs/WRPs are available. */
        core_num_brps = get_num_brps();
        core_num_wrps = get_num_wrps();

        /*
         * We need to tread carefully here because DBGSWENABLE may be
         * driven low on this core and there isn't an architected way to
         * determine that.
         */
        cpus_read_lock();
        register_undef_hook(&debug_reg_hook);

        /*
         * Register CPU notifier which resets the breakpoint resources. We
         * assume that a halting debugger will leave the world in a nice state
         * for us.
         */
        ret = cpuhp_setup_state_cpuslocked(CPUHP_AP_ONLINE_DYN,
                                           "arm/hw_breakpoint:online",
                                           dbg_reset_online, NULL);
        unregister_undef_hook(&debug_reg_hook);
        if (WARN_ON(ret < 0) || !cpumask_empty(&debug_err_mask)) {
                core_num_brps = 0;
                core_num_wrps = 0;
                if (ret > 0)
                        cpuhp_remove_state_nocalls_cpuslocked(ret);
                cpus_read_unlock();
                return 0;
        }

        pr_info("found %d " "%s" "breakpoint and %d watchpoint registers.\n",
                core_num_brps, core_has_mismatch_brps() ? "(+1 reserved) " :
                "", core_num_wrps);

        /* Work out the maximum supported watchpoint length. */
        max_watchpoint_len = get_max_wp_len();
        pr_info("maximum watchpoint size is %u bytes.\n",
                        max_watchpoint_len);

        /* Register debug fault handler. */
        hook_fault_code(FAULT_CODE_DEBUG, hw_breakpoint_pending, SIGTRAP,
                        TRAP_HWBKPT, "watchpoint debug exception");
        hook_ifault_code(FAULT_CODE_DEBUG, hw_breakpoint_pending, SIGTRAP,
                        TRAP_HWBKPT, "breakpoint debug exception");
        cpus_read_unlock();

        /* Register PM notifiers. */
        pm_init();
        return 0;
}
arch_initcall(arch_hw_breakpoint_init);

void hw_breakpoint_pmu_read(struct perf_event *bp)
{
}

/*
 * Dummy function to register with die_notifier.
 */
int hw_breakpoint_exceptions_notify(struct notifier_block *unused,
                                        unsigned long val, void *data)
{
        return NOTIFY_DONE;
}