linux/arch/arm/kernel/hw_breakpoint.c
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 *
   4 * Copyright (C) 2009, 2010 ARM Limited
   5 *
   6 * Author: Will Deacon <will.deacon@arm.com>
   7 */
   8
   9/*
  10 * HW_breakpoint: a unified kernel/user-space hardware breakpoint facility,
  11 * using the CPU's debug registers.
  12 */
  13#define pr_fmt(fmt) "hw-breakpoint: " fmt
  14
  15#include <linux/errno.h>
  16#include <linux/hardirq.h>
  17#include <linux/perf_event.h>
  18#include <linux/hw_breakpoint.h>
  19#include <linux/smp.h>
  20#include <linux/cpu_pm.h>
  21#include <linux/coresight.h>
  22
  23#include <asm/cacheflush.h>
  24#include <asm/cputype.h>
  25#include <asm/current.h>
  26#include <asm/hw_breakpoint.h>
  27#include <asm/traps.h>
  28
  29/* Breakpoint currently in use for each BRP. */
  30static DEFINE_PER_CPU(struct perf_event *, bp_on_reg[ARM_MAX_BRP]);
  31
  32/* Watchpoint currently in use for each WRP. */
  33static DEFINE_PER_CPU(struct perf_event *, wp_on_reg[ARM_MAX_WRP]);
  34
  35/* Number of BRP/WRP registers on this CPU. */
  36static int core_num_brps __ro_after_init;
  37static int core_num_wrps __ro_after_init;
  38
  39/* Debug architecture version. */
  40static u8 debug_arch __ro_after_init;
  41
  42/* Does debug architecture support OS Save and Restore? */
  43static bool has_ossr __ro_after_init;
  44
  45/* Maximum supported watchpoint length. */
  46static u8 max_watchpoint_len __ro_after_init;
  47
  48#define READ_WB_REG_CASE(OP2, M, VAL)                   \
  49        case ((OP2 << 4) + M):                          \
  50                ARM_DBG_READ(c0, c ## M, OP2, VAL);     \
  51                break
  52
  53#define WRITE_WB_REG_CASE(OP2, M, VAL)                  \
  54        case ((OP2 << 4) + M):                          \
  55                ARM_DBG_WRITE(c0, c ## M, OP2, VAL);    \
  56                break
  57
  58#define GEN_READ_WB_REG_CASES(OP2, VAL)         \
  59        READ_WB_REG_CASE(OP2, 0, VAL);          \
  60        READ_WB_REG_CASE(OP2, 1, VAL);          \
  61        READ_WB_REG_CASE(OP2, 2, VAL);          \
  62        READ_WB_REG_CASE(OP2, 3, VAL);          \
  63        READ_WB_REG_CASE(OP2, 4, VAL);          \
  64        READ_WB_REG_CASE(OP2, 5, VAL);          \
  65        READ_WB_REG_CASE(OP2, 6, VAL);          \
  66        READ_WB_REG_CASE(OP2, 7, VAL);          \
  67        READ_WB_REG_CASE(OP2, 8, VAL);          \
  68        READ_WB_REG_CASE(OP2, 9, VAL);          \
  69        READ_WB_REG_CASE(OP2, 10, VAL);         \
  70        READ_WB_REG_CASE(OP2, 11, VAL);         \
  71        READ_WB_REG_CASE(OP2, 12, VAL);         \
  72        READ_WB_REG_CASE(OP2, 13, VAL);         \
  73        READ_WB_REG_CASE(OP2, 14, VAL);         \
  74        READ_WB_REG_CASE(OP2, 15, VAL)
  75
  76#define GEN_WRITE_WB_REG_CASES(OP2, VAL)        \
  77        WRITE_WB_REG_CASE(OP2, 0, VAL);         \
  78        WRITE_WB_REG_CASE(OP2, 1, VAL);         \
  79        WRITE_WB_REG_CASE(OP2, 2, VAL);         \
  80        WRITE_WB_REG_CASE(OP2, 3, VAL);         \
  81        WRITE_WB_REG_CASE(OP2, 4, VAL);         \
  82        WRITE_WB_REG_CASE(OP2, 5, VAL);         \
  83        WRITE_WB_REG_CASE(OP2, 6, VAL);         \
  84        WRITE_WB_REG_CASE(OP2, 7, VAL);         \
  85        WRITE_WB_REG_CASE(OP2, 8, VAL);         \
  86        WRITE_WB_REG_CASE(OP2, 9, VAL);         \
  87        WRITE_WB_REG_CASE(OP2, 10, VAL);        \
  88        WRITE_WB_REG_CASE(OP2, 11, VAL);        \
  89        WRITE_WB_REG_CASE(OP2, 12, VAL);        \
  90        WRITE_WB_REG_CASE(OP2, 13, VAL);        \
  91        WRITE_WB_REG_CASE(OP2, 14, VAL);        \
  92        WRITE_WB_REG_CASE(OP2, 15, VAL)
  93
  94static u32 read_wb_reg(int n)
  95{
  96        u32 val = 0;
  97
  98        switch (n) {
  99        GEN_READ_WB_REG_CASES(ARM_OP2_BVR, val);
 100        GEN_READ_WB_REG_CASES(ARM_OP2_BCR, val);
 101        GEN_READ_WB_REG_CASES(ARM_OP2_WVR, val);
 102        GEN_READ_WB_REG_CASES(ARM_OP2_WCR, val);
 103        default:
 104                pr_warn("attempt to read from unknown breakpoint register %d\n",
 105                        n);
 106        }
 107
 108        return val;
 109}
 110
 111static void write_wb_reg(int n, u32 val)
 112{
 113        switch (n) {
 114        GEN_WRITE_WB_REG_CASES(ARM_OP2_BVR, val);
 115        GEN_WRITE_WB_REG_CASES(ARM_OP2_BCR, val);
 116        GEN_WRITE_WB_REG_CASES(ARM_OP2_WVR, val);
 117        GEN_WRITE_WB_REG_CASES(ARM_OP2_WCR, val);
 118        default:
 119                pr_warn("attempt to write to unknown breakpoint register %d\n",
 120                        n);
 121        }
 122        isb();
 123}
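/*
 * Illustrative sketch of the indexing scheme used by read_wb_reg() and
 * write_wb_reg(): an index n packs the cp14 op2 value into bits [7:4] and
 * the CRm number (the breakpoint/watchpoint slot) into bits [3:0], matching
 * the (OP2 << 4) + M case labels above. Assuming the ARM_BASE_* constants in
 * <asm/hw_breakpoint.h> are defined as (ARM_OP2_* << 4), an access such as
 * read_wb_reg(ARM_BASE_WVR + i) decomposes as:
 *
 *	op2 = (n >> 4) & 0xf;	-> ARM_OP2_WVR, selecting the DBGWVRn bank
 *	crm = n & 0xf;		-> slot number i, i.e. register DBGWVRi
 */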
 124
 125/* Determine debug architecture. */
 126static u8 get_debug_arch(void)
 127{
 128        u32 didr;
 129
 130        /* Do we implement the extended CPUID interface? */
 131        if (((read_cpuid_id() >> 16) & 0xf) != 0xf) {
 132                pr_warn_once("CPUID feature registers not supported. "
 133                             "Assuming v6 debug is present.\n");
 134                return ARM_DEBUG_ARCH_V6;
 135        }
 136
 137        ARM_DBG_READ(c0, c0, 0, didr);
 138        return (didr >> 16) & 0xf;
 139}
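/*
 * Worked example (sketch): read_cpuid_id() returns the MIDR, whose
 * architecture field sits in bits [19:16]; the value 0xf means the CPUID
 * scheme (and hence the DIDR) is implemented. For a Cortex-A9 MIDR of
 * 0x413fc090:
 *
 *	((0x413fc090 >> 16) & 0xf) == 0xf	-> extended CPUID interface
 *	debug_arch = (didr >> 16) & 0xf		-> e.g. 0x3, which is
 *						   ARM_DEBUG_ARCH_V7_ECP14 on a
 *						   v7 part with the full cp14
 *						   debug interface
 */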
 140
 141u8 arch_get_debug_arch(void)
 142{
 143        return debug_arch;
 144}
 145
 146static int debug_arch_supported(void)
 147{
 148        u8 arch = get_debug_arch();
 149
 150        /* We don't support the memory-mapped interface. */
 151        return (arch >= ARM_DEBUG_ARCH_V6 && arch <= ARM_DEBUG_ARCH_V7_ECP14) ||
 152                arch >= ARM_DEBUG_ARCH_V7_1;
 153}
 154
 155/* Can we determine the watchpoint access type from the fsr? */
 156static int debug_exception_updates_fsr(void)
 157{
 158        return get_debug_arch() >= ARM_DEBUG_ARCH_V8;
 159}
 160
 161/* Determine number of WRP registers available. */
 162static int get_num_wrp_resources(void)
 163{
 164        u32 didr;
 165        ARM_DBG_READ(c0, c0, 0, didr);
 166        return ((didr >> 28) & 0xf) + 1;
 167}
 168
 169/* Determine number of BRP registers available. */
 170static int get_num_brp_resources(void)
 171{
 172        u32 didr;
 173        ARM_DBG_READ(c0, c0, 0, didr);
 174        return ((didr >> 24) & 0xf) + 1;
 175}
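/*
 * Worked example (sketch): both helpers pull their counts straight out of
 * the DIDR, with WRPs encoded in bits [31:28] and BRPs in bits [27:24], each
 * as "number of registers minus one". For a hypothetical DIDR of 0x3515f061:
 *
 *	wrps = ((0x3515f061 >> 28) & 0xf) + 1;	-> 0x3 + 1 = 4 watchpoints
 *	brps = ((0x3515f061 >> 24) & 0xf) + 1;	-> 0x5 + 1 = 6 breakpoints
 */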
 176
 177/* Does this core support mismatch breakpoints? */
 178static int core_has_mismatch_brps(void)
 179{
 180        return (get_debug_arch() >= ARM_DEBUG_ARCH_V7_ECP14 &&
 181                get_num_brp_resources() > 1);
 182}
 183
 184/* Determine number of usable WRPs available. */
 185static int get_num_wrps(void)
 186{
 187        /*
 188         * On debug architectures prior to 7.1, when a watchpoint fires, the
 189         * only way to work out which watchpoint it was is by disassembling
 190         * the faulting instruction and working out the address of the memory
 191         * access.
 192         *
 193         * Furthermore, we can only do this if the watchpoint was precise
 194         * since imprecise watchpoints prevent us from calculating register
 195         * based addresses.
 196         *
 197         * Providing we have more than 1 breakpoint register, we only report
 198         * a single watchpoint register for the time being. This way, we always
 199         * know which watchpoint fired. In the future we can either add a
 200         * disassembler and address generation emulator, or we can insert a
 201         * check to see if the DFAR is set on watchpoint exception entry
 202         * [the ARM ARM states that the DFAR is UNKNOWN, but experience shows
 203         * that it is set on some implementations].
 204         */
 205        if (get_debug_arch() < ARM_DEBUG_ARCH_V7_1)
 206                return 1;
 207
 208        return get_num_wrp_resources();
 209}
 210
 211/* Determine number of usable BRPs available. */
 212static int get_num_brps(void)
 213{
 214        int brps = get_num_brp_resources();
 215        return core_has_mismatch_brps() ? brps - 1 : brps;
 216}
 217
 218/*
 219 * In order to access the breakpoint/watchpoint control registers,
 220 * we must be running in debug monitor mode. Unfortunately, we can
 221 * be put into halting debug mode at any time by an external debugger
 222 * but there is nothing we can do to prevent that.
 223 */
 224static int monitor_mode_enabled(void)
 225{
 226        u32 dscr;
 227        ARM_DBG_READ(c0, c1, 0, dscr);
 228        return !!(dscr & ARM_DSCR_MDBGEN);
 229}
 230
 231static int enable_monitor_mode(void)
 232{
 233        u32 dscr;
 234        ARM_DBG_READ(c0, c1, 0, dscr);
 235
 236        /* If monitor mode is already enabled, just return. */
 237        if (dscr & ARM_DSCR_MDBGEN)
 238                goto out;
 239
 240        /* Write to the corresponding DSCR. */
 241        switch (get_debug_arch()) {
 242        case ARM_DEBUG_ARCH_V6:
 243        case ARM_DEBUG_ARCH_V6_1:
 244                ARM_DBG_WRITE(c0, c1, 0, (dscr | ARM_DSCR_MDBGEN));
 245                break;
 246        case ARM_DEBUG_ARCH_V7_ECP14:
 247        case ARM_DEBUG_ARCH_V7_1:
 248        case ARM_DEBUG_ARCH_V8:
 249                ARM_DBG_WRITE(c0, c2, 2, (dscr | ARM_DSCR_MDBGEN));
 250                isb();
 251                break;
 252        default:
 253                return -ENODEV;
 254        }
 255
 256        /* Check that the write made it through. */
 257        ARM_DBG_READ(c0, c1, 0, dscr);
 258        if (!(dscr & ARM_DSCR_MDBGEN)) {
 259                pr_warn_once("Failed to enable monitor mode on CPU %d.\n",
 260                                smp_processor_id());
 261                return -EPERM;
 262        }
 263
 264out:
 265        return 0;
 266}
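/*
 * Note on the two DSCR encodings used above (descriptive sketch): v6 debug
 * reads and writes the DSCR through c0, c1, 0, whereas from v7 onwards that
 * encoding is the read-only internal view (DBGDSCRint) and the write has to
 * go through the external view, DBGDSCRext at c0, c2, 2, followed by an ISB
 * so that the new MDBGEN value is visible to the read-back check. Callers
 * only ever need the boolean test, e.g. as hw_breakpoint_arch_parse() does:
 *
 *	if (!monitor_mode_enabled())
 *		return -ENODEV;
 */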
 267
 268int hw_breakpoint_slots(int type)
 269{
 270        if (!debug_arch_supported())
 271                return 0;
 272
 273        /*
 274         * We can be called early, so don't rely on
 275         * our static variables being initialised.
 276         */
 277        switch (type) {
 278        case TYPE_INST:
 279                return get_num_brps();
 280        case TYPE_DATA:
 281                return get_num_wrps();
 282        default:
 283                pr_warn("unknown slot type: %d\n", type);
 284                return 0;
 285        }
 286}
 287
 288/*
 289 * Check if 8-bit byte-address select is available.
 290 * This clobbers WRP 0.
 291 */
 292static u8 get_max_wp_len(void)
 293{
 294        u32 ctrl_reg;
 295        struct arch_hw_breakpoint_ctrl ctrl;
 296        u8 size = 4;
 297
 298        if (debug_arch < ARM_DEBUG_ARCH_V7_ECP14)
 299                goto out;
 300
 301        memset(&ctrl, 0, sizeof(ctrl));
 302        ctrl.len = ARM_BREAKPOINT_LEN_8;
 303        ctrl_reg = encode_ctrl_reg(ctrl);
 304
 305        write_wb_reg(ARM_BASE_WVR, 0);
 306        write_wb_reg(ARM_BASE_WCR, ctrl_reg);
 307        if ((read_wb_reg(ARM_BASE_WCR) & ctrl_reg) == ctrl_reg)
 308                size = 8;
 309
 310out:
 311        return size;
 312}
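/*
 * Sketch of the probe above, assuming ARM_BREAKPOINT_LEN_8 is the 8-bit
 * byte-address-select value 0xff and that encode_ctrl_reg() packs .len into
 * the BAS field of the watchpoint control register:
 *
 *	ctrl.len = ARM_BREAKPOINT_LEN_8;	-> request all eight BAS bits
 *	write_wb_reg(ARM_BASE_WCR, encode_ctrl_reg(ctrl));
 *
 * If the register only implements a 4-bit BAS, the upper bits read back as
 * zero, the (read_wb_reg(ARM_BASE_WCR) & ctrl_reg) == ctrl_reg test fails
 * and max_watchpoint_len stays at 4 bytes.
 */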
 313
 314u8 arch_get_max_wp_len(void)
 315{
 316        return max_watchpoint_len;
 317}
 318
 319/*
 320 * Install a perf counter breakpoint.
 321 */
 322int arch_install_hw_breakpoint(struct perf_event *bp)
 323{
 324        struct arch_hw_breakpoint *info = counter_arch_bp(bp);
 325        struct perf_event **slot, **slots;
 326        int i, max_slots, ctrl_base, val_base;
 327        u32 addr, ctrl;
 328
 329        addr = info->address;
 330        ctrl = encode_ctrl_reg(info->ctrl) | 0x1;
 331
 332        if (info->ctrl.type == ARM_BREAKPOINT_EXECUTE) {
 333                /* Breakpoint */
 334                ctrl_base = ARM_BASE_BCR;
 335                val_base = ARM_BASE_BVR;
 336                slots = this_cpu_ptr(bp_on_reg);
 337                max_slots = core_num_brps;
 338        } else {
 339                /* Watchpoint */
 340                ctrl_base = ARM_BASE_WCR;
 341                val_base = ARM_BASE_WVR;
 342                slots = this_cpu_ptr(wp_on_reg);
 343                max_slots = core_num_wrps;
 344        }
 345
 346        for (i = 0; i < max_slots; ++i) {
 347                slot = &slots[i];
 348
 349                if (!*slot) {
 350                        *slot = bp;
 351                        break;
 352                }
 353        }
 354
 355        if (i == max_slots) {
 356                pr_warn("Can't find any breakpoint slot\n");
 357                return -EBUSY;
 358        }
 359
 360        /* Override the breakpoint data with the step data. */
 361        if (info->step_ctrl.enabled) {
 362                addr = info->trigger & ~0x3;
 363                ctrl = encode_ctrl_reg(info->step_ctrl);
 364                if (info->ctrl.type != ARM_BREAKPOINT_EXECUTE) {
 365                        i = 0;
 366                        ctrl_base = ARM_BASE_BCR + core_num_brps;
 367                        val_base = ARM_BASE_BVR + core_num_brps;
 368                }
 369        }
 370
 371        /* Setup the address register. */
 372        write_wb_reg(val_base + i, addr);
 373
 374        /* Setup the control register. */
 375        write_wb_reg(ctrl_base + i, ctrl);
 376        return 0;
 377}
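/*
 * Note (sketch): when info->step_ctrl is enabled for a watchpoint, the slot
 * just claimed in wp_on_reg keeps tracking the event, but the hardware is
 * programmed with a breakpoint at ARM_BASE_BVR/BCR + core_num_brps instead.
 * Since get_num_brps() reserves one BRP on cores with mismatch support, that
 * index refers to the spare, mismatch-capable breakpoint register rather
 * than one handed out to perf. For example, with 6 BRP resources of which
 * one is reserved, core_num_brps == 5 and the step data lands in DBGBVR5 and
 * DBGBCR5.
 */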
 378
 379void arch_uninstall_hw_breakpoint(struct perf_event *bp)
 380{
 381        struct arch_hw_breakpoint *info = counter_arch_bp(bp);
 382        struct perf_event **slot, **slots;
 383        int i, max_slots, base;
 384
 385        if (info->ctrl.type == ARM_BREAKPOINT_EXECUTE) {
 386                /* Breakpoint */
 387                base = ARM_BASE_BCR;
 388                slots = this_cpu_ptr(bp_on_reg);
 389                max_slots = core_num_brps;
 390        } else {
 391                /* Watchpoint */
 392                base = ARM_BASE_WCR;
 393                slots = this_cpu_ptr(wp_on_reg);
 394                max_slots = core_num_wrps;
 395        }
 396
 397        /* Remove the breakpoint. */
 398        for (i = 0; i < max_slots; ++i) {
 399                slot = &slots[i];
 400
 401                if (*slot == bp) {
 402                        *slot = NULL;
 403                        break;
 404                }
 405        }
 406
 407        if (i == max_slots) {
 408                pr_warn("Can't find any breakpoint slot\n");
 409                return;
 410        }
 411
 412        /* Ensure that we disable the mismatch breakpoint. */
 413        if (info->ctrl.type != ARM_BREAKPOINT_EXECUTE &&
 414            info->step_ctrl.enabled) {
 415                i = 0;
 416                base = ARM_BASE_BCR + core_num_brps;
 417        }
 418
 419        /* Reset the control register. */
 420        write_wb_reg(base + i, 0);
 421}
 422
 423static int get_hbp_len(u8 hbp_len)
 424{
 425        unsigned int len_in_bytes = 0;
 426
 427        switch (hbp_len) {
 428        case ARM_BREAKPOINT_LEN_1:
 429                len_in_bytes = 1;
 430                break;
 431        case ARM_BREAKPOINT_LEN_2:
 432                len_in_bytes = 2;
 433                break;
 434        case ARM_BREAKPOINT_LEN_4:
 435                len_in_bytes = 4;
 436                break;
 437        case ARM_BREAKPOINT_LEN_8:
 438                len_in_bytes = 8;
 439                break;
 440        }
 441
 442        return len_in_bytes;
 443}
 444
 445/*
 446 * Check whether bp virtual address is in kernel space.
 447 */
 448int arch_check_bp_in_kernelspace(struct arch_hw_breakpoint *hw)
 449{
 450        unsigned int len;
 451        unsigned long va;
 452
 453        va = hw->address;
 454        len = get_hbp_len(hw->ctrl.len);
 455
 456        return (va >= TASK_SIZE) && ((va + len - 1) >= TASK_SIZE);
 457}
 458
 459/*
 460 * Extract generic type and length encodings from an arch_hw_breakpoint_ctrl.
 461 * Hopefully this will disappear when ptrace can bypass the conversion
 462 * to generic breakpoint descriptions.
 463 */
 464int arch_bp_generic_fields(struct arch_hw_breakpoint_ctrl ctrl,
 465                           int *gen_len, int *gen_type)
 466{
 467        /* Type */
 468        switch (ctrl.type) {
 469        case ARM_BREAKPOINT_EXECUTE:
 470                *gen_type = HW_BREAKPOINT_X;
 471                break;
 472        case ARM_BREAKPOINT_LOAD:
 473                *gen_type = HW_BREAKPOINT_R;
 474                break;
 475        case ARM_BREAKPOINT_STORE:
 476                *gen_type = HW_BREAKPOINT_W;
 477                break;
 478        case ARM_BREAKPOINT_LOAD | ARM_BREAKPOINT_STORE:
 479                *gen_type = HW_BREAKPOINT_RW;
 480                break;
 481        default:
 482                return -EINVAL;
 483        }
 484
 485        /* Len */
 486        switch (ctrl.len) {
 487        case ARM_BREAKPOINT_LEN_1:
 488                *gen_len = HW_BREAKPOINT_LEN_1;
 489                break;
 490        case ARM_BREAKPOINT_LEN_2:
 491                *gen_len = HW_BREAKPOINT_LEN_2;
 492                break;
 493        case ARM_BREAKPOINT_LEN_4:
 494                *gen_len = HW_BREAKPOINT_LEN_4;
 495                break;
 496        case ARM_BREAKPOINT_LEN_8:
 497                *gen_len = HW_BREAKPOINT_LEN_8;
 498                break;
 499        default:
 500                return -EINVAL;
 501        }
 502
 503        return 0;
 504}
 505
 506/*
 507 * Construct an arch_hw_breakpoint from a perf_event.
 508 */
 509static int arch_build_bp_info(struct perf_event *bp,
 510                              const struct perf_event_attr *attr,
 511                              struct arch_hw_breakpoint *hw)
 512{
 513        /* Type */
 514        switch (attr->bp_type) {
 515        case HW_BREAKPOINT_X:
 516                hw->ctrl.type = ARM_BREAKPOINT_EXECUTE;
 517                break;
 518        case HW_BREAKPOINT_R:
 519                hw->ctrl.type = ARM_BREAKPOINT_LOAD;
 520                break;
 521        case HW_BREAKPOINT_W:
 522                hw->ctrl.type = ARM_BREAKPOINT_STORE;
 523                break;
 524        case HW_BREAKPOINT_RW:
 525                hw->ctrl.type = ARM_BREAKPOINT_LOAD | ARM_BREAKPOINT_STORE;
 526                break;
 527        default:
 528                return -EINVAL;
 529        }
 530
 531        /* Len */
 532        switch (attr->bp_len) {
 533        case HW_BREAKPOINT_LEN_1:
 534                hw->ctrl.len = ARM_BREAKPOINT_LEN_1;
 535                break;
 536        case HW_BREAKPOINT_LEN_2:
 537                hw->ctrl.len = ARM_BREAKPOINT_LEN_2;
 538                break;
 539        case HW_BREAKPOINT_LEN_4:
 540                hw->ctrl.len = ARM_BREAKPOINT_LEN_4;
 541                break;
 542        case HW_BREAKPOINT_LEN_8:
 543                hw->ctrl.len = ARM_BREAKPOINT_LEN_8;
 544                if ((hw->ctrl.type != ARM_BREAKPOINT_EXECUTE)
 545                        && max_watchpoint_len >= 8)
 546                        break;
 547        default:
 548                return -EINVAL;
 549        }
 550
 551        /*
 552         * Breakpoints must be of length 2 (thumb) or 4 (ARM) bytes.
 553         * Watchpoints can be of length 1, 2, 4 or 8 bytes if supported
 554         * by the hardware and must be aligned to the appropriate number of
 555         * bytes.
 556         */
 557        if (hw->ctrl.type == ARM_BREAKPOINT_EXECUTE &&
 558            hw->ctrl.len != ARM_BREAKPOINT_LEN_2 &&
 559            hw->ctrl.len != ARM_BREAKPOINT_LEN_4)
 560                return -EINVAL;
 561
 562        /* Address */
 563        hw->address = attr->bp_addr;
 564
 565        /* Privilege */
 566        hw->ctrl.privilege = ARM_BREAKPOINT_USER;
 567        if (arch_check_bp_in_kernelspace(hw))
 568                hw->ctrl.privilege |= ARM_BREAKPOINT_PRIV;
 569
 570        /* Enabled? */
 571        hw->ctrl.enabled = !attr->disabled;
 572
 573        /* Mismatch */
 574        hw->ctrl.mismatch = 0;
 575
 576        return 0;
 577}
 578
 579/*
 580 * Validate the arch-specific HW Breakpoint register settings.
 581 */
 582int hw_breakpoint_arch_parse(struct perf_event *bp,
 583                             const struct perf_event_attr *attr,
 584                             struct arch_hw_breakpoint *hw)
 585{
 586        int ret = 0;
 587        u32 offset, alignment_mask = 0x3;
 588
 589        /* Ensure that we are in monitor debug mode. */
 590        if (!monitor_mode_enabled())
 591                return -ENODEV;
 592
 593        /* Build the arch_hw_breakpoint. */
 594        ret = arch_build_bp_info(bp, attr, hw);
 595        if (ret)
 596                goto out;
 597
 598        /* Check address alignment. */
 599        if (hw->ctrl.len == ARM_BREAKPOINT_LEN_8)
 600                alignment_mask = 0x7;
 601        offset = hw->address & alignment_mask;
 602        switch (offset) {
 603        case 0:
 604                /* Aligned */
 605                break;
 606        case 1:
 607        case 2:
 608                /* Allow halfword watchpoints and breakpoints. */
 609                if (hw->ctrl.len == ARM_BREAKPOINT_LEN_2)
 610                        break;
 611        case 3:
 612                /* Allow single byte watchpoint. */
 613                if (hw->ctrl.len == ARM_BREAKPOINT_LEN_1)
 614                        break;
 615        default:
 616                ret = -EINVAL;
 617                goto out;
 618        }
 619
 620        hw->address &= ~alignment_mask;
 621        hw->ctrl.len <<= offset;
 622
 623        if (is_default_overflow_handler(bp)) {
 624                /*
 625                 * Mismatch breakpoints are required for single-stepping
 626                 * breakpoints.
 627                 */
 628                if (!core_has_mismatch_brps())
 629                        return -EINVAL;
 630
 631                /* We don't allow mismatch breakpoints in kernel space. */
 632                if (arch_check_bp_in_kernelspace(hw))
 633                        return -EPERM;
 634
 635                /*
 636                 * Per-cpu breakpoints are not supported by our stepping
 637                 * mechanism.
 638                 */
 639                if (!bp->hw.target)
 640                        return -EINVAL;
 641
 642                /*
 643                 * We only support specific access types if the fsr
 644                 * reports them.
 645                 */
 646                if (!debug_exception_updates_fsr() &&
 647                    (hw->ctrl.type == ARM_BREAKPOINT_LOAD ||
 648                     hw->ctrl.type == ARM_BREAKPOINT_STORE))
 649                        return -EINVAL;
 650        }
 651
 652out:
 653        return ret;
 654}
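/*
 * Worked example (sketch), assuming the ARM_BREAKPOINT_LEN_* values are the
 * usual byte-address-select masks (LEN_1 == 0x1, LEN_2 == 0x3, LEN_4 == 0xf):
 * a one-byte watchpoint requested at address 0x1003 arrives here with
 * offset == 3, so the code programs a word-aligned address and shifts the
 * BAS bits up by the offset:
 *
 *	hw->address = 0x1003 & ~0x3;	-> 0x1000
 *	hw->ctrl.len = 0x1 << 3;	-> BAS 0b1000, i.e. byte 3 only
 *
 * watchpoint_handler() reverses this with its
 * (1 << (addr & alignment_mask)) & ctrl.len test, so only an access that
 * actually touches 0x1003 is reported as a hit.
 */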
 655
 656/*
 657 * Enable/disable single-stepping over the breakpoint bp at address addr.
 658 */
 659static void enable_single_step(struct perf_event *bp, u32 addr)
 660{
 661        struct arch_hw_breakpoint *info = counter_arch_bp(bp);
 662
 663        arch_uninstall_hw_breakpoint(bp);
 664        info->step_ctrl.mismatch  = 1;
 665        info->step_ctrl.len       = ARM_BREAKPOINT_LEN_4;
 666        info->step_ctrl.type      = ARM_BREAKPOINT_EXECUTE;
 667        info->step_ctrl.privilege = info->ctrl.privilege;
 668        info->step_ctrl.enabled   = 1;
 669        info->trigger             = addr;
 670        arch_install_hw_breakpoint(bp);
 671}
 672
 673static void disable_single_step(struct perf_event *bp)
 674{
 675        arch_uninstall_hw_breakpoint(bp);
 676        counter_arch_bp(bp)->step_ctrl.enabled = 0;
 677        arch_install_hw_breakpoint(bp);
 678}
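/*
 * How the step-over works (descriptive sketch): enable_single_step() swaps
 * the breakpoint or watchpoint for a mismatch breakpoint whose value
 * register holds the address we need to step over (info->trigger). An
 * address-mismatch breakpoint fires on any instruction fetch *except* that
 * address, so the original instruction is allowed to execute once and the
 * first instruction at a different address traps back into
 * breakpoint_handler(), which restores the original breakpoint, or the
 * original watchpoint via watchpoint_single_step_handler().
 */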
 679
 680static void watchpoint_handler(unsigned long addr, unsigned int fsr,
 681                               struct pt_regs *regs)
 682{
 683        int i, access;
 684        u32 val, ctrl_reg, alignment_mask;
 685        struct perf_event *wp, **slots;
 686        struct arch_hw_breakpoint *info;
 687        struct arch_hw_breakpoint_ctrl ctrl;
 688
 689        slots = this_cpu_ptr(wp_on_reg);
 690
 691        for (i = 0; i < core_num_wrps; ++i) {
 692                rcu_read_lock();
 693
 694                wp = slots[i];
 695
 696                if (wp == NULL)
 697                        goto unlock;
 698
 699                info = counter_arch_bp(wp);
 700                /*
 701                 * The DFAR is an unknown value on debug architectures prior
 702                 * to 7.1. Since we only allow a single watchpoint on these
 703                 * older CPUs, we can set the trigger to the lowest possible
 704                 * faulting address.
 705                 */
 706                if (debug_arch < ARM_DEBUG_ARCH_V7_1) {
 707                        BUG_ON(i > 0);
 708                        info->trigger = wp->attr.bp_addr;
 709                } else {
 710                        if (info->ctrl.len == ARM_BREAKPOINT_LEN_8)
 711                                alignment_mask = 0x7;
 712                        else
 713                                alignment_mask = 0x3;
 714
 715                        /* Check if the watchpoint value matches. */
 716                        val = read_wb_reg(ARM_BASE_WVR + i);
 717                        if (val != (addr & ~alignment_mask))
 718                                goto unlock;
 719
 720                        /* Possible match, check the byte address select. */
 721                        ctrl_reg = read_wb_reg(ARM_BASE_WCR + i);
 722                        decode_ctrl_reg(ctrl_reg, &ctrl);
 723                        if (!((1 << (addr & alignment_mask)) & ctrl.len))
 724                                goto unlock;
 725
 726                        /* Check that the access type matches. */
 727                        if (debug_exception_updates_fsr()) {
 728                                access = (fsr & ARM_FSR_ACCESS_MASK) ?
 729                                          HW_BREAKPOINT_W : HW_BREAKPOINT_R;
 730                                if (!(access & hw_breakpoint_type(wp)))
 731                                        goto unlock;
 732                        }
 733
 734                        /* We have a winner. */
 735                        info->trigger = addr;
 736                }
 737
 738                pr_debug("watchpoint fired: address = 0x%x\n", info->trigger);
 739                perf_bp_event(wp, regs);
 740
 741                /*
 742                 * If no overflow handler is present, insert a temporary
 743                 * mismatch breakpoint so we can single-step over the
 744                 * watchpoint trigger.
 745                 */
 746                if (is_default_overflow_handler(wp))
 747                        enable_single_step(wp, instruction_pointer(regs));
 748
 749unlock:
 750                rcu_read_unlock();
 751        }
 752}
 753
 754static void watchpoint_single_step_handler(unsigned long pc)
 755{
 756        int i;
 757        struct perf_event *wp, **slots;
 758        struct arch_hw_breakpoint *info;
 759
 760        slots = this_cpu_ptr(wp_on_reg);
 761
 762        for (i = 0; i < core_num_wrps; ++i) {
 763                rcu_read_lock();
 764
 765                wp = slots[i];
 766
 767                if (wp == NULL)
 768                        goto unlock;
 769
 770                info = counter_arch_bp(wp);
 771                if (!info->step_ctrl.enabled)
 772                        goto unlock;
 773
 774                /*
 775                 * Restore the original watchpoint if we've completed the
 776                 * single-step.
 777                 */
 778                if (info->trigger != pc)
 779                        disable_single_step(wp);
 780
 781unlock:
 782                rcu_read_unlock();
 783        }
 784}
 785
 786static void breakpoint_handler(unsigned long unknown, struct pt_regs *regs)
 787{
 788        int i;
 789        u32 ctrl_reg, val, addr;
 790        struct perf_event *bp, **slots;
 791        struct arch_hw_breakpoint *info;
 792        struct arch_hw_breakpoint_ctrl ctrl;
 793
 794        slots = this_cpu_ptr(bp_on_reg);
 795
 796        /* The exception entry code places the amended lr in the PC. */
 797        addr = regs->ARM_pc;
 798
 799        /* Check the currently installed breakpoints first. */
 800        for (i = 0; i < core_num_brps; ++i) {
 801                rcu_read_lock();
 802
 803                bp = slots[i];
 804
 805                if (bp == NULL)
 806                        goto unlock;
 807
 808                info = counter_arch_bp(bp);
 809
 810                /* Check if the breakpoint value matches. */
 811                val = read_wb_reg(ARM_BASE_BVR + i);
 812                if (val != (addr & ~0x3))
 813                        goto mismatch;
 814
 815                /* Possible match, check the byte address select to confirm. */
 816                ctrl_reg = read_wb_reg(ARM_BASE_BCR + i);
 817                decode_ctrl_reg(ctrl_reg, &ctrl);
 818                if ((1 << (addr & 0x3)) & ctrl.len) {
 819                        info->trigger = addr;
 820                        pr_debug("breakpoint fired: address = 0x%x\n", addr);
 821                        perf_bp_event(bp, regs);
 822                        if (!bp->overflow_handler)
 823                                enable_single_step(bp, addr);
 824                        goto unlock;
 825                }
 826
 827mismatch:
 828                /* If we're stepping a breakpoint, it can now be restored. */
 829                if (info->step_ctrl.enabled)
 830                        disable_single_step(bp);
 831unlock:
 832                rcu_read_unlock();
 833        }
 834
 835        /* Handle any pending watchpoint single-step breakpoints. */
 836        watchpoint_single_step_handler(addr);
 837}
 838
 839/*
 840 * Called from either the Data Abort Handler [watchpoint] or the
 841 * Prefetch Abort Handler [breakpoint] with interrupts disabled.
 842 */
 843static int hw_breakpoint_pending(unsigned long addr, unsigned int fsr,
 844                                 struct pt_regs *regs)
 845{
 846        int ret = 0;
 847        u32 dscr;
 848
 849        preempt_disable();
 850
 851        if (interrupts_enabled(regs))
 852                local_irq_enable();
 853
 854        /* We only handle watchpoints and hardware breakpoints. */
 855        ARM_DBG_READ(c0, c1, 0, dscr);
 856
 857        /* Perform perf callbacks. */
 858        switch (ARM_DSCR_MOE(dscr)) {
 859        case ARM_ENTRY_BREAKPOINT:
 860                breakpoint_handler(addr, regs);
 861                break;
 862        case ARM_ENTRY_ASYNC_WATCHPOINT:
 863                WARN(1, "Asynchronous watchpoint exception taken. Debugging results may be unreliable\n");
 864        case ARM_ENTRY_SYNC_WATCHPOINT:
 865                watchpoint_handler(addr, fsr, regs);
 866                break;
 867        default:
 868                ret = 1; /* Unhandled fault. */
 869        }
 870
 871        preempt_enable();
 872
 873        return ret;
 874}
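/*
 * Note (sketch): ARM_DSCR_MOE() extracts the "method of entry" field from
 * the DSCR, which is what distinguishes a breakpoint exception from a
 * watchpoint one here. The ARM_ENTRY_ASYNC_WATCHPOINT case deliberately
 * falls through into the synchronous case: after warning that the reported
 * address may be unreliable, the event is still handed to
 * watchpoint_handler(), since that is the best information available.
 */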
 875
 876/*
 877 * One-time initialisation.
 878 */
 879static cpumask_t debug_err_mask;
 880
 881static int debug_reg_trap(struct pt_regs *regs, unsigned int instr)
 882{
 883        int cpu = smp_processor_id();
 884
 885        pr_warn("Debug register access (0x%x) caused undefined instruction on CPU %d\n",
 886                instr, cpu);
 887
 888        /* Set the error flag for this CPU and skip the faulting instruction. */
 889        cpumask_set_cpu(cpu, &debug_err_mask);
 890        instruction_pointer(regs) += 4;
 891        return 0;
 892}
 893
 894static struct undef_hook debug_reg_hook = {
 895        .instr_mask     = 0x0fe80f10,
 896        .instr_val      = 0x0e000e10,
 897        .fn             = debug_reg_trap,
 898};
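/*
 * Sketch of what the hook above matches: an undefined-instruction trap is
 * routed to debug_reg_trap() when
 *
 *	(instr & 0x0fe80f10) == 0x0e000e10
 *
 * i.e. for MRC/MCR instructions targeting coprocessor 14 (the 0xe in bits
 * [11:8]) with opc1 == 0 and CRn in the c0-c7 range, which covers the
 * debug-register accesses issued by read_wb_reg() and write_wb_reg(). If one
 * of those faults (for instance because DBGSWENABLE is driven low), the CPU
 * is flagged in debug_err_mask and the faulting instruction is skipped.
 */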
 899
 900/* Does this core support OS Save and Restore? */
 901static bool core_has_os_save_restore(void)
 902{
 903        u32 oslsr;
 904
 905        switch (get_debug_arch()) {
 906        case ARM_DEBUG_ARCH_V7_1:
 907                return true;
 908        case ARM_DEBUG_ARCH_V7_ECP14:
 909                ARM_DBG_READ(c1, c1, 4, oslsr);
 910                if (oslsr & ARM_OSLSR_OSLM0)
 911                        return true;
 912        default:
 913                return false;
 914        }
 915}
 916
 917static void reset_ctrl_regs(unsigned int cpu)
 918{
 919        int i, raw_num_brps, err = 0;
 920        u32 val;
 921
 922        /*
 923         * v7 debug contains save and restore registers so that debug state
 924         * can be maintained across low-power modes without leaving the debug
 925         * logic powered up. It is IMPLEMENTATION DEFINED whether we can access
 926         * the debug registers out of reset, so we must unlock the OS Lock
 927         * Access Register to avoid taking undefined instruction exceptions
 928         * later on.
 929         */
 930        switch (debug_arch) {
 931        case ARM_DEBUG_ARCH_V6:
 932        case ARM_DEBUG_ARCH_V6_1:
 933                /* ARMv6 cores clear the registers out of reset. */
 934                goto out_mdbgen;
 935        case ARM_DEBUG_ARCH_V7_ECP14:
 936                /*
 937                 * Ensure sticky power-down is clear (i.e. debug logic is
 938                 * powered up).
 939                 */
 940                ARM_DBG_READ(c1, c5, 4, val);
 941                if ((val & 0x1) == 0)
 942                        err = -EPERM;
 943
 944                if (!has_ossr)
 945                        goto clear_vcr;
 946                break;
 947        case ARM_DEBUG_ARCH_V7_1:
 948                /*
 949                 * Ensure the OS double lock is clear.
 950                 */
 951                ARM_DBG_READ(c1, c3, 4, val);
 952                if ((val & 0x1) == 1)
 953                        err = -EPERM;
 954                break;
 955        }
 956
 957        if (err) {
 958                pr_warn_once("CPU %d debug is powered down!\n", cpu);
 959                cpumask_or(&debug_err_mask, &debug_err_mask, cpumask_of(cpu));
 960                return;
 961        }
 962
 963        /*
 964         * Unconditionally clear the OS lock by writing a value
 965         * other than CS_LAR_KEY to the access register.
 966         */
 967        ARM_DBG_WRITE(c1, c0, 4, ~CORESIGHT_UNLOCK);
 968        isb();
 969
 970        /*
 971         * Clear any configured vector-catch events before
 972         * enabling monitor mode.
 973         */
 974clear_vcr:
 975        ARM_DBG_WRITE(c0, c7, 0, 0);
 976        isb();
 977
 978        if (cpumask_intersects(&debug_err_mask, cpumask_of(cpu))) {
 979                pr_warn_once("CPU %d failed to disable vector catch\n", cpu);
 980                return;
 981        }
 982
 983        /*
 984         * The control/value register pairs are UNKNOWN out of reset so
 985         * clear them to avoid spurious debug events.
 986         */
 987        raw_num_brps = get_num_brp_resources();
 988        for (i = 0; i < raw_num_brps; ++i) {
 989                write_wb_reg(ARM_BASE_BCR + i, 0UL);
 990                write_wb_reg(ARM_BASE_BVR + i, 0UL);
 991        }
 992
 993        for (i = 0; i < core_num_wrps; ++i) {
 994                write_wb_reg(ARM_BASE_WCR + i, 0UL);
 995                write_wb_reg(ARM_BASE_WVR + i, 0UL);
 996        }
 997
 998        if (cpumask_intersects(&debug_err_mask, cpumask_of(cpu))) {
 999                pr_warn_once("CPU %d failed to clear debug register pairs\n", cpu);
1000                return;
1001        }
1002
1003        /*
1004         * Have a crack at enabling monitor mode. We don't actually need
1005         * it yet, but reporting an error early is useful if it fails.
1006         */
1007out_mdbgen:
1008        if (enable_monitor_mode())
1009                cpumask_or(&debug_err_mask, &debug_err_mask, cpumask_of(cpu));
1010}
1011
1012static int dbg_reset_online(unsigned int cpu)
1013{
1014        local_irq_disable();
1015        reset_ctrl_regs(cpu);
1016        local_irq_enable();
1017        return 0;
1018}
1019
1020#ifdef CONFIG_CPU_PM
1021static int dbg_cpu_pm_notify(struct notifier_block *self, unsigned long action,
1022                             void *v)
1023{
1024        if (action == CPU_PM_EXIT)
1025                reset_ctrl_regs(smp_processor_id());
1026
1027        return NOTIFY_OK;
1028}
1029
1030static struct notifier_block dbg_cpu_pm_nb = {
1031        .notifier_call = dbg_cpu_pm_notify,
1032};
1033
1034static void __init pm_init(void)
1035{
1036        cpu_pm_register_notifier(&dbg_cpu_pm_nb);
1037}
1038#else
1039static inline void pm_init(void)
1040{
1041}
1042#endif
1043
1044static int __init arch_hw_breakpoint_init(void)
1045{
1046        int ret;
1047
1048        debug_arch = get_debug_arch();
1049
1050        if (!debug_arch_supported()) {
1051                pr_info("debug architecture 0x%x unsupported.\n", debug_arch);
1052                return 0;
1053        }
1054
1055        /*
1056         * Scorpion CPUs (at least those in APQ8060) seem to set DBGPRSR.SPD
1057         * whenever a WFI is issued, even if the core is not powered down, in
1058         * violation of the architecture.  When DBGPRSR.SPD is set, accesses to
1059         * breakpoint and watchpoint registers are treated as undefined, so
1060         * this results in boot time and runtime failures when these are
1061         * accessed and we unexpectedly take a trap.
1062         *
1063         * It's not clear if/how this can be worked around, so we blacklist
1064         * Scorpion CPUs to avoid these issues.
1065         */
1066        if (read_cpuid_part() == ARM_CPU_PART_SCORPION) {
1067                pr_info("Scorpion CPU detected. Hardware breakpoints and watchpoints disabled\n");
1068                return 0;
1069        }
1070
1071        has_ossr = core_has_os_save_restore();
1072
1073        /* Determine how many BRPs/WRPs are available. */
1074        core_num_brps = get_num_brps();
1075        core_num_wrps = get_num_wrps();
1076
1077        /*
1078         * We need to tread carefully here because DBGSWENABLE may be
1079         * driven low on this core and there isn't an architected way to
1080         * determine that.
1081         */
1082        cpus_read_lock();
1083        register_undef_hook(&debug_reg_hook);
1084
1085        /*
1086         * Register CPU notifier which resets the breakpoint resources. We
1087         * assume that a halting debugger will leave the world in a nice state
1088         * for us.
1089         */
1090        ret = cpuhp_setup_state_cpuslocked(CPUHP_AP_ONLINE_DYN,
1091                                           "arm/hw_breakpoint:online",
1092                                           dbg_reset_online, NULL);
1093        unregister_undef_hook(&debug_reg_hook);
1094        if (WARN_ON(ret < 0) || !cpumask_empty(&debug_err_mask)) {
1095                core_num_brps = 0;
1096                core_num_wrps = 0;
1097                if (ret > 0)
1098                        cpuhp_remove_state_nocalls_cpuslocked(ret);
1099                cpus_read_unlock();
1100                return 0;
1101        }
1102
1103        pr_info("found %d " "%s" "breakpoint and %d watchpoint registers.\n",
1104                core_num_brps, core_has_mismatch_brps() ? "(+1 reserved) " :
1105                "", core_num_wrps);
1106
1107        /* Work out the maximum supported watchpoint length. */
1108        max_watchpoint_len = get_max_wp_len();
1109        pr_info("maximum watchpoint size is %u bytes.\n",
1110                        max_watchpoint_len);
1111
1112        /* Register debug fault handler. */
1113        hook_fault_code(FAULT_CODE_DEBUG, hw_breakpoint_pending, SIGTRAP,
1114                        TRAP_HWBKPT, "watchpoint debug exception");
1115        hook_ifault_code(FAULT_CODE_DEBUG, hw_breakpoint_pending, SIGTRAP,
1116                        TRAP_HWBKPT, "breakpoint debug exception");
1117        cpus_read_unlock();
1118
1119        /* Register PM notifiers. */
1120        pm_init();
1121        return 0;
1122}
1123arch_initcall(arch_hw_breakpoint_init);
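/*
 * Minimal usage sketch (not part of this file): once the initcall above has
 * run, kernel code can claim one of the slots managed here through the
 * generic hw_breakpoint/perf API, modelled on
 * samples/hw_breakpoint/data_breakpoint.c. Both wp_handler and some_variable
 * below are hypothetical names used only for illustration:
 *
 *	struct perf_event_attr attr;
 *	struct perf_event * __percpu *wp;
 *
 *	hw_breakpoint_init(&attr);
 *	attr.bp_addr = (unsigned long)&some_variable;
 *	attr.bp_len  = HW_BREAKPOINT_LEN_4;
 *	attr.bp_type = HW_BREAKPOINT_W;
 *
 *	wp = register_wide_hw_breakpoint(&attr, wp_handler, NULL);
 *	...
 *	unregister_wide_hw_breakpoint(wp);
 *
 * register_wide_hw_breakpoint() eventually reaches
 * arch_install_hw_breakpoint() on each online CPU, which is where the
 * DBGWVR/DBGWCR pair for the new watchpoint is actually programmed.
 */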
1124
1125void hw_breakpoint_pmu_read(struct perf_event *bp)
1126{
1127}
1128
1129/*
1130 * Dummy function to register with die_notifier.
1131 */
1132int hw_breakpoint_exceptions_notify(struct notifier_block *unused,
1133                                        unsigned long val, void *data)
1134{
1135        return NOTIFY_DONE;
1136}
1137