linux/arch/arm64/kernel/hw_breakpoint.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * HW_breakpoint: a unified kernel/user-space hardware breakpoint facility,
 * using the CPU's debug registers.
 *
 * Copyright (C) 2012 ARM Limited
 * Author: Will Deacon <will.deacon@arm.com>
 */

#define pr_fmt(fmt) "hw-breakpoint: " fmt

#include <linux/compat.h>
#include <linux/cpu_pm.h>
#include <linux/errno.h>
#include <linux/hw_breakpoint.h>
#include <linux/kprobes.h>
#include <linux/perf_event.h>
#include <linux/ptrace.h>
#include <linux/smp.h>
#include <linux/uaccess.h>

#include <asm/current.h>
#include <asm/debug-monitors.h>
#include <asm/hw_breakpoint.h>
#include <asm/traps.h>
#include <asm/cputype.h>
#include <asm/system_misc.h>

/* Breakpoint currently in use for each BRP. */
static DEFINE_PER_CPU(struct perf_event *, bp_on_reg[ARM_MAX_BRP]);

/* Watchpoint currently in use for each WRP. */
static DEFINE_PER_CPU(struct perf_event *, wp_on_reg[ARM_MAX_WRP]);

/* Currently stepping a per-CPU kernel breakpoint. */
static DEFINE_PER_CPU(int, stepping_kernel_bp);

/* Number of BRP/WRP registers on this CPU. */
static int core_num_brps;
static int core_num_wrps;

int hw_breakpoint_slots(int type)
{
        /*
         * We can be called early, so don't rely on
         * our static variables being initialised.
         */
        switch (type) {
        case TYPE_INST:
                return get_num_brps();
        case TYPE_DATA:
                return get_num_wrps();
        default:
                pr_warn("unknown slot type: %d\n", type);
                return 0;
        }
}

#define READ_WB_REG_CASE(OFF, N, REG, VAL)      \
        case (OFF + N):                         \
                AARCH64_DBG_READ(N, REG, VAL);  \
                break

#define WRITE_WB_REG_CASE(OFF, N, REG, VAL)     \
        case (OFF + N):                         \
                AARCH64_DBG_WRITE(N, REG, VAL); \
                break

#define GEN_READ_WB_REG_CASES(OFF, REG, VAL)    \
        READ_WB_REG_CASE(OFF,  0, REG, VAL);    \
        READ_WB_REG_CASE(OFF,  1, REG, VAL);    \
        READ_WB_REG_CASE(OFF,  2, REG, VAL);    \
        READ_WB_REG_CASE(OFF,  3, REG, VAL);    \
        READ_WB_REG_CASE(OFF,  4, REG, VAL);    \
        READ_WB_REG_CASE(OFF,  5, REG, VAL);    \
        READ_WB_REG_CASE(OFF,  6, REG, VAL);    \
        READ_WB_REG_CASE(OFF,  7, REG, VAL);    \
        READ_WB_REG_CASE(OFF,  8, REG, VAL);    \
        READ_WB_REG_CASE(OFF,  9, REG, VAL);    \
        READ_WB_REG_CASE(OFF, 10, REG, VAL);    \
        READ_WB_REG_CASE(OFF, 11, REG, VAL);    \
        READ_WB_REG_CASE(OFF, 12, REG, VAL);    \
        READ_WB_REG_CASE(OFF, 13, REG, VAL);    \
        READ_WB_REG_CASE(OFF, 14, REG, VAL);    \
        READ_WB_REG_CASE(OFF, 15, REG, VAL)

#define GEN_WRITE_WB_REG_CASES(OFF, REG, VAL)   \
        WRITE_WB_REG_CASE(OFF,  0, REG, VAL);   \
        WRITE_WB_REG_CASE(OFF,  1, REG, VAL);   \
        WRITE_WB_REG_CASE(OFF,  2, REG, VAL);   \
        WRITE_WB_REG_CASE(OFF,  3, REG, VAL);   \
        WRITE_WB_REG_CASE(OFF,  4, REG, VAL);   \
        WRITE_WB_REG_CASE(OFF,  5, REG, VAL);   \
        WRITE_WB_REG_CASE(OFF,  6, REG, VAL);   \
        WRITE_WB_REG_CASE(OFF,  7, REG, VAL);   \
        WRITE_WB_REG_CASE(OFF,  8, REG, VAL);   \
        WRITE_WB_REG_CASE(OFF,  9, REG, VAL);   \
        WRITE_WB_REG_CASE(OFF, 10, REG, VAL);   \
        WRITE_WB_REG_CASE(OFF, 11, REG, VAL);   \
        WRITE_WB_REG_CASE(OFF, 12, REG, VAL);   \
        WRITE_WB_REG_CASE(OFF, 13, REG, VAL);   \
        WRITE_WB_REG_CASE(OFF, 14, REG, VAL);   \
        WRITE_WB_REG_CASE(OFF, 15, REG, VAL)

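/*
 * The hardware debug registers (DBGBVRn_EL1, DBGBCRn_EL1, DBGWVRn_EL1 and
 * DBGWCRn_EL1) are accessed with MRS/MSR instructions that encode the
 * register number in the instruction itself, so the index cannot be a
 * run-time value. The accessors below therefore switch on (register base + n)
 * and use the case tables generated above to dispatch to the matching
 * compile-time accessor.
 */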
static u64 read_wb_reg(int reg, int n)
{
        u64 val = 0;

        switch (reg + n) {
        GEN_READ_WB_REG_CASES(AARCH64_DBG_REG_BVR, AARCH64_DBG_REG_NAME_BVR, val);
        GEN_READ_WB_REG_CASES(AARCH64_DBG_REG_BCR, AARCH64_DBG_REG_NAME_BCR, val);
        GEN_READ_WB_REG_CASES(AARCH64_DBG_REG_WVR, AARCH64_DBG_REG_NAME_WVR, val);
        GEN_READ_WB_REG_CASES(AARCH64_DBG_REG_WCR, AARCH64_DBG_REG_NAME_WCR, val);
        default:
                pr_warn("attempt to read from unknown breakpoint register %d\n", n);
        }

        return val;
}
NOKPROBE_SYMBOL(read_wb_reg);

static void write_wb_reg(int reg, int n, u64 val)
{
        switch (reg + n) {
        GEN_WRITE_WB_REG_CASES(AARCH64_DBG_REG_BVR, AARCH64_DBG_REG_NAME_BVR, val);
        GEN_WRITE_WB_REG_CASES(AARCH64_DBG_REG_BCR, AARCH64_DBG_REG_NAME_BCR, val);
        GEN_WRITE_WB_REG_CASES(AARCH64_DBG_REG_WVR, AARCH64_DBG_REG_NAME_WVR, val);
        GEN_WRITE_WB_REG_CASES(AARCH64_DBG_REG_WCR, AARCH64_DBG_REG_NAME_WCR, val);
        default:
                pr_warn("attempt to write to unknown breakpoint register %d\n", n);
        }
        isb();
}
NOKPROBE_SYMBOL(write_wb_reg);

/*
 * Convert a breakpoint privilege level to the corresponding exception
 * level.
 */
static enum dbg_active_el debug_exception_level(int privilege)
{
        switch (privilege) {
        case AARCH64_BREAKPOINT_EL0:
                return DBG_ACTIVE_EL0;
        case AARCH64_BREAKPOINT_EL1:
                return DBG_ACTIVE_EL1;
        default:
                pr_warn("invalid breakpoint privilege level %d\n", privilege);
                return -EINVAL;
        }
}
NOKPROBE_SYMBOL(debug_exception_level);

enum hw_breakpoint_ops {
        HW_BREAKPOINT_INSTALL,
        HW_BREAKPOINT_UNINSTALL,
        HW_BREAKPOINT_RESTORE
};

static int is_compat_bp(struct perf_event *bp)
{
        struct task_struct *tsk = bp->hw.target;

        /*
         * tsk can be NULL for per-cpu (non-ptrace) breakpoints.
         * In this case, use the native interface, since we don't have
         * the notion of a "compat CPU" and could end up relying on
         * deprecated behaviour if we use unaligned watchpoints in
         * AArch64 state.
         */
        return tsk && is_compat_thread(task_thread_info(tsk));
}

/**
 * hw_breakpoint_slot_setup - Find and setup a perf slot according to
 *                            operations
 *
 * @slots: pointer to array of slots
 * @max_slots: max number of slots
 * @bp: perf_event to setup
 * @ops: operation to be carried out on the slot
 *
 * Return:
 *      slot index on success
 *      -ENOSPC if no slot is available/matches
 *      -EINVAL on wrong operations parameter
 */
static int hw_breakpoint_slot_setup(struct perf_event **slots, int max_slots,
                                    struct perf_event *bp,
                                    enum hw_breakpoint_ops ops)
{
        int i;
        struct perf_event **slot;

        for (i = 0; i < max_slots; ++i) {
                slot = &slots[i];
                switch (ops) {
                case HW_BREAKPOINT_INSTALL:
                        if (!*slot) {
                                *slot = bp;
                                return i;
                        }
                        break;
                case HW_BREAKPOINT_UNINSTALL:
                        if (*slot == bp) {
                                *slot = NULL;
                                return i;
                        }
                        break;
                case HW_BREAKPOINT_RESTORE:
                        if (*slot == bp)
                                return i;
                        break;
                default:
                        pr_warn_once("Unhandled hw breakpoint ops %d\n", ops);
                        return -EINVAL;
                }
        }
        return -ENOSPC;
}

static int hw_breakpoint_control(struct perf_event *bp,
                                 enum hw_breakpoint_ops ops)
{
        struct arch_hw_breakpoint *info = counter_arch_bp(bp);
        struct perf_event **slots;
        struct debug_info *debug_info = &current->thread.debug;
        int i, max_slots, ctrl_reg, val_reg, reg_enable;
        enum dbg_active_el dbg_el = debug_exception_level(info->ctrl.privilege);
        u32 ctrl;

        if (info->ctrl.type == ARM_BREAKPOINT_EXECUTE) {
                /* Breakpoint */
                ctrl_reg = AARCH64_DBG_REG_BCR;
                val_reg = AARCH64_DBG_REG_BVR;
                slots = this_cpu_ptr(bp_on_reg);
                max_slots = core_num_brps;
                reg_enable = !debug_info->bps_disabled;
        } else {
                /* Watchpoint */
                ctrl_reg = AARCH64_DBG_REG_WCR;
                val_reg = AARCH64_DBG_REG_WVR;
                slots = this_cpu_ptr(wp_on_reg);
                max_slots = core_num_wrps;
                reg_enable = !debug_info->wps_disabled;
        }

        i = hw_breakpoint_slot_setup(slots, max_slots, bp, ops);

        if (WARN_ONCE(i < 0, "Can't find any breakpoint slot"))
                return i;

        switch (ops) {
        case HW_BREAKPOINT_INSTALL:
                /*
                 * Ensure debug monitors are enabled at the correct exception
                 * level.
                 */
                enable_debug_monitors(dbg_el);
                /* Fall through */
        case HW_BREAKPOINT_RESTORE:
                /* Setup the address register. */
                write_wb_reg(val_reg, i, info->address);

                /* Setup the control register. */
                ctrl = encode_ctrl_reg(info->ctrl);
                write_wb_reg(ctrl_reg, i,
                             reg_enable ? ctrl | 0x1 : ctrl & ~0x1);
                break;
        case HW_BREAKPOINT_UNINSTALL:
                /* Reset the control register. */
                write_wb_reg(ctrl_reg, i, 0);

                /*
                 * Release the debug monitors for the correct exception
                 * level.
                 */
                disable_debug_monitors(dbg_el);
                break;
        }

        return 0;
}

/*
 * Install a perf counter breakpoint.
 */
int arch_install_hw_breakpoint(struct perf_event *bp)
{
        return hw_breakpoint_control(bp, HW_BREAKPOINT_INSTALL);
}

void arch_uninstall_hw_breakpoint(struct perf_event *bp)
{
        hw_breakpoint_control(bp, HW_BREAKPOINT_UNINSTALL);
}

static int get_hbp_len(u8 hbp_len)
{
        unsigned int len_in_bytes = 0;

        switch (hbp_len) {
        case ARM_BREAKPOINT_LEN_1:
                len_in_bytes = 1;
                break;
        case ARM_BREAKPOINT_LEN_2:
                len_in_bytes = 2;
                break;
        case ARM_BREAKPOINT_LEN_3:
                len_in_bytes = 3;
                break;
        case ARM_BREAKPOINT_LEN_4:
                len_in_bytes = 4;
                break;
        case ARM_BREAKPOINT_LEN_5:
                len_in_bytes = 5;
                break;
        case ARM_BREAKPOINT_LEN_6:
                len_in_bytes = 6;
                break;
        case ARM_BREAKPOINT_LEN_7:
                len_in_bytes = 7;
                break;
        case ARM_BREAKPOINT_LEN_8:
                len_in_bytes = 8;
                break;
        }

        return len_in_bytes;
}

/*
 * Check whether bp virtual address is in kernel space.
 */
int arch_check_bp_in_kernelspace(struct arch_hw_breakpoint *hw)
{
        unsigned int len;
        unsigned long va;

        va = hw->address;
        len = get_hbp_len(hw->ctrl.len);

        return (va >= TASK_SIZE) && ((va + len - 1) >= TASK_SIZE);
}

/*
 * Extract generic type and length encodings from an arch_hw_breakpoint_ctrl.
 * Hopefully this will disappear when ptrace can bypass the conversion
 * to generic breakpoint descriptions.
 */
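/*
 * Example: a watchpoint covering bytes 2-3 of the doubleword at the value
 * register has ctrl.len == 0b00001100, so *offset becomes 2 and the
 * remaining mask (0b11) maps to HW_BREAKPOINT_LEN_2.
 */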
int arch_bp_generic_fields(struct arch_hw_breakpoint_ctrl ctrl,
                           int *gen_len, int *gen_type, int *offset)
{
        /* Type */
        switch (ctrl.type) {
        case ARM_BREAKPOINT_EXECUTE:
                *gen_type = HW_BREAKPOINT_X;
                break;
        case ARM_BREAKPOINT_LOAD:
                *gen_type = HW_BREAKPOINT_R;
                break;
        case ARM_BREAKPOINT_STORE:
                *gen_type = HW_BREAKPOINT_W;
                break;
        case ARM_BREAKPOINT_LOAD | ARM_BREAKPOINT_STORE:
                *gen_type = HW_BREAKPOINT_RW;
                break;
        default:
                return -EINVAL;
        }

        if (!ctrl.len)
                return -EINVAL;
        *offset = __ffs(ctrl.len);

        /* Len */
        switch (ctrl.len >> *offset) {
        case ARM_BREAKPOINT_LEN_1:
                *gen_len = HW_BREAKPOINT_LEN_1;
                break;
        case ARM_BREAKPOINT_LEN_2:
                *gen_len = HW_BREAKPOINT_LEN_2;
                break;
        case ARM_BREAKPOINT_LEN_3:
                *gen_len = HW_BREAKPOINT_LEN_3;
                break;
        case ARM_BREAKPOINT_LEN_4:
                *gen_len = HW_BREAKPOINT_LEN_4;
                break;
        case ARM_BREAKPOINT_LEN_5:
                *gen_len = HW_BREAKPOINT_LEN_5;
                break;
        case ARM_BREAKPOINT_LEN_6:
                *gen_len = HW_BREAKPOINT_LEN_6;
                break;
        case ARM_BREAKPOINT_LEN_7:
                *gen_len = HW_BREAKPOINT_LEN_7;
                break;
        case ARM_BREAKPOINT_LEN_8:
                *gen_len = HW_BREAKPOINT_LEN_8;
                break;
        default:
                return -EINVAL;
        }

        return 0;
}

/*
 * Construct an arch_hw_breakpoint from a perf_event.
 */
static int arch_build_bp_info(struct perf_event *bp,
                              const struct perf_event_attr *attr,
                              struct arch_hw_breakpoint *hw)
{
        /* Type */
        switch (attr->bp_type) {
        case HW_BREAKPOINT_X:
                hw->ctrl.type = ARM_BREAKPOINT_EXECUTE;
                break;
        case HW_BREAKPOINT_R:
                hw->ctrl.type = ARM_BREAKPOINT_LOAD;
                break;
        case HW_BREAKPOINT_W:
                hw->ctrl.type = ARM_BREAKPOINT_STORE;
                break;
        case HW_BREAKPOINT_RW:
                hw->ctrl.type = ARM_BREAKPOINT_LOAD | ARM_BREAKPOINT_STORE;
                break;
        default:
                return -EINVAL;
        }

        /* Len */
        switch (attr->bp_len) {
        case HW_BREAKPOINT_LEN_1:
                hw->ctrl.len = ARM_BREAKPOINT_LEN_1;
                break;
        case HW_BREAKPOINT_LEN_2:
                hw->ctrl.len = ARM_BREAKPOINT_LEN_2;
                break;
        case HW_BREAKPOINT_LEN_3:
                hw->ctrl.len = ARM_BREAKPOINT_LEN_3;
                break;
        case HW_BREAKPOINT_LEN_4:
                hw->ctrl.len = ARM_BREAKPOINT_LEN_4;
                break;
        case HW_BREAKPOINT_LEN_5:
                hw->ctrl.len = ARM_BREAKPOINT_LEN_5;
                break;
        case HW_BREAKPOINT_LEN_6:
                hw->ctrl.len = ARM_BREAKPOINT_LEN_6;
                break;
        case HW_BREAKPOINT_LEN_7:
                hw->ctrl.len = ARM_BREAKPOINT_LEN_7;
                break;
        case HW_BREAKPOINT_LEN_8:
                hw->ctrl.len = ARM_BREAKPOINT_LEN_8;
                break;
        default:
                return -EINVAL;
        }

        /*
         * On AArch64, we only permit breakpoints of length 4, whereas
         * AArch32 also requires breakpoints of length 2 for Thumb.
         * Watchpoints can be of length 1, 2, 4 or 8 bytes.
         */
        if (hw->ctrl.type == ARM_BREAKPOINT_EXECUTE) {
                if (is_compat_bp(bp)) {
                        if (hw->ctrl.len != ARM_BREAKPOINT_LEN_2 &&
                            hw->ctrl.len != ARM_BREAKPOINT_LEN_4)
                                return -EINVAL;
                } else if (hw->ctrl.len != ARM_BREAKPOINT_LEN_4) {
                        /*
                         * FIXME: Some tools (I'm looking at you perf) assume
                         *        that breakpoints should be sizeof(long). This
                         *        is nonsense. For now, we fix up the parameter
                         *        but we should probably return -EINVAL instead.
                         */
                        hw->ctrl.len = ARM_BREAKPOINT_LEN_4;
                }
        }

        /* Address */
        hw->address = attr->bp_addr;

        /*
         * Privilege
         * Note that we disallow combined EL0/EL1 breakpoints because
         * that would complicate the stepping code.
         */
        if (arch_check_bp_in_kernelspace(hw))
                hw->ctrl.privilege = AARCH64_BREAKPOINT_EL1;
        else
                hw->ctrl.privilege = AARCH64_BREAKPOINT_EL0;

        /* Enabled? */
        hw->ctrl.enabled = !attr->disabled;

        return 0;
}

/*
 * Validate the arch-specific HW Breakpoint register settings.
 */
int hw_breakpoint_arch_parse(struct perf_event *bp,
                             const struct perf_event_attr *attr,
                             struct arch_hw_breakpoint *hw)
{
        int ret;
        u64 alignment_mask, offset;

        /* Build the arch_hw_breakpoint. */
        ret = arch_build_bp_info(bp, attr, hw);
        if (ret)
                return ret;

        /*
         * Check address alignment.
         * We don't do any clever alignment correction for watchpoints
         * because using 64-bit unaligned addresses is deprecated for
         * AArch64.
         *
         * AArch32 tasks expect some simple alignment fixups, so emulate
         * that here.
         */
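        /*
         * Example: an AArch32 2-byte watchpoint at 0x1002 has offset == 2;
         * the address is rounded down to 0x1000 below and ctrl.len (0b11)
         * is shifted up to 0b1100 so that the byte address select still
         * covers bytes 2-3.
         */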
        if (is_compat_bp(bp)) {
                if (hw->ctrl.len == ARM_BREAKPOINT_LEN_8)
                        alignment_mask = 0x7;
                else
                        alignment_mask = 0x3;
                offset = hw->address & alignment_mask;
                switch (offset) {
                case 0:
                        /* Aligned */
                        break;
                case 1:
                case 2:
                        /* Allow halfword watchpoints and breakpoints. */
                        if (hw->ctrl.len == ARM_BREAKPOINT_LEN_2)
                                break;

                        /* Fallthrough */
                case 3:
                        /* Allow single byte watchpoint. */
                        if (hw->ctrl.len == ARM_BREAKPOINT_LEN_1)
                                break;

                        /* Fallthrough */
                default:
                        return -EINVAL;
                }
        } else {
                if (hw->ctrl.type == ARM_BREAKPOINT_EXECUTE)
                        alignment_mask = 0x3;
                else
                        alignment_mask = 0x7;
                offset = hw->address & alignment_mask;
        }

        hw->address &= ~alignment_mask;
        hw->ctrl.len <<= offset;

        /*
         * Disallow per-task kernel breakpoints since these would
         * complicate the stepping code.
         */
        if (hw->ctrl.privilege == AARCH64_BREAKPOINT_EL1 && bp->hw.target)
                return -EINVAL;

        return 0;
}

/*
 * Enable/disable all of the breakpoints active at the specified
 * exception level at the register level.
 * This is used when single-stepping after a breakpoint exception.
 */
static void toggle_bp_registers(int reg, enum dbg_active_el el, int enable)
{
        int i, max_slots, privilege;
        u32 ctrl;
        struct perf_event **slots;

        switch (reg) {
        case AARCH64_DBG_REG_BCR:
                slots = this_cpu_ptr(bp_on_reg);
                max_slots = core_num_brps;
                break;
        case AARCH64_DBG_REG_WCR:
                slots = this_cpu_ptr(wp_on_reg);
                max_slots = core_num_wrps;
                break;
        default:
                return;
        }

        for (i = 0; i < max_slots; ++i) {
                if (!slots[i])
                        continue;

                privilege = counter_arch_bp(slots[i])->ctrl.privilege;
                if (debug_exception_level(privilege) != el)
                        continue;

                ctrl = read_wb_reg(reg, i);
                if (enable)
                        ctrl |= 0x1;
                else
                        ctrl &= ~0x1;
                write_wb_reg(reg, i, ctrl);
        }
}
NOKPROBE_SYMBOL(toggle_bp_registers);

/*
 * Debug exception handlers.
 */
static int breakpoint_handler(unsigned long unused, unsigned int esr,
                              struct pt_regs *regs)
{
        int i, step = 0, *kernel_step;
        u32 ctrl_reg;
        u64 addr, val;
        struct perf_event *bp, **slots;
        struct debug_info *debug_info;
        struct arch_hw_breakpoint_ctrl ctrl;

        slots = this_cpu_ptr(bp_on_reg);
        addr = instruction_pointer(regs);
        debug_info = &current->thread.debug;

        for (i = 0; i < core_num_brps; ++i) {
                rcu_read_lock();

                bp = slots[i];

                if (bp == NULL)
                        goto unlock;

                /* Check if the breakpoint value matches. */
                val = read_wb_reg(AARCH64_DBG_REG_BVR, i);
                if (val != (addr & ~0x3))
                        goto unlock;

                /* Possible match, check the byte address select to confirm. */
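                /*
                 * For execute breakpoints the byte address select is
                 * 0b0011, 0b1100 or 0b1111, covering one halfword of the
                 * word at BVR or the whole word, so bit (addr & 0x3) of
                 * ctrl.len tells us whether this PC is actually covered.
                 */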
                ctrl_reg = read_wb_reg(AARCH64_DBG_REG_BCR, i);
                decode_ctrl_reg(ctrl_reg, &ctrl);
                if (!((1 << (addr & 0x3)) & ctrl.len))
                        goto unlock;

                counter_arch_bp(bp)->trigger = addr;
                perf_bp_event(bp, regs);

                /* Do we need to handle the stepping? */
                if (is_default_overflow_handler(bp))
                        step = 1;
unlock:
                rcu_read_unlock();
        }

        if (!step)
                return 0;

        if (user_mode(regs)) {
                debug_info->bps_disabled = 1;
                toggle_bp_registers(AARCH64_DBG_REG_BCR, DBG_ACTIVE_EL0, 0);

                /* If we're already stepping a watchpoint, just return. */
                if (debug_info->wps_disabled)
                        return 0;

                if (test_thread_flag(TIF_SINGLESTEP))
                        debug_info->suspended_step = 1;
                else
                        user_enable_single_step(current);
        } else {
                toggle_bp_registers(AARCH64_DBG_REG_BCR, DBG_ACTIVE_EL1, 0);
                kernel_step = this_cpu_ptr(&stepping_kernel_bp);

                if (*kernel_step != ARM_KERNEL_STEP_NONE)
                        return 0;

                if (kernel_active_single_step()) {
                        *kernel_step = ARM_KERNEL_STEP_SUSPEND;
                } else {
                        *kernel_step = ARM_KERNEL_STEP_ACTIVE;
                        kernel_enable_single_step(regs);
                }
        }

        return 0;
}
NOKPROBE_SYMBOL(breakpoint_handler);

/*
 * Arm64 hardware does not always report a watchpoint hit address that matches
 * one of the watchpoints set. It can also report an address "near" the
 * watchpoint if a single instruction accesses both watched and unwatched
 * addresses. There is no straightforward way, short of disassembling the
 * offending instruction, to map that address back to the watchpoint. This
 * function computes the distance of the memory access from the watchpoint as a
 * heuristic for the likelihood that a given access triggered the watchpoint.
 *
 * See Section D2.10.5 "Determining the memory location that caused a Watchpoint
 * exception" of the ARMv8 Architecture Reference Manual for details.
 *
 * The function returns the distance of the address from the bytes watched by
 * the watchpoint. In case of an exact match, it returns 0.
 */
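/*
 * Example: with ctrl->len == 0b00001100 and val == 0x1000 the watched range
 * is [0x1002, 0x1003]; an access reported at 0x1005 yields a distance of 2,
 * while one at 0x1003 yields 0 (an exact match).
 */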
static u64 get_distance_from_watchpoint(unsigned long addr, u64 val,
                                        struct arch_hw_breakpoint_ctrl *ctrl)
{
        u64 wp_low, wp_high;
        u32 lens, lene;

        addr = untagged_addr(addr);

        lens = __ffs(ctrl->len);
        lene = __fls(ctrl->len);

        wp_low = val + lens;
        wp_high = val + lene;
        if (addr < wp_low)
                return wp_low - addr;
        else if (addr > wp_high)
                return addr - wp_high;
        else
                return 0;
}

static int watchpoint_handler(unsigned long addr, unsigned int esr,
                              struct pt_regs *regs)
{
        int i, step = 0, *kernel_step, access, closest_match = 0;
        u64 min_dist = -1, dist;
        u32 ctrl_reg;
        u64 val;
        struct perf_event *wp, **slots;
        struct debug_info *debug_info;
        struct arch_hw_breakpoint *info;
        struct arch_hw_breakpoint_ctrl ctrl;

        slots = this_cpu_ptr(wp_on_reg);
        debug_info = &current->thread.debug;

        /*
         * Find all watchpoints that match the reported address. If no exact
         * match is found, attribute the hit to the closest watchpoint.
         */
        rcu_read_lock();
        for (i = 0; i < core_num_wrps; ++i) {
                wp = slots[i];
                if (wp == NULL)
                        continue;

                /*
                 * Check that the access type matches.
                 * 0 => load, otherwise => store
                 */
                access = (esr & AARCH64_ESR_ACCESS_MASK) ? HW_BREAKPOINT_W :
                         HW_BREAKPOINT_R;
                if (!(access & hw_breakpoint_type(wp)))
                        continue;

                /* Check if the watchpoint value and byte select match. */
                val = read_wb_reg(AARCH64_DBG_REG_WVR, i);
                ctrl_reg = read_wb_reg(AARCH64_DBG_REG_WCR, i);
                decode_ctrl_reg(ctrl_reg, &ctrl);
                dist = get_distance_from_watchpoint(addr, val, &ctrl);
                if (dist < min_dist) {
                        min_dist = dist;
                        closest_match = i;
                }
                /* Is this an exact match? */
                if (dist != 0)
                        continue;

                info = counter_arch_bp(wp);
                info->trigger = addr;
                perf_bp_event(wp, regs);

                /* Do we need to handle the stepping? */
                if (is_default_overflow_handler(wp))
                        step = 1;
        }
        if (min_dist > 0 && min_dist != -1) {
                /* No exact match found. */
                wp = slots[closest_match];
                info = counter_arch_bp(wp);
                info->trigger = addr;
                perf_bp_event(wp, regs);

                /* Do we need to handle the stepping? */
                if (is_default_overflow_handler(wp))
                        step = 1;
        }
        rcu_read_unlock();

        if (!step)
                return 0;

        /*
         * We always disable EL0 watchpoints because the kernel can
         * cause these to fire via an unprivileged access.
         */
        toggle_bp_registers(AARCH64_DBG_REG_WCR, DBG_ACTIVE_EL0, 0);

        if (user_mode(regs)) {
                debug_info->wps_disabled = 1;

                /* If we're already stepping a breakpoint, just return. */
                if (debug_info->bps_disabled)
                        return 0;

                if (test_thread_flag(TIF_SINGLESTEP))
                        debug_info->suspended_step = 1;
                else
                        user_enable_single_step(current);
        } else {
                toggle_bp_registers(AARCH64_DBG_REG_WCR, DBG_ACTIVE_EL1, 0);
                kernel_step = this_cpu_ptr(&stepping_kernel_bp);

                if (*kernel_step != ARM_KERNEL_STEP_NONE)
                        return 0;

                if (kernel_active_single_step()) {
                        *kernel_step = ARM_KERNEL_STEP_SUSPEND;
                } else {
                        *kernel_step = ARM_KERNEL_STEP_ACTIVE;
                        kernel_enable_single_step(regs);
                }
        }

        return 0;
}
NOKPROBE_SYMBOL(watchpoint_handler);

/*
 * Handle single-step exception.
 */
int reinstall_suspended_bps(struct pt_regs *regs)
{
        struct debug_info *debug_info = &current->thread.debug;
        int handled_exception = 0, *kernel_step;

        kernel_step = this_cpu_ptr(&stepping_kernel_bp);

        /*
         * Called from single-step exception handler.
         * Return 0 if execution can resume, 1 if a SIGTRAP should be
         * reported.
         */
        if (user_mode(regs)) {
                if (debug_info->bps_disabled) {
                        debug_info->bps_disabled = 0;
                        toggle_bp_registers(AARCH64_DBG_REG_BCR, DBG_ACTIVE_EL0, 1);
                        handled_exception = 1;
                }

                if (debug_info->wps_disabled) {
                        debug_info->wps_disabled = 0;
                        toggle_bp_registers(AARCH64_DBG_REG_WCR, DBG_ACTIVE_EL0, 1);
                        handled_exception = 1;
                }

                if (handled_exception) {
                        if (debug_info->suspended_step) {
                                debug_info->suspended_step = 0;
                                /* Allow exception handling to fall-through. */
                                handled_exception = 0;
                        } else {
                                user_disable_single_step(current);
                        }
                }
        } else if (*kernel_step != ARM_KERNEL_STEP_NONE) {
                toggle_bp_registers(AARCH64_DBG_REG_BCR, DBG_ACTIVE_EL1, 1);
                toggle_bp_registers(AARCH64_DBG_REG_WCR, DBG_ACTIVE_EL1, 1);

                if (!debug_info->wps_disabled)
                        toggle_bp_registers(AARCH64_DBG_REG_WCR, DBG_ACTIVE_EL0, 1);

                if (*kernel_step != ARM_KERNEL_STEP_SUSPEND) {
                        kernel_disable_single_step();
                        handled_exception = 1;
                } else {
                        handled_exception = 0;
                }

                *kernel_step = ARM_KERNEL_STEP_NONE;
        }

        return !handled_exception;
}
NOKPROBE_SYMBOL(reinstall_suspended_bps);

/*
 * Context-switcher for restoring suspended breakpoints.
 */
void hw_breakpoint_thread_switch(struct task_struct *next)
{
        /*
         *           current        next
         * disabled: 0              0     => The usual case, NOTIFY_DONE
         *           0              1     => Disable the registers
         *           1              0     => Enable the registers
         *           1              1     => NOTIFY_DONE. per-task bps will
         *                                   get taken care of by perf.
         */

        struct debug_info *current_debug_info, *next_debug_info;

        current_debug_info = &current->thread.debug;
        next_debug_info = &next->thread.debug;

        /* Update breakpoints. */
        if (current_debug_info->bps_disabled != next_debug_info->bps_disabled)
                toggle_bp_registers(AARCH64_DBG_REG_BCR,
                                    DBG_ACTIVE_EL0,
                                    !next_debug_info->bps_disabled);

        /* Update watchpoints. */
        if (current_debug_info->wps_disabled != next_debug_info->wps_disabled)
                toggle_bp_registers(AARCH64_DBG_REG_WCR,
                                    DBG_ACTIVE_EL0,
                                    !next_debug_info->wps_disabled);
}

/*
 * CPU initialisation.
 */
static int hw_breakpoint_reset(unsigned int cpu)
{
        int i;
        struct perf_event **slots;
        /*
         * When a CPU goes through cold-boot, it does not have any installed
         * slot, so it is safe to share the same function for restoring and
         * resetting breakpoints; when a CPU is hotplugged in, it goes
         * through the slots, which are all empty, hence it just resets control
         * and value for debug registers.
         * When this function is triggered on warm-boot through a CPU PM
         * notifier some slots might be initialized; if so they are
         * reprogrammed according to the debug slots content.
         */
        for (slots = this_cpu_ptr(bp_on_reg), i = 0; i < core_num_brps; ++i) {
                if (slots[i]) {
                        hw_breakpoint_control(slots[i], HW_BREAKPOINT_RESTORE);
                } else {
                        write_wb_reg(AARCH64_DBG_REG_BCR, i, 0UL);
                        write_wb_reg(AARCH64_DBG_REG_BVR, i, 0UL);
                }
        }

        for (slots = this_cpu_ptr(wp_on_reg), i = 0; i < core_num_wrps; ++i) {
                if (slots[i]) {
                        hw_breakpoint_control(slots[i], HW_BREAKPOINT_RESTORE);
                } else {
                        write_wb_reg(AARCH64_DBG_REG_WCR, i, 0UL);
                        write_wb_reg(AARCH64_DBG_REG_WVR, i, 0UL);
                }
        }

        return 0;
}

#ifdef CONFIG_CPU_PM
extern void cpu_suspend_set_dbg_restorer(int (*hw_bp_restore)(unsigned int));
#else
static inline void cpu_suspend_set_dbg_restorer(int (*hw_bp_restore)(unsigned int))
{
}
#endif

/*
 * One-time initialisation.
 */
static int __init arch_hw_breakpoint_init(void)
{
        int ret;

        core_num_brps = get_num_brps();
        core_num_wrps = get_num_wrps();

        pr_info("found %d breakpoint and %d watchpoint registers.\n",
                core_num_brps, core_num_wrps);

        /* Register debug fault handlers. */
        hook_debug_fault_code(DBG_ESR_EVT_HWBP, breakpoint_handler, SIGTRAP,
                              TRAP_HWBKPT, "hw-breakpoint handler");
        hook_debug_fault_code(DBG_ESR_EVT_HWWP, watchpoint_handler, SIGTRAP,
                              TRAP_HWBKPT, "hw-watchpoint handler");

        /*
         * Reset the breakpoint resources. We assume that a halting
         * debugger will leave the world in a nice state for us.
         */
        ret = cpuhp_setup_state(CPUHP_AP_PERF_ARM_HW_BREAKPOINT_STARTING,
                          "perf/arm64/hw_breakpoint:starting",
                          hw_breakpoint_reset, NULL);
        if (ret)
                pr_err("failed to register CPU hotplug notifier: %d\n", ret);

        /* Register cpu_suspend hw breakpoint restore hook */
        cpu_suspend_set_dbg_restorer(hw_breakpoint_reset);

        return ret;
}
arch_initcall(arch_hw_breakpoint_init);

void hw_breakpoint_pmu_read(struct perf_event *bp)
{
}

/*
 * Dummy function to register with die_notifier.
 */
int hw_breakpoint_exceptions_notify(struct notifier_block *unused,
                                    unsigned long val, void *data)
{
        return NOTIFY_DONE;
}