linux/arch/powerpc/kernel/hw_breakpoint.c
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 * HW_breakpoint: a unified kernel/user-space hardware breakpoint facility,
   4 * using the CPU's debug registers. Derived from
   5 * "arch/x86/kernel/hw_breakpoint.c"
   6 *
   7 * Copyright 2010 IBM Corporation
   8 * Author: K.Prasad <prasad@linux.vnet.ibm.com>
   9 */
  10
  11#include <linux/hw_breakpoint.h>
  12#include <linux/notifier.h>
  13#include <linux/kprobes.h>
  14#include <linux/percpu.h>
  15#include <linux/kernel.h>
  16#include <linux/sched.h>
  17#include <linux/smp.h>
  18#include <linux/debugfs.h>
  19#include <linux/init.h>
  20
  21#include <asm/hw_breakpoint.h>
  22#include <asm/processor.h>
  23#include <asm/sstep.h>
  24#include <asm/debug.h>
  25#include <asm/hvcall.h>
  26#include <asm/inst.h>
  27#include <linux/uaccess.h>
  28
  29/*
  30 * Stores the breakpoints currently in use on each breakpoint address
  31 * register for every cpu
  32 */
  33static DEFINE_PER_CPU(struct perf_event *, bp_per_reg[HBP_NUM_MAX]);
  34
  35/*
  36 * Returns total number of data or instruction breakpoints available.
  37 */
  38int hw_breakpoint_slots(int type)
  39{
  40        if (type == TYPE_DATA)
  41                return nr_wp_slots();
  42        return 0;               /* no instruction breakpoints available */
  43}
  44
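     /*
      * Return true if 'current' has taken a breakpoint hit that is still
      * waiting to be single-stepped (i.e. any last_hit_ubp[] slot is set).
      */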
  45static bool single_step_pending(void)
  46{
  47        int i;
  48
  49        for (i = 0; i < nr_wp_slots(); i++) {
  50                if (current->thread.last_hit_ubp[i])
  51                        return true;
  52        }
  53        return false;
  54}
  55
  56/*
  57 * Install a perf counter breakpoint.
  58 *
  59 * We seek a free debug address register and use it for this
  60 * breakpoint.
  61 *
  62 * Atomic: we hold the counter->ctx->lock and we only handle variables
  63 * and registers local to this cpu.
  64 */
  65int arch_install_hw_breakpoint(struct perf_event *bp)
  66{
  67        struct arch_hw_breakpoint *info = counter_arch_bp(bp);
  68        struct perf_event **slot;
  69        int i;
  70
  71        for (i = 0; i < nr_wp_slots(); i++) {
  72                slot = this_cpu_ptr(&bp_per_reg[i]);
  73                if (!*slot) {
  74                        *slot = bp;
  75                        break;
  76                }
  77        }
  78
  79        if (WARN_ONCE(i == nr_wp_slots(), "Can't find any breakpoint slot"))
  80                return -EBUSY;
  81
  82        /*
  83         * Do not install DABR values if the instruction must be single-stepped.
  84         * If so, DABR will be populated in single_step_dabr_instruction().
  85         */
  86        if (!single_step_pending())
  87                __set_breakpoint(i, info);
  88
  89        return 0;
  90}
  91
  92/*
  93 * Uninstall the breakpoint contained in the given counter.
  94 *
  95 * First we search the debug address register it uses and then we disable
  96 * it.
  97 *
  98 * Atomic: we hold the counter->ctx->lock and we only handle variables
  99 * and registers local to this cpu.
 100 */
 101void arch_uninstall_hw_breakpoint(struct perf_event *bp)
 102{
 103        struct arch_hw_breakpoint null_brk = {0};
 104        struct perf_event **slot;
 105        int i;
 106
 107        for (i = 0; i < nr_wp_slots(); i++) {
 108                slot = this_cpu_ptr(&bp_per_reg[i]);
 109                if (*slot == bp) {
 110                        *slot = NULL;
 111                        break;
 112                }
 113        }
 114
 115        if (WARN_ONCE(i == nr_wp_slots(), "Can't find any breakpoint slot"))
 116                return;
 117
 118        __set_breakpoint(i, &null_brk);
 119}
 120
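     /* True if this breakpoint was requested through ptrace (one-shot semantics). */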
 121static bool is_ptrace_bp(struct perf_event *bp)
 122{
 123        return bp->overflow_handler == ptrace_triggered;
 124}
 125
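     /*
      * Book-keeping used by arch_reserve_bp_slot(): each requested breakpoint
      * is tracked either on the global task_bps list or in a per-CPU cpu_bps[]
      * slot so that ptrace and perf requests can be checked against each other.
      */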
 126struct breakpoint {
 127        struct list_head list;
 128        struct perf_event *bp;
 129        bool ptrace_bp;
 130};
 131
 132static DEFINE_PER_CPU(struct breakpoint *, cpu_bps[HBP_NUM_MAX]);
 133static LIST_HEAD(task_bps);
 134
 135static struct breakpoint *alloc_breakpoint(struct perf_event *bp)
 136{
 137        struct breakpoint *tmp;
 138
 139        tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
 140        if (!tmp)
 141                return ERR_PTR(-ENOMEM);
 142        tmp->bp = bp;
 143        tmp->ptrace_bp = is_ptrace_bp(bp);
 144        return tmp;
 145}
 146
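     /*
      * True if the HW_BREAKPOINT_SIZE-aligned match ranges of the two
      * breakpoints overlap.
      */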
 147static bool bp_addr_range_overlap(struct perf_event *bp1, struct perf_event *bp2)
 148{
 149        __u64 bp1_saddr, bp1_eaddr, bp2_saddr, bp2_eaddr;
 150
 151        bp1_saddr = ALIGN_DOWN(bp1->attr.bp_addr, HW_BREAKPOINT_SIZE);
 152        bp1_eaddr = ALIGN(bp1->attr.bp_addr + bp1->attr.bp_len, HW_BREAKPOINT_SIZE);
 153        bp2_saddr = ALIGN_DOWN(bp2->attr.bp_addr, HW_BREAKPOINT_SIZE);
 154        bp2_eaddr = ALIGN(bp2->attr.bp_addr + bp2->attr.bp_len, HW_BREAKPOINT_SIZE);
 155
 156        return (bp1_saddr < bp2_eaddr && bp1_eaddr > bp2_saddr);
 157}
 158
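     /* True if @b and @bp come from different infrastructures (ptrace vs. perf). */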
 159static bool alternate_infra_bp(struct breakpoint *b, struct perf_event *bp)
 160{
 161        return is_ptrace_bp(bp) ? !b->ptrace_bp : b->ptrace_bp;
 162}
 163
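     /*
      * Breakpoints from different infrastructures cannot share an overlapping
      * address range; anything else may coexist.
      */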
 164static bool can_co_exist(struct breakpoint *b, struct perf_event *bp)
 165{
 166        return !(alternate_infra_bp(b, bp) && bp_addr_range_overlap(b->bp, bp));
 167}
 168
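     /* Add book-keeping for @bp to the global task_bps list. */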
 169static int task_bps_add(struct perf_event *bp)
 170{
 171        struct breakpoint *tmp;
 172
 173        tmp = alloc_breakpoint(bp);
 174        if (IS_ERR(tmp))
 175                return PTR_ERR(tmp);
 176
 177        list_add(&tmp->list, &task_bps);
 178        return 0;
 179}
 180
 181static void task_bps_remove(struct perf_event *bp)
 182{
 183        struct list_head *pos, *q;
 184
 185        list_for_each_safe(pos, q, &task_bps) {
 186                struct breakpoint *tmp = list_entry(pos, struct breakpoint, list);
 187
 188                if (tmp->bp == bp) {
 189                        list_del(&tmp->list);
 190                        kfree(tmp);
 191                        break;
 192                }
 193        }
 194}
 195
 196/*
  197 * If any task has a breakpoint from the alternate infrastructure,
 198 * return true. Otherwise return false.
 199 */
 200static bool all_task_bps_check(struct perf_event *bp)
 201{
 202        struct breakpoint *tmp;
 203
 204        list_for_each_entry(tmp, &task_bps, list) {
 205                if (!can_co_exist(tmp, bp))
 206                        return true;
 207        }
 208        return false;
 209}
 210
 211/*
  212 * If the same task has a breakpoint from the alternate infrastructure,
 213 * return true. Otherwise return false.
 214 */
 215static bool same_task_bps_check(struct perf_event *bp)
 216{
 217        struct breakpoint *tmp;
 218
 219        list_for_each_entry(tmp, &task_bps, list) {
 220                if (tmp->bp->hw.target == bp->hw.target &&
 221                    !can_co_exist(tmp, bp))
 222                        return true;
 223        }
 224        return false;
 225}
 226
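     /* Record @bp in a free cpu_bps[] slot of the CPU it is bound to. */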
 227static int cpu_bps_add(struct perf_event *bp)
 228{
 229        struct breakpoint **cpu_bp;
 230        struct breakpoint *tmp;
 231        int i = 0;
 232
 233        tmp = alloc_breakpoint(bp);
 234        if (IS_ERR(tmp))
 235                return PTR_ERR(tmp);
 236
 237        cpu_bp = per_cpu_ptr(cpu_bps, bp->cpu);
 238        for (i = 0; i < nr_wp_slots(); i++) {
 239                if (!cpu_bp[i]) {
 240                        cpu_bp[i] = tmp;
 241                        break;
 242                }
 243        }
 244        return 0;
 245}
 246
 247static void cpu_bps_remove(struct perf_event *bp)
 248{
 249        struct breakpoint **cpu_bp;
 250        int i = 0;
 251
 252        cpu_bp = per_cpu_ptr(cpu_bps, bp->cpu);
 253        for (i = 0; i < nr_wp_slots(); i++) {
 254                if (!cpu_bp[i])
 255                        continue;
 256
 257                if (cpu_bp[i]->bp == bp) {
 258                        kfree(cpu_bp[i]);
 259                        cpu_bp[i] = NULL;
 260                        break;
 261                }
 262        }
 263}
 264
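     /* True if @bp cannot coexist with a breakpoint already recorded for @cpu. */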
 265static bool cpu_bps_check(int cpu, struct perf_event *bp)
 266{
 267        struct breakpoint **cpu_bp;
 268        int i;
 269
 270        cpu_bp = per_cpu_ptr(cpu_bps, cpu);
 271        for (i = 0; i < nr_wp_slots(); i++) {
 272                if (cpu_bp[i] && !can_co_exist(cpu_bp[i], bp))
 273                        return true;
 274        }
 275        return false;
 276}
 277
 278static bool all_cpu_bps_check(struct perf_event *bp)
 279{
 280        int cpu;
 281
 282        for_each_online_cpu(cpu) {
 283                if (cpu_bps_check(cpu, bp))
 284                        return true;
 285        }
 286        return false;
 287}
 288
 289/*
 290 * We don't use any locks to serialize accesses to cpu_bps or task_bps
  291 * because we are already inside nr_bp_mutex.
 292 */
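     /*
      * Reserve book-keeping for @bp. Returns -ENOSPC if it cannot coexist with
      * a breakpoint already requested by the other infrastructure for the same
      * task or CPU.
      */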
 293int arch_reserve_bp_slot(struct perf_event *bp)
 294{
 295        int ret;
 296
 297        /* ptrace breakpoint */
 298        if (is_ptrace_bp(bp)) {
 299                if (all_cpu_bps_check(bp))
 300                        return -ENOSPC;
 301
 302                if (same_task_bps_check(bp))
 303                        return -ENOSPC;
 304
 305                return task_bps_add(bp);
 306        }
 307
 308        /* perf breakpoint */
 309        if (is_kernel_addr(bp->attr.bp_addr))
 310                return 0;
 311
 312        if (bp->hw.target && bp->cpu == -1) {
 313                if (same_task_bps_check(bp))
 314                        return -ENOSPC;
 315
 316                return task_bps_add(bp);
 317        } else if (!bp->hw.target && bp->cpu != -1) {
 318                if (all_task_bps_check(bp))
 319                        return -ENOSPC;
 320
 321                return cpu_bps_add(bp);
 322        }
 323
 324        if (same_task_bps_check(bp))
 325                return -ENOSPC;
 326
 327        ret = cpu_bps_add(bp);
 328        if (ret)
 329                return ret;
 330        ret = task_bps_add(bp);
 331        if (ret)
 332                cpu_bps_remove(bp);
 333
 334        return ret;
 335}
 336
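     /* Drop the book-keeping added by arch_reserve_bp_slot(). */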
 337void arch_release_bp_slot(struct perf_event *bp)
 338{
 339        if (!is_kernel_addr(bp->attr.bp_addr)) {
 340                if (bp->hw.target)
 341                        task_bps_remove(bp);
 342                if (bp->cpu != -1)
 343                        cpu_bps_remove(bp);
 344        }
 345}
 346
 347/*
 348 * Perform cleanup of arch-specific counters during unregistration
 349 * of the perf-event
 350 */
 351void arch_unregister_hw_breakpoint(struct perf_event *bp)
 352{
 353        /*
 354         * If the breakpoint is unregistered between a hw_breakpoint_handler()
  355         * and the single_step_dabr_instruction(), then clean up the breakpoint
 356         * restoration variables to prevent dangling pointers.
 357         * FIXME, this should not be using bp->ctx at all! Sayeth peterz.
 358         */
 359        if (bp->ctx && bp->ctx->task && bp->ctx->task != ((void *)-1L)) {
 360                int i;
 361
 362                for (i = 0; i < nr_wp_slots(); i++) {
 363                        if (bp->ctx->task->thread.last_hit_ubp[i] == bp)
 364                                bp->ctx->task->thread.last_hit_ubp[i] = NULL;
 365                }
 366        }
 367}
 368
 369/*
 370 * Check for virtual address in kernel space.
 371 */
 372int arch_check_bp_in_kernelspace(struct arch_hw_breakpoint *hw)
 373{
 374        return is_kernel_addr(hw->address);
 375}
 376
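     /*
      * Translate the arch-specific breakpoint type bits into generic
      * HW_BREAKPOINT_R/W flags.
      */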
 377int arch_bp_generic_fields(int type, int *gen_bp_type)
 378{
 379        *gen_bp_type = 0;
 380        if (type & HW_BRK_TYPE_READ)
 381                *gen_bp_type |= HW_BREAKPOINT_R;
 382        if (type & HW_BRK_TYPE_WRITE)
 383                *gen_bp_type |= HW_BREAKPOINT_W;
 384        if (*gen_bp_type == 0)
 385                return -EINVAL;
 386        return 0;
 387}
 388
 389/*
  390 * Watchpoint match range is always doubleword (8 bytes) aligned on
  391 * powerpc. If the given range crosses a doubleword boundary, we
  392 * need to increase the length such that the next doubleword also gets
  393 * covered. For example:
 394 *
 395 *          address   len = 6 bytes
 396 *                |=========.
 397 *   |------------v--|------v--------|
 398 *   | | | | | | | | | | | | | | | | |
 399 *   |---------------|---------------|
 400 *    <---8 bytes--->
 401 *
 402 * In this case, we should configure hw as:
 403 *   start_addr = address & ~(HW_BREAKPOINT_SIZE - 1)
 404 *   len = 16 bytes
 405 *
 406 * @start_addr is inclusive but @end_addr is exclusive.
 407 */
 408static int hw_breakpoint_validate_len(struct arch_hw_breakpoint *hw)
 409{
 410        u16 max_len = DABR_MAX_LEN;
 411        u16 hw_len;
 412        unsigned long start_addr, end_addr;
 413
 414        start_addr = ALIGN_DOWN(hw->address, HW_BREAKPOINT_SIZE);
 415        end_addr = ALIGN(hw->address + hw->len, HW_BREAKPOINT_SIZE);
 416        hw_len = end_addr - start_addr;
 417
 418        if (dawr_enabled()) {
 419                max_len = DAWR_MAX_LEN;
  420                /* DAWR region can't cross a 512-byte boundary on P10 predecessors */
 421                if (!cpu_has_feature(CPU_FTR_ARCH_31) &&
 422                    (ALIGN_DOWN(start_addr, SZ_512) != ALIGN_DOWN(end_addr - 1, SZ_512)))
 423                        return -EINVAL;
 424        } else if (IS_ENABLED(CONFIG_PPC_8xx)) {
  425                /* 8xx can set up a range without limitation */
 426                max_len = U16_MAX;
 427        }
 428
 429        if (hw_len > max_len)
 430                return -EINVAL;
 431
 432        hw->hw_len = hw_len;
 433        return 0;
 434}
 435
 436/*
 437 * Validate the arch-specific HW Breakpoint register settings
 438 */
 439int hw_breakpoint_arch_parse(struct perf_event *bp,
 440                             const struct perf_event_attr *attr,
 441                             struct arch_hw_breakpoint *hw)
 442{
 443        int ret = -EINVAL;
 444
 445        if (!bp || !attr->bp_len)
 446                return ret;
 447
 448        hw->type = HW_BRK_TYPE_TRANSLATE;
 449        if (attr->bp_type & HW_BREAKPOINT_R)
 450                hw->type |= HW_BRK_TYPE_READ;
 451        if (attr->bp_type & HW_BREAKPOINT_W)
 452                hw->type |= HW_BRK_TYPE_WRITE;
 453        if (hw->type == HW_BRK_TYPE_TRANSLATE)
  454                /* must set at least read or write */
 455                return ret;
 456        if (!attr->exclude_user)
 457                hw->type |= HW_BRK_TYPE_USER;
 458        if (!attr->exclude_kernel)
 459                hw->type |= HW_BRK_TYPE_KERNEL;
 460        if (!attr->exclude_hv)
 461                hw->type |= HW_BRK_TYPE_HYP;
 462        hw->address = attr->bp_addr;
 463        hw->len = attr->bp_len;
 464
 465        if (!ppc_breakpoint_available())
 466                return -ENODEV;
 467
 468        return hw_breakpoint_validate_len(hw);
 469}
 470
 471/*
  472 * Restores the breakpoints in the debug registers.
  473 * Invoke this function if it is known that the execution context is
  474 * about to change, causing loss of the MSR_SE setting.
 475 */
 476void thread_change_pc(struct task_struct *tsk, struct pt_regs *regs)
 477{
 478        struct arch_hw_breakpoint *info;
 479        int i;
 480
 481        for (i = 0; i < nr_wp_slots(); i++) {
 482                if (unlikely(tsk->thread.last_hit_ubp[i]))
 483                        goto reset;
 484        }
 485        return;
 486
 487reset:
 488        regs_set_return_msr(regs, regs->msr & ~MSR_SE);
 489        for (i = 0; i < nr_wp_slots(); i++) {
 490                info = counter_arch_bp(__this_cpu_read(bp_per_reg[i]));
 491                __set_breakpoint(i, info);
 492                tsk->thread.last_hit_ubp[i] = NULL;
 493        }
 494}
 495
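     /* Hits by larx/stcx. instructions can't be emulated; see larx_stcx_err(). */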
 496static bool is_larx_stcx_instr(int type)
 497{
 498        return type == LARX || type == STCX;
 499}
 500
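     /* 32-byte (octword) VSX load/store, relevant to the Power10 DD1 workaround. */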
 501static bool is_octword_vsx_instr(int type, int size)
 502{
 503        return ((type == LOAD_VSX || type == STORE_VSX) && size == 32);
 504}
 505
 506/*
 507 * We've failed in reliably handling the hw-breakpoint. Unregister
 508 * it and throw a warning message to let the user know about it.
 509 */
 510static void handler_error(struct perf_event *bp, struct arch_hw_breakpoint *info)
 511{
 512        WARN(1, "Unable to handle hardware breakpoint. Breakpoint at 0x%lx will be disabled.",
 513             info->address);
 514        perf_event_disable_inatomic(bp);
 515}
 516
 517static void larx_stcx_err(struct perf_event *bp, struct arch_hw_breakpoint *info)
 518{
 519        printk_ratelimited("Breakpoint hit on instruction that can't be emulated. Breakpoint at 0x%lx will be disabled.\n",
 520                           info->address);
 521        perf_event_disable_inatomic(bp);
 522}
 523
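     /*
      * Get past the instruction that triggered the watchpoint: single-step
      * user-space instructions, emulate kernel ones. Returns true if the
      * breakpoints can be restored immediately, false if restoration is
      * deferred or the breakpoint had to be disabled.
      */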
 524static bool stepping_handler(struct pt_regs *regs, struct perf_event **bp,
 525                             struct arch_hw_breakpoint **info, int *hit,
 526                             struct ppc_inst instr)
 527{
 528        int i;
 529        int stepped;
 530
 531        /* Do not emulate user-space instructions, instead single-step them */
 532        if (user_mode(regs)) {
 533                for (i = 0; i < nr_wp_slots(); i++) {
 534                        if (!hit[i])
 535                                continue;
 536                        current->thread.last_hit_ubp[i] = bp[i];
 537                        info[i] = NULL;
 538                }
 539                regs_set_return_msr(regs, regs->msr | MSR_SE);
 540                return false;
 541        }
 542
 543        stepped = emulate_step(regs, instr);
 544        if (!stepped) {
 545                for (i = 0; i < nr_wp_slots(); i++) {
 546                        if (!hit[i])
 547                                continue;
 548                        handler_error(bp[i], info[i]);
 549                        info[i] = NULL;
 550                }
 551                return false;
 552        }
 553        return true;
 554}
 555
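     /*
      * Power10 DD1 can raise a DAWR match exception for an octword VSX access
      * even though no watched range was hit. When the access matches the
      * erratum pattern, flag every installed breakpoint as an extraneous hit
      * so the exception is consumed without reporting an event.
      */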
 556static void handle_p10dd1_spurious_exception(struct arch_hw_breakpoint **info,
 557                                             int *hit, unsigned long ea)
 558{
 559        int i;
 560        unsigned long hw_end_addr;
 561
 562        /*
 563         * Handle spurious exception only when any bp_per_reg is set.
 564         * Otherwise this might be created by xmon and not actually a
 565         * spurious exception.
 566         */
 567        for (i = 0; i < nr_wp_slots(); i++) {
 568                if (!info[i])
 569                        continue;
 570
 571                hw_end_addr = ALIGN(info[i]->address + info[i]->len, HW_BREAKPOINT_SIZE);
 572
 573                /*
 574                 * Ending address of DAWR range is less than starting
 575                 * address of op.
 576                 */
 577                if ((hw_end_addr - 1) >= ea)
 578                        continue;
 579
 580                /*
 581                 * Those addresses need to be in the same or in two
  582                 * consecutive 512B blocks.
 583                 */
 584                if (((hw_end_addr - 1) >> 10) != (ea >> 10))
 585                        continue;
 586
 587                /*
 588                 * 'op address + 64B' generates an address that has a
 589                 * carry into bit 52 (crosses 2K boundary).
 590                 */
 591                if ((ea & 0x800) == ((ea + 64) & 0x800))
 592                        continue;
 593
 594                break;
 595        }
 596
 597        if (i == nr_wp_slots())
 598                return;
 599
 600        for (i = 0; i < nr_wp_slots(); i++) {
 601                if (info[i]) {
 602                        hit[i] = 1;
 603                        info[i]->type |= HW_BRK_TYPE_EXTRANEOUS_IRQ;
 604                }
 605        }
 606}
 607
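     /*
      * Handle a DABR/DAWR match exception: determine which installed
      * breakpoints were hit, step or emulate past the access, and report
      * events with 'trigger-after-execute' semantics.
      */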
 608int hw_breakpoint_handler(struct die_args *args)
 609{
 610        bool err = false;
 611        int rc = NOTIFY_STOP;
 612        struct perf_event *bp[HBP_NUM_MAX] = { NULL };
 613        struct pt_regs *regs = args->regs;
 614        struct arch_hw_breakpoint *info[HBP_NUM_MAX] = { NULL };
 615        int i;
 616        int hit[HBP_NUM_MAX] = {0};
 617        int nr_hit = 0;
 618        bool ptrace_bp = false;
 619        struct ppc_inst instr = ppc_inst(0);
 620        int type = 0;
 621        int size = 0;
 622        unsigned long ea;
 623
 624        /* Disable breakpoints during exception handling */
 625        hw_breakpoint_disable();
 626
 627        /*
 628         * The counter may be concurrently released but that can only
 629         * occur from a call_rcu() path. We can then safely fetch
 630         * the breakpoint, use its callback, touch its counter
 631         * while we are in an rcu_read_lock() path.
 632         */
 633        rcu_read_lock();
 634
 635        if (!IS_ENABLED(CONFIG_PPC_8xx))
 636                wp_get_instr_detail(regs, &instr, &type, &size, &ea);
 637
 638        for (i = 0; i < nr_wp_slots(); i++) {
 639                bp[i] = __this_cpu_read(bp_per_reg[i]);
 640                if (!bp[i])
 641                        continue;
 642
 643                info[i] = counter_arch_bp(bp[i]);
 644                info[i]->type &= ~HW_BRK_TYPE_EXTRANEOUS_IRQ;
 645
 646                if (wp_check_constraints(regs, instr, ea, type, size, info[i])) {
 647                        if (!IS_ENABLED(CONFIG_PPC_8xx) &&
 648                            ppc_inst_equal(instr, ppc_inst(0))) {
 649                                handler_error(bp[i], info[i]);
 650                                info[i] = NULL;
 651                                err = 1;
 652                                continue;
 653                        }
 654
 655                        if (is_ptrace_bp(bp[i]))
 656                                ptrace_bp = true;
 657                        hit[i] = 1;
 658                        nr_hit++;
 659                }
 660        }
 661
 662        if (err)
 663                goto reset;
 664
 665        if (!nr_hit) {
 666                /* Workaround for Power10 DD1 */
 667                if (!IS_ENABLED(CONFIG_PPC_8xx) && mfspr(SPRN_PVR) == 0x800100 &&
 668                    is_octword_vsx_instr(type, size)) {
 669                        handle_p10dd1_spurious_exception(info, hit, ea);
 670                } else {
 671                        rc = NOTIFY_DONE;
 672                        goto out;
 673                }
 674        }
 675
 676        /*
  677         * Return early after invoking the user-callback function without restoring
 678         * DABR if the breakpoint is from ptrace which always operates in
 679         * one-shot mode. The ptrace-ed process will receive the SIGTRAP signal
 680         * generated in do_dabr().
 681         */
 682        if (ptrace_bp) {
 683                for (i = 0; i < nr_wp_slots(); i++) {
 684                        if (!hit[i])
 685                                continue;
 686                        perf_bp_event(bp[i], regs);
 687                        info[i] = NULL;
 688                }
 689                rc = NOTIFY_DONE;
 690                goto reset;
 691        }
 692
 693        if (!IS_ENABLED(CONFIG_PPC_8xx)) {
 694                if (is_larx_stcx_instr(type)) {
 695                        for (i = 0; i < nr_wp_slots(); i++) {
 696                                if (!hit[i])
 697                                        continue;
 698                                larx_stcx_err(bp[i], info[i]);
 699                                info[i] = NULL;
 700                        }
 701                        goto reset;
 702                }
 703
 704                if (!stepping_handler(regs, bp, info, hit, instr))
 705                        goto reset;
 706        }
 707
 708        /*
 709         * As a policy, the callback is invoked in a 'trigger-after-execute'
  710         * fashion.
 711         */
 712        for (i = 0; i < nr_wp_slots(); i++) {
 713                if (!hit[i])
 714                        continue;
 715                if (!(info[i]->type & HW_BRK_TYPE_EXTRANEOUS_IRQ))
 716                        perf_bp_event(bp[i], regs);
 717        }
 718
 719reset:
 720        for (i = 0; i < nr_wp_slots(); i++) {
 721                if (!info[i])
 722                        continue;
 723                __set_breakpoint(i, info[i]);
 724        }
 725
 726out:
 727        rcu_read_unlock();
 728        return rc;
 729}
 730NOKPROBE_SYMBOL(hw_breakpoint_handler);
 731
 732/*
 733 * Handle single-step exceptions following a DABR hit.
 734 */
 735static int single_step_dabr_instruction(struct die_args *args)
 736{
 737        struct pt_regs *regs = args->regs;
 738        struct perf_event *bp = NULL;
 739        struct arch_hw_breakpoint *info;
 740        int i;
 741        bool found = false;
 742
 743        /*
 744         * Check if we are single-stepping as a result of a
 745         * previous HW Breakpoint exception
 746         */
 747        for (i = 0; i < nr_wp_slots(); i++) {
 748                bp = current->thread.last_hit_ubp[i];
 749
 750                if (!bp)
 751                        continue;
 752
 753                found = true;
 754                info = counter_arch_bp(bp);
 755
 756                /*
 757                 * We shall invoke the user-defined callback function in the
  758                 * single-stepping handler to conform to 'trigger-after-execute'
  759                 * semantics.
 760                 */
 761                if (!(info->type & HW_BRK_TYPE_EXTRANEOUS_IRQ))
 762                        perf_bp_event(bp, regs);
 763                current->thread.last_hit_ubp[i] = NULL;
 764        }
 765
 766        if (!found)
 767                return NOTIFY_DONE;
 768
 769        for (i = 0; i < nr_wp_slots(); i++) {
 770                bp = __this_cpu_read(bp_per_reg[i]);
 771                if (!bp)
 772                        continue;
 773
 774                info = counter_arch_bp(bp);
 775                __set_breakpoint(i, info);
 776        }
 777
 778        /*
 779         * If the process was being single-stepped by ptrace, let the
 780         * other single-step actions occur (e.g. generate SIGTRAP).
 781         */
 782        if (test_thread_flag(TIF_SINGLESTEP))
 783                return NOTIFY_DONE;
 784
 785        return NOTIFY_STOP;
 786}
 787NOKPROBE_SYMBOL(single_step_dabr_instruction);
 788
 789/*
 790 * Handle debug exception notifications.
 791 */
 792int hw_breakpoint_exceptions_notify(
 793                struct notifier_block *unused, unsigned long val, void *data)
 794{
 795        int ret = NOTIFY_DONE;
 796
 797        switch (val) {
 798        case DIE_DABR_MATCH:
 799                ret = hw_breakpoint_handler(data);
 800                break;
 801        case DIE_SSTEP:
 802                ret = single_step_dabr_instruction(data);
 803                break;
 804        }
 805
 806        return ret;
 807}
 808NOKPROBE_SYMBOL(hw_breakpoint_exceptions_notify);
 809
 810/*
 811 * Release the user breakpoints used by ptrace
 812 */
 813void flush_ptrace_hw_breakpoint(struct task_struct *tsk)
 814{
 815        int i;
 816        struct thread_struct *t = &tsk->thread;
 817
 818        for (i = 0; i < nr_wp_slots(); i++) {
 819                unregister_hw_breakpoint(t->ptrace_bps[i]);
 820                t->ptrace_bps[i] = NULL;
 821        }
 822}
 823
 824void hw_breakpoint_pmu_read(struct perf_event *bp)
 825{
 826        /* TODO */
 827}
 828
 829void ptrace_triggered(struct perf_event *bp,
 830                      struct perf_sample_data *data, struct pt_regs *regs)
 831{
 832        struct perf_event_attr attr;
 833
 834        /*
 835         * Disable the breakpoint request here since ptrace has defined a
 836         * one-shot behaviour for breakpoint exceptions in PPC64.
 837         * The SIGTRAP signal is generated automatically for us in do_dabr().
  838         * We don't have to do anything about that here.
 839         */
 840        attr = bp->attr;
 841        attr.disabled = true;
 842        modify_user_hw_breakpoint(bp, &attr);
 843}
 844