linux/drivers/hwtracing/coresight/coresight-cpu-debug.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2017 Linaro Limited. All rights reserved.
 *
 * Author: Leo Yan <leo.yan@linaro.org>
 */
#include <linux/amba/bus.h>
#include <linux/coresight.h>
#include <linux/cpu.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/panic_notifier.h>
#include <linux/pm_qos.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/types.h>
#include <linux/uaccess.h>

#include "coresight-priv.h"

#define EDPCSR                          0x0A0
#define EDCIDSR                         0x0A4
#define EDVIDSR                         0x0A8
#define EDPCSR_HI                       0x0AC
#define EDOSLAR                         0x300
#define EDPRCR                          0x310
#define EDPRSR                          0x314
#define EDDEVID1                        0xFC4
#define EDDEVID                         0xFC8

#define EDPCSR_PROHIBITED               0xFFFFFFFF

/* bits definition for EDPCSR */
#define EDPCSR_THUMB                    BIT(0)
#define EDPCSR_ARM_INST_MASK            GENMASK(31, 2)
#define EDPCSR_THUMB_INST_MASK          GENMASK(31, 1)

/* bits definition for EDPRCR */
#define EDPRCR_COREPURQ                 BIT(3)
#define EDPRCR_CORENPDRQ                BIT(0)

/* bits definition for EDPRSR */
#define EDPRSR_DLK                      BIT(6)
#define EDPRSR_PU                       BIT(0)

/* bits definition for EDVIDSR */
#define EDVIDSR_NS                      BIT(31)
#define EDVIDSR_E2                      BIT(30)
#define EDVIDSR_E3                      BIT(29)
#define EDVIDSR_HV                      BIT(28)
#define EDVIDSR_VMID                    GENMASK(7, 0)

/*
 * bits definition for EDDEVID1:PCSROffset
 *
 * NOTE: ARMv8 and ARMv7 define this field differently, so the values
 * below consolidate both definitions:
 *
 * 0b0000 - Sample offset applies based on the instruction state; rely
 *          on EDDEVID to check whether EDPCSR is implemented at all
 * 0b0001 - No offset applies.
 * 0b0010 - No offset applies, but do not use in AArch32 mode
 */
#define EDDEVID1_PCSR_OFFSET_MASK       GENMASK(3, 0)
#define EDDEVID1_PCSR_OFFSET_INS_SET    (0x0)
#define EDDEVID1_PCSR_NO_OFFSET_DIS_AARCH32     (0x2)

/* bits definition for EDDEVID */
#define EDDEVID_PCSAMPLE_MODE           GENMASK(3, 0)
#define EDDEVID_IMPL_EDPCSR             (0x1)
#define EDDEVID_IMPL_EDPCSR_EDCIDSR     (0x2)
#define EDDEVID_IMPL_FULL               (0x3)

#define DEBUG_WAIT_SLEEP                1000
#define DEBUG_WAIT_TIMEOUT              32000
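/*
 * Both wait values are in microseconds, as consumed by
 * readx_poll_timeout_atomic(): poll EDPRSR every 1 ms for up to ~32 ms.
 */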

struct debug_drvdata {
        void __iomem    *base;
        struct device   *dev;
        int             cpu;

        bool            edpcsr_present;
        bool            edcidsr_present;
        bool            edvidsr_present;
        bool            pc_has_offset;

        u32             edpcsr;
        u32             edpcsr_hi;
        u32             edprsr;
        u32             edvidsr;
        u32             edcidsr;
};

static DEFINE_MUTEX(debug_lock);
static DEFINE_PER_CPU(struct debug_drvdata *, debug_drvdata);
static int debug_count;
static struct dentry *debug_debugfs_dir;

static bool debug_enable;
module_param_named(enable, debug_enable, bool, 0600);
MODULE_PARM_DESC(enable, "Control to enable coresight CPU debug functionality");
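
/*
 * Typical usage (illustrative; the debugfs path assumes the usual mount
 * point and the module name "coresight_cpu_debug"):
 *
 *   - on the kernel command line:  coresight_cpu_debug.enable=1
 *   - at run time:                 echo 1 > /sys/kernel/debug/coresight_cpu_debug/enable
 *
 * See Documentation/trace/coresight/coresight-cpu-debug.rst, if present
 * in this kernel tree, for details.
 */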

static void debug_os_unlock(struct debug_drvdata *drvdata)
{
        /* Unlocks the debug registers */
        writel_relaxed(0x0, drvdata->base + EDOSLAR);

        /* Make sure the registers are unlocked before accessing */
        wmb();
}

/*
 * According to ARM DDI 0487A.k, the access permission must be checked
 * before touching the external debug registers; if either of the
 * conditions below is met, the debug registers must not be accessed,
 * otherwise the access may lock up:
 *
 * - the CPU power domain is powered off;
 * - the OS Double Lock is locked.
 *
 * Reading EDPRSR tells us whether either condition applies.
 */
static bool debug_access_permitted(struct debug_drvdata *drvdata)
{
        /* CPU is powered off */
        if (!(drvdata->edprsr & EDPRSR_PU))
                return false;

        /* The OS Double Lock is locked */
        if (drvdata->edprsr & EDPRSR_DLK)
                return false;

        return true;
}

static void debug_force_cpu_powered_up(struct debug_drvdata *drvdata)
{
        u32 edprcr;

try_again:

        /*
         * Send a request to the power management controller by asserting
         * the DBGPWRUPREQ signal; a sane power controller implementation
         * will then enable the CPU power domain if the CPU is in a low
         * power state.
         */
        edprcr = readl_relaxed(drvdata->base + EDPRCR);
        edprcr |= EDPRCR_COREPURQ;
        writel_relaxed(edprcr, drvdata->base + EDPRCR);

        /* Wait for CPU to be powered up (timeout~=32ms) */
        if (readx_poll_timeout_atomic(readl_relaxed, drvdata->base + EDPRSR,
                        drvdata->edprsr, (drvdata->edprsr & EDPRSR_PU),
                        DEBUG_WAIT_SLEEP, DEBUG_WAIT_TIMEOUT)) {
                /*
                 * Unfortunately the CPU could not be powered up, so bail
                 * out here; the caller will then have no permission to
                 * access the other registers. In this case, CPU low power
                 * states should be disabled so that the CPU power domain
                 * stays enabled!
                 */
                dev_err(drvdata->dev, "%s: power up request for CPU%d failed\n",
                        __func__, drvdata->cpu);
                return;
        }

        /*
         * At this point the CPU is powered up, so set the no powerdown
         * request bit so we don't lose power and emulate power down.
         */
        edprcr = readl_relaxed(drvdata->base + EDPRCR);
        edprcr |= EDPRCR_COREPURQ | EDPRCR_CORENPDRQ;
        writel_relaxed(edprcr, drvdata->base + EDPRCR);

        drvdata->edprsr = readl_relaxed(drvdata->base + EDPRSR);

        /* The core power domain was switched off while in use, try again */
        if (unlikely(!(drvdata->edprsr & EDPRSR_PU)))
                goto try_again;
}

static void debug_read_regs(struct debug_drvdata *drvdata)
{
        u32 save_edprcr;

        CS_UNLOCK(drvdata->base);

        /* Unlock os lock */
        debug_os_unlock(drvdata);

        /* Save EDPRCR register */
        save_edprcr = readl_relaxed(drvdata->base + EDPRCR);

        /*
         * Ensure the CPU power domain is enabled so that the
         * registers are accessible.
         */
        debug_force_cpu_powered_up(drvdata);

        if (!debug_access_permitted(drvdata))
                goto out;

        drvdata->edpcsr = readl_relaxed(drvdata->base + EDPCSR);

        /*
         * As described in ARM DDI 0487A.k, if the processing
         * element (PE) is in debug state, or sample-based
         * profiling is prohibited, EDPCSR reads as 0xFFFFFFFF;
         * the EDCIDSR, EDVIDSR and EDPCSR_HI registers are then
         * also UNKNOWN. Simply bail out in this case.
         */
        if (drvdata->edpcsr == EDPCSR_PROHIBITED)
                goto out;

        /*
         * A read of EDPCSR normally has the side effect of
         * indirectly writing EDCIDSR, EDVIDSR and EDPCSR_HI;
         * at this point it is safe to read values from them.
         */
        if (IS_ENABLED(CONFIG_64BIT))
                drvdata->edpcsr_hi = readl_relaxed(drvdata->base + EDPCSR_HI);

        if (drvdata->edcidsr_present)
                drvdata->edcidsr = readl_relaxed(drvdata->base + EDCIDSR);

        if (drvdata->edvidsr_present)
                drvdata->edvidsr = readl_relaxed(drvdata->base + EDVIDSR);

out:
        /* Restore EDPRCR register */
        writel_relaxed(save_edprcr, drvdata->base + EDPRCR);

        CS_LOCK(drvdata->base);
}

#ifdef CONFIG_64BIT
static unsigned long debug_adjust_pc(struct debug_drvdata *drvdata)
{
        return (unsigned long)drvdata->edpcsr_hi << 32 |
               (unsigned long)drvdata->edpcsr;
}
#else
static unsigned long debug_adjust_pc(struct debug_drvdata *drvdata)
{
        unsigned long arm_inst_offset = 0, thumb_inst_offset = 0;
        unsigned long pc;

        pc = (unsigned long)drvdata->edpcsr;

        if (drvdata->pc_has_offset) {
                arm_inst_offset = 8;
                thumb_inst_offset = 4;
        }

        /* Handle thumb instruction */
        if (pc & EDPCSR_THUMB) {
                pc = (pc & EDPCSR_THUMB_INST_MASK) - thumb_inst_offset;
                return pc;
        }

        /*
         * Handle the ARM instruction offset: if the sampled value is
         * not 4-byte aligned, the offset is likely IMPLEMENTATION
         * DEFINED; keep the original value in that case and print a
         * notice.
         */
        if (pc & BIT(1))
                dev_emerg(drvdata->dev,
                          "Instruction offset is implementation defined\n");
        else
                pc = (pc & EDPCSR_ARM_INST_MASK) - arm_inst_offset;

        return pc;
}
#endif
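
/*
 * Worked example for the AArch32 path above, with illustrative sample
 * values and EDDEVID1.PCSROffset == 0b0000 (i.e. pc_has_offset == true):
 *
 *   EDPCSR = 0x800812a9  (Thumb bit set):  pc = 0x800812a8 - 4 = 0x800812a4
 *   EDPCSR = 0xc0101234  (ARM, aligned):   pc = 0xc0101234 - 8 = 0xc010122c
 *
 * On a 64-bit kernel the two 32-bit halves are simply concatenated:
 *
 *   pc = ((u64)EDPCSR_HI << 32) | EDPCSR
 */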

static void debug_dump_regs(struct debug_drvdata *drvdata)
{
        struct device *dev = drvdata->dev;
        unsigned long pc;

        dev_emerg(dev, " EDPRSR:  %08x (Power:%s DLK:%s)\n",
                  drvdata->edprsr,
                  drvdata->edprsr & EDPRSR_PU ? "On" : "Off",
                  drvdata->edprsr & EDPRSR_DLK ? "Lock" : "Unlock");

        if (!debug_access_permitted(drvdata)) {
                dev_emerg(dev, "No permission to access debug registers!\n");
                return;
        }

        if (drvdata->edpcsr == EDPCSR_PROHIBITED) {
                dev_emerg(dev, "CPU is in Debug state or profiling is prohibited!\n");
                return;
        }

        pc = debug_adjust_pc(drvdata);
        dev_emerg(dev, " EDPCSR:  %pS\n", (void *)pc);

        if (drvdata->edcidsr_present)
                dev_emerg(dev, " EDCIDSR: %08x\n", drvdata->edcidsr);

        if (drvdata->edvidsr_present)
                dev_emerg(dev, " EDVIDSR: %08x (State:%s Mode:%s Width:%dbits VMID:%x)\n",
                          drvdata->edvidsr,
                          drvdata->edvidsr & EDVIDSR_NS ?
                          "Non-secure" : "Secure",
                          drvdata->edvidsr & EDVIDSR_E3 ? "EL3" :
                                (drvdata->edvidsr & EDVIDSR_E2 ?
                                 "EL2" : "EL1/0"),
                          drvdata->edvidsr & EDVIDSR_HV ? 64 : 32,
                          drvdata->edvidsr & (u32)EDVIDSR_VMID);
}

static void debug_init_arch_data(void *info)
{
        struct debug_drvdata *drvdata = info;
        u32 mode, pcsr_offset;
        u32 eddevid, eddevid1;

        CS_UNLOCK(drvdata->base);

        /* Read device info */
        eddevid  = readl_relaxed(drvdata->base + EDDEVID);
        eddevid1 = readl_relaxed(drvdata->base + EDDEVID1);

        CS_LOCK(drvdata->base);

        /* Parse implementation feature */
        mode = eddevid & EDDEVID_PCSAMPLE_MODE;
        pcsr_offset = eddevid1 & EDDEVID1_PCSR_OFFSET_MASK;

        drvdata->edpcsr_present  = false;
        drvdata->edcidsr_present = false;
        drvdata->edvidsr_present = false;
        drvdata->pc_has_offset   = false;

        switch (mode) {
        case EDDEVID_IMPL_FULL:
                drvdata->edvidsr_present = true;
                fallthrough;
        case EDDEVID_IMPL_EDPCSR_EDCIDSR:
                drvdata->edcidsr_present = true;
                fallthrough;
        case EDDEVID_IMPL_EDPCSR:
                /*
                 * In ARM DDI 0487A.k, EDDEVID1.PCSROffset defines whether
                 * an offset applies to the sampled PC value; if it reads
                 * back as 0x2, the debug module does not sample the
                 * instruction set state when an ARMv8 CPU is in AArch32
                 * state.
                 */
                drvdata->edpcsr_present =
                        ((IS_ENABLED(CONFIG_64BIT) && pcsr_offset != 0) ||
                         (pcsr_offset != EDDEVID1_PCSR_NO_OFFSET_DIS_AARCH32));

                drvdata->pc_has_offset =
                        (pcsr_offset == EDDEVID1_PCSR_OFFSET_INS_SET);
                break;
        default:
                break;
        }
}

/*
 * Dump out information on panic.
 */
static int debug_notifier_call(struct notifier_block *self,
                               unsigned long v, void *p)
{
        int cpu;
        struct debug_drvdata *drvdata;

        mutex_lock(&debug_lock);

        /* Bail out if the functionality is disabled */
        if (!debug_enable)
                goto skip_dump;

        pr_emerg("ARM external debug module:\n");

        for_each_possible_cpu(cpu) {
                drvdata = per_cpu(debug_drvdata, cpu);
                if (!drvdata)
                        continue;

                dev_emerg(drvdata->dev, "CPU[%d]:\n", drvdata->cpu);

                debug_read_regs(drvdata);
                debug_dump_regs(drvdata);
        }

skip_dump:
        mutex_unlock(&debug_lock);
        return 0;
}

static struct notifier_block debug_notifier = {
        .notifier_call = debug_notifier_call,
};

static int debug_enable_func(void)
{
        struct debug_drvdata *drvdata;
        int cpu, ret = 0;
        cpumask_t mask;

        /*
         * Use a cpumask to track which debug power domains have
         * been powered on, so that the failure path can roll them back.
         */
        cpumask_clear(&mask);

        for_each_possible_cpu(cpu) {
                drvdata = per_cpu(debug_drvdata, cpu);
                if (!drvdata)
                        continue;

                ret = pm_runtime_get_sync(drvdata->dev);
                if (ret < 0)
                        goto err;
                else
                        cpumask_set_cpu(cpu, &mask);
        }

        return 0;

err:
        /*
         * If pm_runtime_get_sync() failed, roll back all the CPUs
         * that were successfully enabled before the failure.
         */
        for_each_cpu(cpu, &mask) {
                drvdata = per_cpu(debug_drvdata, cpu);
                pm_runtime_put_noidle(drvdata->dev);
        }

        return ret;
}

static int debug_disable_func(void)
{
        struct debug_drvdata *drvdata;
        int cpu, ret, err = 0;

        /*
         * Disable the debug power domains; if an error is encountered,
         * record it but keep iterating over the remaining CPUs.
         */
        for_each_possible_cpu(cpu) {
                drvdata = per_cpu(debug_drvdata, cpu);
                if (!drvdata)
                        continue;

                ret = pm_runtime_put(drvdata->dev);
                if (ret < 0)
                        err = ret;
        }

        return err;
}

static ssize_t debug_func_knob_write(struct file *f,
                const char __user *buf, size_t count, loff_t *ppos)
{
        u8 val;
        int ret;

        ret = kstrtou8_from_user(buf, count, 2, &val);
        if (ret)
                return ret;

        mutex_lock(&debug_lock);

        if (val == debug_enable)
                goto out;

        if (val)
                ret = debug_enable_func();
        else
                ret = debug_disable_func();

        if (ret) {
                pr_err("%s: unable to %s debug function: %d\n",
                       __func__, val ? "enable" : "disable", ret);
                goto err;
        }

        debug_enable = val;
out:
        ret = count;
err:
        mutex_unlock(&debug_lock);
        return ret;
}

static ssize_t debug_func_knob_read(struct file *f,
                char __user *ubuf, size_t count, loff_t *ppos)
{
        ssize_t ret;
        char buf[3];

        mutex_lock(&debug_lock);
        snprintf(buf, sizeof(buf), "%d\n", debug_enable);
        mutex_unlock(&debug_lock);

        ret = simple_read_from_buffer(ubuf, count, ppos, buf, sizeof(buf));
        return ret;
}

static const struct file_operations debug_func_knob_fops = {
        .open   = simple_open,
        .read   = debug_func_knob_read,
        .write  = debug_func_knob_write,
};

static int debug_func_init(void)
{
        int ret;

        /* Create debugfs node */
        debug_debugfs_dir = debugfs_create_dir("coresight_cpu_debug", NULL);
        debugfs_create_file("enable", 0644, debug_debugfs_dir, NULL,
                            &debug_func_knob_fops);

        /* Register function to be called for panic */
        ret = atomic_notifier_chain_register(&panic_notifier_list,
                                             &debug_notifier);
        if (ret) {
                pr_err("%s: unable to register notifier: %d\n",
                       __func__, ret);
                goto err;
        }

        return 0;

err:
        debugfs_remove_recursive(debug_debugfs_dir);
        return ret;
}

static void debug_func_exit(void)
{
        atomic_notifier_chain_unregister(&panic_notifier_list,
                                         &debug_notifier);
        debugfs_remove_recursive(debug_debugfs_dir);
}

static int debug_probe(struct amba_device *adev, const struct amba_id *id)
{
        void __iomem *base;
        struct device *dev = &adev->dev;
        struct debug_drvdata *drvdata;
        struct resource *res = &adev->res;
        int ret;

        drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
        if (!drvdata)
                return -ENOMEM;

        drvdata->cpu = coresight_get_cpu(dev);
        if (drvdata->cpu < 0)
                return drvdata->cpu;

        if (per_cpu(debug_drvdata, drvdata->cpu)) {
                dev_err(dev, "CPU%d drvdata has already been initialized\n",
                        drvdata->cpu);
                return -EBUSY;
        }

        drvdata->dev = &adev->dev;
        amba_set_drvdata(adev, drvdata);

        /* Validity for the resource is already checked by the AMBA core */
        base = devm_ioremap_resource(dev, res);
        if (IS_ERR(base))
                return PTR_ERR(base);

        drvdata->base = base;

        cpus_read_lock();
        per_cpu(debug_drvdata, drvdata->cpu) = drvdata;
        ret = smp_call_function_single(drvdata->cpu, debug_init_arch_data,
                                       drvdata, 1);
        cpus_read_unlock();

        if (ret) {
                dev_err(dev, "CPU%d debug arch init failed\n", drvdata->cpu);
                goto err;
        }

        if (!drvdata->edpcsr_present) {
                dev_err(dev, "CPU%d sample-based profiling isn't implemented\n",
                        drvdata->cpu);
                ret = -ENXIO;
                goto err;
        }

        if (!debug_count++) {
                ret = debug_func_init();
                if (ret)
                        goto err_func_init;
        }

        mutex_lock(&debug_lock);
        /* Turn off debug power domain if debugging is disabled */
        if (!debug_enable)
                pm_runtime_put(dev);
        mutex_unlock(&debug_lock);

        dev_info(dev, "Coresight debug-CPU%d initialized\n", drvdata->cpu);
        return 0;

err_func_init:
        debug_count--;
err:
        per_cpu(debug_drvdata, drvdata->cpu) = NULL;
        return ret;
}

static void debug_remove(struct amba_device *adev)
{
        struct device *dev = &adev->dev;
        struct debug_drvdata *drvdata = amba_get_drvdata(adev);

        per_cpu(debug_drvdata, drvdata->cpu) = NULL;

        mutex_lock(&debug_lock);
        /* Turn off the debug power domain before removing the module */
        if (debug_enable)
                pm_runtime_put(dev);
        mutex_unlock(&debug_lock);

        if (!--debug_count)
                debug_func_exit();
}

static const struct amba_cs_uci_id uci_id_debug[] = {
        {
                /*  CPU Debug UCI data */
                .devarch        = 0x47706a15,
                .devarch_mask   = 0xfff0ffff,
                .devtype        = 0x00000015,
        }
};

static const struct amba_id debug_ids[] = {
        CS_AMBA_ID(0x000bbd03),                         /* Cortex-A53 */
        CS_AMBA_ID(0x000bbd07),                         /* Cortex-A57 */
        CS_AMBA_ID(0x000bbd08),                         /* Cortex-A72 */
        CS_AMBA_ID(0x000bbd09),                         /* Cortex-A73 */
        CS_AMBA_UCI_ID(0x000f0205, uci_id_debug),       /* Qualcomm Kryo */
        CS_AMBA_UCI_ID(0x000f0211, uci_id_debug),       /* Qualcomm Kryo */
        {},
};

MODULE_DEVICE_TABLE(amba, debug_ids);

static struct amba_driver debug_driver = {
        .drv = {
                .name   = "coresight-cpu-debug",
                .suppress_bind_attrs = true,
        },
        .probe          = debug_probe,
        .remove         = debug_remove,
        .id_table       = debug_ids,
};

module_amba_driver(debug_driver);

MODULE_AUTHOR("Leo Yan <leo.yan@linaro.org>");
MODULE_DESCRIPTION("ARM Coresight CPU Debug Driver");
MODULE_LICENSE("GPL");