linux/drivers/hwtracing/coresight/coresight-etm3x.c
   1/* Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
   2 *
   3 * Description: CoreSight Program Flow Trace driver
   4 *
   5 * This program is free software; you can redistribute it and/or modify
   6 * it under the terms of the GNU General Public License version 2 and
   7 * only version 2 as published by the Free Software Foundation.
   8 *
   9 * This program is distributed in the hope that it will be useful,
  10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  12 * GNU General Public License for more details.
  13 */
  14
  15#include <linux/kernel.h>
  16#include <linux/moduleparam.h>
  17#include <linux/init.h>
  18#include <linux/types.h>
  19#include <linux/device.h>
  20#include <linux/io.h>
  21#include <linux/err.h>
  22#include <linux/fs.h>
  23#include <linux/slab.h>
  24#include <linux/delay.h>
  25#include <linux/smp.h>
  26#include <linux/sysfs.h>
  27#include <linux/stat.h>
  28#include <linux/pm_runtime.h>
  29#include <linux/cpu.h>
  30#include <linux/of.h>
  31#include <linux/coresight.h>
  32#include <linux/coresight-pmu.h>
  33#include <linux/amba/bus.h>
  34#include <linux/seq_file.h>
  35#include <linux/uaccess.h>
  36#include <linux/clk.h>
  37#include <linux/perf_event.h>
  38#include <asm/sections.h>
  39
  40#include "coresight-etm.h"
  41#include "coresight-etm-perf.h"
  42
  43/*
  44 * Not really modular but using module_param is the easiest way to
  45 * remain consistent with existing use cases for now.
  46 */
  47static int boot_enable;
  48module_param_named(boot_enable, boot_enable, int, S_IRUGO);
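/*
 * When built in, boot_enable can also be set on the kernel command line,
 * e.g. "coresight_etm3x.boot_enable=1" (assuming the usual KBUILD_MODNAME
 * derived parameter prefix).
 */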
  49
  50/* The number of ETM/PTM currently registered */
  51static int etm_count;
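/* Per-cpu lookup table, filled at probe time and used by the hotplug callbacks */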
  52static struct etm_drvdata *etmdrvdata[NR_CPUS];
  53
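/*
 * Dynamic CPU hotplug state returned by cpuhp_setup_state_nocalls_cpuslocked(),
 * kept so the "online" callbacks can be removed again in the error path.
 */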
  54static enum cpuhp_state hp_online;
  55
  56/*
   57 * Memory mapped writes to clear the OS lock are not supported on some
   58 * processors; on such processors the OS lock must be unlocked before any
   59 * memory mapped access, otherwise memory mapped reads/writes will be invalid.
  60 */
  61static void etm_os_unlock(struct etm_drvdata *drvdata)
  62{
  63        /* Writing any value to ETMOSLAR unlocks the trace registers */
  64        etm_writel(drvdata, 0x0, ETMOSLAR);
  65        drvdata->os_unlock = true;
  66        isb();
  67}
  68
  69static void etm_set_pwrdwn(struct etm_drvdata *drvdata)
  70{
  71        u32 etmcr;
  72
  73        /* Ensure pending cp14 accesses complete before setting pwrdwn */
  74        mb();
  75        isb();
  76        etmcr = etm_readl(drvdata, ETMCR);
  77        etmcr |= ETMCR_PWD_DWN;
  78        etm_writel(drvdata, etmcr, ETMCR);
  79}
  80
  81static void etm_clr_pwrdwn(struct etm_drvdata *drvdata)
  82{
  83        u32 etmcr;
  84
  85        etmcr = etm_readl(drvdata, ETMCR);
  86        etmcr &= ~ETMCR_PWD_DWN;
  87        etm_writel(drvdata, etmcr, ETMCR);
  88        /* Ensure pwrup completes before subsequent cp14 accesses */
  89        mb();
  90        isb();
  91}
  92
  93static void etm_set_pwrup(struct etm_drvdata *drvdata)
  94{
  95        u32 etmpdcr;
  96
  97        etmpdcr = readl_relaxed(drvdata->base + ETMPDCR);
  98        etmpdcr |= ETMPDCR_PWD_UP;
  99        writel_relaxed(etmpdcr, drvdata->base + ETMPDCR);
 100        /* Ensure pwrup completes before subsequent cp14 accesses */
 101        mb();
 102        isb();
 103}
 104
 105static void etm_clr_pwrup(struct etm_drvdata *drvdata)
 106{
 107        u32 etmpdcr;
 108
 109        /* Ensure pending cp14 accesses complete before clearing pwrup */
 110        mb();
 111        isb();
 112        etmpdcr = readl_relaxed(drvdata->base + ETMPDCR);
 113        etmpdcr &= ~ETMPDCR_PWD_UP;
 114        writel_relaxed(etmpdcr, drvdata->base + ETMPDCR);
 115}
 116
 117/**
 118 * coresight_timeout_etm - loop until a bit has changed to a specific state.
 119 * @drvdata: etm's private data structure.
  120 * @offset: offset of the register of interest.
 121 * @position: the position of the bit of interest.
 122 * @value: the value the bit should have.
 123 *
  124 * Basically the same as coresight_timeout() except for the register access
  125 * method, where we have to account for CP14 configurations.
  126 *
  127 * Return: 0 as soon as the bit has taken the desired state or -EAGAIN if
  128 * TIMEOUT_US has elapsed, whichever happens first.
 129 */
 130
 131static int coresight_timeout_etm(struct etm_drvdata *drvdata, u32 offset,
 132                                  int position, int value)
 133{
 134        int i;
 135        u32 val;
 136
 137        for (i = TIMEOUT_US; i > 0; i--) {
 138                val = etm_readl(drvdata, offset);
 139                /* Waiting on the bit to go from 0 to 1 */
 140                if (value) {
 141                        if (val & BIT(position))
 142                                return 0;
 143                /* Waiting on the bit to go from 1 to 0 */
 144                } else {
 145                        if (!(val & BIT(position)))
 146                                return 0;
 147                }
 148
 149                /*
 150                 * Delay is arbitrary - the specification doesn't say how long
 151                 * we are expected to wait.  Extra check required to make sure
 152                 * we don't wait needlessly on the last iteration.
 153                 */
 154                if (i - 1)
 155                        udelay(1);
 156        }
 157
 158        return -EAGAIN;
 159}
 160
 161
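/*
 * Put the ETM in programming mode: set the PROG bit in ETMCR and wait for
 * ETMSR to confirm it, so the configuration registers can be modified safely.
 */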
 162static void etm_set_prog(struct etm_drvdata *drvdata)
 163{
 164        u32 etmcr;
 165
 166        etmcr = etm_readl(drvdata, ETMCR);
 167        etmcr |= ETMCR_ETM_PRG;
 168        etm_writel(drvdata, etmcr, ETMCR);
 169        /*
 170         * Recommended by spec for cp14 accesses to ensure etmcr write is
 171         * complete before polling etmsr
 172         */
 173        isb();
 174        if (coresight_timeout_etm(drvdata, ETMSR, ETMSR_PROG_BIT, 1)) {
 175                dev_err(drvdata->dev,
 176                        "%s: timeout observed when probing at offset %#x\n",
 177                        __func__, ETMSR);
 178        }
 179}
 180
 181static void etm_clr_prog(struct etm_drvdata *drvdata)
 182{
 183        u32 etmcr;
 184
 185        etmcr = etm_readl(drvdata, ETMCR);
 186        etmcr &= ~ETMCR_ETM_PRG;
 187        etm_writel(drvdata, etmcr, ETMCR);
 188        /*
 189         * Recommended by spec for cp14 accesses to ensure etmcr write is
 190         * complete before polling etmsr
 191         */
 192        isb();
 193        if (coresight_timeout_etm(drvdata, ETMSR, ETMSR_PROG_BIT, 0)) {
 194                dev_err(drvdata->dev,
 195                        "%s: timeout observed when probing at offset %#x\n",
 196                        __func__, ETMSR);
 197        }
 198}
 199
 200void etm_set_default(struct etm_config *config)
 201{
 202        int i;
 203
 204        if (WARN_ON_ONCE(!config))
 205                return;
 206
 207        /*
 208         * Taken verbatim from the TRM:
 209         *
 210         * To trace all memory:
 211         *  set bit [24] in register 0x009, the ETMTECR1, to 1
 212         *  set all other bits in register 0x009, the ETMTECR1, to 0
 213         *  set all bits in register 0x007, the ETMTECR2, to 0
 214         *  set register 0x008, the ETMTEEVR, to 0x6F (TRUE).
 215         */
 216        config->enable_ctrl1 = BIT(24);
 217        config->enable_ctrl2 = 0x0;
 218        config->enable_event = ETM_HARD_WIRE_RES_A;
 219
 220        config->trigger_event = ETM_DEFAULT_EVENT_VAL;
 222
 223        config->seq_12_event = ETM_DEFAULT_EVENT_VAL;
 224        config->seq_21_event = ETM_DEFAULT_EVENT_VAL;
 225        config->seq_23_event = ETM_DEFAULT_EVENT_VAL;
 226        config->seq_31_event = ETM_DEFAULT_EVENT_VAL;
 227        config->seq_32_event = ETM_DEFAULT_EVENT_VAL;
 228        config->seq_13_event = ETM_DEFAULT_EVENT_VAL;
 229        config->timestamp_event = ETM_DEFAULT_EVENT_VAL;
 230
 231        for (i = 0; i < ETM_MAX_CNTR; i++) {
 232                config->cntr_rld_val[i] = 0x0;
 233                config->cntr_event[i] = ETM_DEFAULT_EVENT_VAL;
 234                config->cntr_rld_event[i] = ETM_DEFAULT_EVENT_VAL;
 235                config->cntr_val[i] = 0x0;
 236        }
 237
 238        config->seq_curr_state = 0x0;
 239        config->ctxid_idx = 0x0;
 240        for (i = 0; i < ETM_MAX_CTXID_CMP; i++) {
 241                config->ctxid_pid[i] = 0x0;
 242                config->ctxid_vpid[i] = 0x0;
 243        }
 244
 245        config->ctxid_mask = 0x0;
 246        /* Setting default to 1024 as per TRM recommendation */
 247        config->sync_freq = 0x400;
 248}
 249
 250void etm_config_trace_mode(struct etm_config *config)
 251{
 252        u32 flags, mode;
 253
 254        mode = config->mode;
 255
 256        mode &= (ETM_MODE_EXCL_KERN | ETM_MODE_EXCL_USER);
 257
 258        /* excluding kernel AND user space doesn't make sense */
 259        if (mode == (ETM_MODE_EXCL_KERN | ETM_MODE_EXCL_USER))
 260                return;
 261
  262        /* nothing to do if neither flag is set */
 263        if (!(mode & ETM_MODE_EXCL_KERN) && !(mode & ETM_MODE_EXCL_USER))
 264                return;
 265
 266        flags = (1 << 0 |       /* instruction execute */
 267                 3 << 3 |       /* ARM instruction */
 268                 0 << 5 |       /* No data value comparison */
  269                 0 << 7 |       /* No exact match */
 270                 0 << 8);       /* Ignore context ID */
 271
 272        /* No need to worry about single address comparators. */
 273        config->enable_ctrl2 = 0x0;
 274
 275        /* Bit 0 is address range comparator 1 */
 276        config->enable_ctrl1 = ETMTECR1_ADDR_COMP_1;
 277
 278        /*
 279         * On ETMv3.5:
 280         * ETMACTRn[13,11] == Non-secure state comparison control
 281         * ETMACTRn[12,10] == Secure state comparison control
 282         *
 283         * b00 == Match in all modes in this state
  284         * b01 == Do not match in any mode in this state
  285         * b10 == Match in all modes except user mode in this state
 286         * b11 == Match only in user mode in this state
 287         */
 288
 289        /* Tracing in secure mode is not supported at this time */
 290        flags |= (0 << 12 | 1 << 10);
 291
 292        if (mode & ETM_MODE_EXCL_USER) {
 293                /* exclude user, match all modes except user mode */
 294                flags |= (1 << 13 | 0 << 11);
 295        } else {
 296                /* exclude kernel, match only in user mode */
 297                flags |= (1 << 13 | 1 << 11);
 298        }
 299
 300        /*
  301         * The ETMTEEVR register is already set to "hard wire A".  As such
  302         * all there is to do is set up an address comparator that spans
 303         * the entire address range and configure the state and mode bits.
 304         */
 305        config->addr_val[0] = (u32) 0x0;
 306        config->addr_val[1] = (u32) ~0x0;
 307        config->addr_acctype[0] = flags;
 308        config->addr_acctype[1] = flags;
 309        config->addr_type[0] = ETM_ADDR_TYPE_RANGE;
 310        config->addr_type[1] = ETM_ADDR_TYPE_RANGE;
 311}
 312
 313#define ETM3X_SUPPORTED_OPTIONS (ETMCR_CYC_ACC | \
 314                                 ETMCR_TIMESTAMP_EN | \
 315                                 ETMCR_RETURN_STACK)
 316
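/*
 * Translate the perf event attributes into an etm_config: start from the
 * default configuration, narrow the traced address range if user/kernel
 * exclusion was requested, and validate the requested options.
 */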
 317static int etm_parse_event_config(struct etm_drvdata *drvdata,
 318                                  struct perf_event *event)
 319{
 320        struct etm_config *config = &drvdata->config;
 321        struct perf_event_attr *attr = &event->attr;
 322
 323        if (!attr)
 324                return -EINVAL;
 325
 326        /* Clear configuration from previous run */
 327        memset(config, 0, sizeof(struct etm_config));
 328
 329        if (attr->exclude_kernel)
 330                config->mode = ETM_MODE_EXCL_KERN;
 331
 332        if (attr->exclude_user)
 333                config->mode = ETM_MODE_EXCL_USER;
 334
 335        /* Always start from the default config */
 336        etm_set_default(config);
 337
 338        /*
 339         * By default the tracers are configured to trace the whole address
 340         * range.  Narrow the field only if requested by user space.
 341         */
 342        if (config->mode)
 343                etm_config_trace_mode(config);
 344
 345        /*
  346         * At this time only cycle accurate, return stack and timestamp
 347         * options are available.
 348         */
 349        if (attr->config & ~ETM3X_SUPPORTED_OPTIONS)
 350                return -EINVAL;
 351
 352        config->ctrl = attr->config;
 353
 354        /*
 355         * Possible to have cores with PTM (supports ret stack) and ETM
 356         * (never has ret stack) on the same SoC. So if we have a request
 357         * for return stack that can't be honoured on this core then
 358         * clear the bit - trace will still continue normally
 359         */
 360        if ((config->ctrl & ETMCR_RETURN_STACK) &&
 361            !(drvdata->etmccer & ETMCCER_RETSTACK))
 362                config->ctrl &= ~ETMCR_RETURN_STACK;
 363
 364        return 0;
 365}
 366
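/*
 * Configure and start the trace unit.  This must run on the CPU the ETM is
 * attached to, hence the smp_call_function_single() in the sysfs path and
 * the direct call from the STARTING hotplug callback.
 */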
 367static void etm_enable_hw(void *info)
 368{
 369        int i;
 370        u32 etmcr;
 371        struct etm_drvdata *drvdata = info;
 372        struct etm_config *config = &drvdata->config;
 373
 374        CS_UNLOCK(drvdata->base);
 375
 376        /* Turn engine on */
 377        etm_clr_pwrdwn(drvdata);
 378        /* Apply power to trace registers */
 379        etm_set_pwrup(drvdata);
 380        /* Make sure all registers are accessible */
 381        etm_os_unlock(drvdata);
 382
 383        etm_set_prog(drvdata);
 384
 385        etmcr = etm_readl(drvdata, ETMCR);
 386        /* Clear setting from a previous run if need be */
 387        etmcr &= ~ETM3X_SUPPORTED_OPTIONS;
 388        etmcr |= drvdata->port_size;
 389        etmcr |= ETMCR_ETM_EN;
 390        etm_writel(drvdata, config->ctrl | etmcr, ETMCR);
 391        etm_writel(drvdata, config->trigger_event, ETMTRIGGER);
 392        etm_writel(drvdata, config->startstop_ctrl, ETMTSSCR);
 393        etm_writel(drvdata, config->enable_event, ETMTEEVR);
 394        etm_writel(drvdata, config->enable_ctrl1, ETMTECR1);
 395        etm_writel(drvdata, config->fifofull_level, ETMFFLR);
 396        for (i = 0; i < drvdata->nr_addr_cmp; i++) {
 397                etm_writel(drvdata, config->addr_val[i], ETMACVRn(i));
 398                etm_writel(drvdata, config->addr_acctype[i], ETMACTRn(i));
 399        }
 400        for (i = 0; i < drvdata->nr_cntr; i++) {
 401                etm_writel(drvdata, config->cntr_rld_val[i], ETMCNTRLDVRn(i));
 402                etm_writel(drvdata, config->cntr_event[i], ETMCNTENRn(i));
 403                etm_writel(drvdata, config->cntr_rld_event[i],
 404                           ETMCNTRLDEVRn(i));
 405                etm_writel(drvdata, config->cntr_val[i], ETMCNTVRn(i));
 406        }
 407        etm_writel(drvdata, config->seq_12_event, ETMSQ12EVR);
 408        etm_writel(drvdata, config->seq_21_event, ETMSQ21EVR);
 409        etm_writel(drvdata, config->seq_23_event, ETMSQ23EVR);
 410        etm_writel(drvdata, config->seq_31_event, ETMSQ31EVR);
 411        etm_writel(drvdata, config->seq_32_event, ETMSQ32EVR);
 412        etm_writel(drvdata, config->seq_13_event, ETMSQ13EVR);
 413        etm_writel(drvdata, config->seq_curr_state, ETMSQR);
 414        for (i = 0; i < drvdata->nr_ext_out; i++)
 415                etm_writel(drvdata, ETM_DEFAULT_EVENT_VAL, ETMEXTOUTEVRn(i));
 416        for (i = 0; i < drvdata->nr_ctxid_cmp; i++)
 417                etm_writel(drvdata, config->ctxid_pid[i], ETMCIDCVRn(i));
 418        etm_writel(drvdata, config->ctxid_mask, ETMCIDCMR);
 419        etm_writel(drvdata, config->sync_freq, ETMSYNCFR);
 420        /* No external input selected */
 421        etm_writel(drvdata, 0x0, ETMEXTINSELR);
 422        etm_writel(drvdata, config->timestamp_event, ETMTSEVR);
 423        /* No auxiliary control selected */
 424        etm_writel(drvdata, 0x0, ETMAUXCR);
 425        etm_writel(drvdata, drvdata->traceid, ETMTRACEIDR);
 426        /* No VMID comparator value selected */
 427        etm_writel(drvdata, 0x0, ETMVMIDCVR);
 428
 429        etm_clr_prog(drvdata);
 430        CS_LOCK(drvdata->base);
 431
 432        dev_dbg(drvdata->dev, "cpu: %d enable smp call done\n", drvdata->cpu);
 433}
 434
 435static int etm_cpu_id(struct coresight_device *csdev)
 436{
 437        struct etm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
 438
 439        return drvdata->cpu;
 440}
 441
 442int etm_get_trace_id(struct etm_drvdata *drvdata)
 443{
 444        unsigned long flags;
 445        int trace_id = -1;
 446
 447        if (!drvdata)
 448                goto out;
 449
 450        if (!local_read(&drvdata->mode))
 451                return drvdata->traceid;
 452
 453        pm_runtime_get_sync(drvdata->dev);
 454
 455        spin_lock_irqsave(&drvdata->spinlock, flags);
 456
 457        CS_UNLOCK(drvdata->base);
 458        trace_id = (etm_readl(drvdata, ETMTRACEIDR) & ETM_TRACEID_MASK);
 459        CS_LOCK(drvdata->base);
 460
 461        spin_unlock_irqrestore(&drvdata->spinlock, flags);
 462        pm_runtime_put(drvdata->dev);
 463
 464out:
 465        return trace_id;
 467}
 468
 469static int etm_trace_id(struct coresight_device *csdev)
 470{
 471        struct etm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
 472
 473        return etm_get_trace_id(drvdata);
 474}
 475
 476static int etm_enable_perf(struct coresight_device *csdev,
 477                           struct perf_event *event)
 478{
 479        struct etm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
 480
 481        if (WARN_ON_ONCE(drvdata->cpu != smp_processor_id()))
 482                return -EINVAL;
 483
 484        /* Configure the tracer based on the session's specifics */
 485        etm_parse_event_config(drvdata, event);
 486        /* And enable it */
 487        etm_enable_hw(drvdata);
 488
 489        return 0;
 490}
 491
 492static int etm_enable_sysfs(struct coresight_device *csdev)
 493{
 494        struct etm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
 495        int ret;
 496
 497        spin_lock(&drvdata->spinlock);
 498
 499        /*
 500         * Configure the ETM only if the CPU is online.  If it isn't online
 501         * hw configuration will take place on the local CPU during bring up.
 502         */
 503        if (cpu_online(drvdata->cpu)) {
 504                ret = smp_call_function_single(drvdata->cpu,
 505                                               etm_enable_hw, drvdata, 1);
 506                if (ret)
 507                        goto err;
 508        }
 509
 510        drvdata->sticky_enable = true;
 511        spin_unlock(&drvdata->spinlock);
 512
 513        dev_info(drvdata->dev, "ETM tracing enabled\n");
 514        return 0;
 515
 516err:
 517        spin_unlock(&drvdata->spinlock);
 518        return ret;
 519}
 520
 521static int etm_enable(struct coresight_device *csdev,
 522                      struct perf_event *event, u32 mode)
 523{
 524        int ret;
 525        u32 val;
 526        struct etm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
 527
 528        val = local_cmpxchg(&drvdata->mode, CS_MODE_DISABLED, mode);
 529
 530        /* Someone is already using the tracer */
 531        if (val)
 532                return -EBUSY;
 533
 534        switch (mode) {
 535        case CS_MODE_SYSFS:
 536                ret = etm_enable_sysfs(csdev);
 537                break;
 538        case CS_MODE_PERF:
 539                ret = etm_enable_perf(csdev, event);
 540                break;
 541        default:
 542                ret = -EINVAL;
 543        }
 544
 545        /* The tracer didn't start */
 546        if (ret)
 547                local_set(&drvdata->mode, CS_MODE_DISABLED);
 548
 549        return ret;
 550}
 551
 552static void etm_disable_hw(void *info)
 553{
 554        int i;
 555        struct etm_drvdata *drvdata = info;
 556        struct etm_config *config = &drvdata->config;
 557
 558        CS_UNLOCK(drvdata->base);
 559        etm_set_prog(drvdata);
 560
 561        /* Read back sequencer and counters for post trace analysis */
 562        config->seq_curr_state = (etm_readl(drvdata, ETMSQR) & ETM_SQR_MASK);
 563
 564        for (i = 0; i < drvdata->nr_cntr; i++)
 565                config->cntr_val[i] = etm_readl(drvdata, ETMCNTVRn(i));
 566
 567        etm_set_pwrdwn(drvdata);
 568        CS_LOCK(drvdata->base);
 569
 570        dev_dbg(drvdata->dev, "cpu: %d disable smp call done\n", drvdata->cpu);
 571}
 572
 573static void etm_disable_perf(struct coresight_device *csdev)
 574{
 575        struct etm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
 576
 577        if (WARN_ON_ONCE(drvdata->cpu != smp_processor_id()))
 578                return;
 579
 580        CS_UNLOCK(drvdata->base);
 581
 582        /* Setting the prog bit disables tracing immediately */
 583        etm_set_prog(drvdata);
 584
 585        /*
 586         * There is no way to know when the tracer will be used again so
 587         * power down the tracer.
 588         */
 589        etm_set_pwrdwn(drvdata);
 590
 591        CS_LOCK(drvdata->base);
 592}
 593
 594static void etm_disable_sysfs(struct coresight_device *csdev)
 595{
 596        struct etm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
 597
 598        /*
 599         * Taking hotplug lock here protects from clocks getting disabled
 600         * with tracing being left on (crash scenario) if user disable occurs
 601         * after cpu online mask indicates the cpu is offline but before the
 602         * DYING hotplug callback is serviced by the ETM driver.
 603         */
 604        cpus_read_lock();
 605        spin_lock(&drvdata->spinlock);
 606
 607        /*
 608         * Executing etm_disable_hw on the cpu whose ETM is being disabled
  609         * ensures that register writes occur when the cpu is powered.
 610         */
 611        smp_call_function_single(drvdata->cpu, etm_disable_hw, drvdata, 1);
 612
 613        spin_unlock(&drvdata->spinlock);
 614        cpus_read_unlock();
 615
 616        dev_info(drvdata->dev, "ETM tracing disabled\n");
 617}
 618
 619static void etm_disable(struct coresight_device *csdev,
 620                        struct perf_event *event)
 621{
 622        u32 mode;
 623        struct etm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
 624
 625        /*
 626         * For as long as the tracer isn't disabled another entity can't
 627         * change its status.  As such we can read the status here without
 628         * fearing it will change under us.
 629         */
 630        mode = local_read(&drvdata->mode);
 631
 632        switch (mode) {
 633        case CS_MODE_DISABLED:
 634                break;
 635        case CS_MODE_SYSFS:
 636                etm_disable_sysfs(csdev);
 637                break;
 638        case CS_MODE_PERF:
 639                etm_disable_perf(csdev);
 640                break;
 641        default:
 642                WARN_ON_ONCE(mode);
 643                return;
 644        }
 645
 646        if (mode)
 647                local_set(&drvdata->mode, CS_MODE_DISABLED);
 648}
 649
 650static const struct coresight_ops_source etm_source_ops = {
 651        .cpu_id         = etm_cpu_id,
 652        .trace_id       = etm_trace_id,
 653        .enable         = etm_enable,
 654        .disable        = etm_disable,
 655};
 656
 657static const struct coresight_ops etm_cs_ops = {
 658        .source_ops     = &etm_source_ops,
 659};
 660
 661static int etm_online_cpu(unsigned int cpu)
 662{
 663        if (!etmdrvdata[cpu])
 664                return 0;
 665
 666        if (etmdrvdata[cpu]->boot_enable && !etmdrvdata[cpu]->sticky_enable)
 667                coresight_enable(etmdrvdata[cpu]->csdev);
 668        return 0;
 669}
 670
 671static int etm_starting_cpu(unsigned int cpu)
 672{
 673        if (!etmdrvdata[cpu])
 674                return 0;
 675
 676        spin_lock(&etmdrvdata[cpu]->spinlock);
 677        if (!etmdrvdata[cpu]->os_unlock) {
 678                etm_os_unlock(etmdrvdata[cpu]);
 679                etmdrvdata[cpu]->os_unlock = true;
 680        }
 681
 682        if (local_read(&etmdrvdata[cpu]->mode))
 683                etm_enable_hw(etmdrvdata[cpu]);
 684        spin_unlock(&etmdrvdata[cpu]->spinlock);
 685        return 0;
 686}
 687
 688static int etm_dying_cpu(unsigned int cpu)
 689{
 690        if (!etmdrvdata[cpu])
 691                return 0;
 692
 693        spin_lock(&etmdrvdata[cpu]->spinlock);
 694        if (local_read(&etmdrvdata[cpu]->mode))
 695                etm_disable_hw(etmdrvdata[cpu]);
 696        spin_unlock(&etmdrvdata[cpu]->spinlock);
 697        return 0;
 698}
 699
 700static bool etm_arch_supported(u8 arch)
 701{
 702        switch (arch) {
 703        case ETM_ARCH_V3_3:
 704                break;
 705        case ETM_ARCH_V3_5:
 706                break;
 707        case PFT_ARCH_V1_0:
 708                break;
 709        case PFT_ARCH_V1_1:
 710                break;
 711        default:
 712                return false;
 713        }
 714        return true;
 715}
 716
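/*
 * Discover the resources implemented by this trace unit (architecture
 * version, address and context ID comparators, counters, external inputs
 * and outputs) from its ID and configuration code registers.  Runs on the
 * CPU the ETM is attached to.
 */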
 717static void etm_init_arch_data(void *info)
 718{
 719        u32 etmidr;
 720        u32 etmccr;
 721        struct etm_drvdata *drvdata = info;
 722
 723        /* Make sure all registers are accessible */
 724        etm_os_unlock(drvdata);
 725
 726        CS_UNLOCK(drvdata->base);
 727
 728        /* First dummy read */
 729        (void)etm_readl(drvdata, ETMPDSR);
 730        /* Provide power to ETM: ETMPDCR[3] == 1 */
 731        etm_set_pwrup(drvdata);
 732        /*
  733         * Clear the power down bit; while it is set, writes to
  734         * certain registers might be ignored.
 735         */
 736        etm_clr_pwrdwn(drvdata);
 737        /*
  738         * Set prog bit. It will be set from reset but this is included to
  739         * ensure it is set.
 740         */
 741        etm_set_prog(drvdata);
 742
 743        /* Find all capabilities */
 744        etmidr = etm_readl(drvdata, ETMIDR);
 745        drvdata->arch = BMVAL(etmidr, 4, 11);
 746        drvdata->port_size = etm_readl(drvdata, ETMCR) & PORT_SIZE_MASK;
 747
 748        drvdata->etmccer = etm_readl(drvdata, ETMCCER);
 749        etmccr = etm_readl(drvdata, ETMCCR);
 750        drvdata->etmccr = etmccr;
 751        drvdata->nr_addr_cmp = BMVAL(etmccr, 0, 3) * 2;
 752        drvdata->nr_cntr = BMVAL(etmccr, 13, 15);
 753        drvdata->nr_ext_inp = BMVAL(etmccr, 17, 19);
 754        drvdata->nr_ext_out = BMVAL(etmccr, 20, 22);
 755        drvdata->nr_ctxid_cmp = BMVAL(etmccr, 24, 25);
 756
 757        etm_set_pwrdwn(drvdata);
 758        etm_clr_pwrup(drvdata);
 759        CS_LOCK(drvdata->base);
 760}
 761
 762static void etm_init_trace_id(struct etm_drvdata *drvdata)
 763{
 764        drvdata->traceid = coresight_get_trace_id(drvdata->cpu);
 765}
 766
 767static int etm_probe(struct amba_device *adev, const struct amba_id *id)
 768{
 769        int ret;
 770        void __iomem *base;
 771        struct device *dev = &adev->dev;
 772        struct coresight_platform_data *pdata = NULL;
 773        struct etm_drvdata *drvdata;
 774        struct resource *res = &adev->res;
 775        struct coresight_desc desc = { 0 };
 776        struct device_node *np = adev->dev.of_node;
 777
 778        drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
 779        if (!drvdata)
 780                return -ENOMEM;
 781
 782        if (np) {
 783                pdata = of_get_coresight_platform_data(dev, np);
 784                if (IS_ERR(pdata))
 785                        return PTR_ERR(pdata);
 786
 787                adev->dev.platform_data = pdata;
 788                drvdata->use_cp14 = of_property_read_bool(np, "arm,cp14");
 789        }
 790
 791        drvdata->dev = &adev->dev;
 792        dev_set_drvdata(dev, drvdata);
 793
 794        /* Validity for the resource is already checked by the AMBA core */
 795        base = devm_ioremap_resource(dev, res);
 796        if (IS_ERR(base))
 797                return PTR_ERR(base);
 798
 799        drvdata->base = base;
 800
 801        spin_lock_init(&drvdata->spinlock);
 802
 803        drvdata->atclk = devm_clk_get(&adev->dev, "atclk"); /* optional */
 804        if (!IS_ERR(drvdata->atclk)) {
 805                ret = clk_prepare_enable(drvdata->atclk);
 806                if (ret)
 807                        return ret;
 808        }
 809
 810        drvdata->cpu = pdata ? pdata->cpu : 0;
 811
 812        cpus_read_lock();
 813        etmdrvdata[drvdata->cpu] = drvdata;
 814
 815        if (smp_call_function_single(drvdata->cpu,
 816                                     etm_init_arch_data,  drvdata, 1))
 817                dev_err(dev, "ETM arch init failed\n");
 818
 819        if (!etm_count++) {
 820                cpuhp_setup_state_nocalls_cpuslocked(CPUHP_AP_ARM_CORESIGHT_STARTING,
 821                                                     "arm/coresight:starting",
 822                                                     etm_starting_cpu, etm_dying_cpu);
 823                ret = cpuhp_setup_state_nocalls_cpuslocked(CPUHP_AP_ONLINE_DYN,
 824                                                           "arm/coresight:online",
 825                                                           etm_online_cpu, NULL);
 826                if (ret < 0)
 827                        goto err_arch_supported;
 828                hp_online = ret;
 829        }
 830        cpus_read_unlock();
 831
  832        if (!etm_arch_supported(drvdata->arch)) {
 833                ret = -EINVAL;
 834                goto err_arch_supported;
 835        }
 836
 837        etm_init_trace_id(drvdata);
 838        etm_set_default(&drvdata->config);
 839
 840        desc.type = CORESIGHT_DEV_TYPE_SOURCE;
 841        desc.subtype.source_subtype = CORESIGHT_DEV_SUBTYPE_SOURCE_PROC;
 842        desc.ops = &etm_cs_ops;
 843        desc.pdata = pdata;
 844        desc.dev = dev;
 845        desc.groups = coresight_etm_groups;
 846        drvdata->csdev = coresight_register(&desc);
 847        if (IS_ERR(drvdata->csdev)) {
 848                ret = PTR_ERR(drvdata->csdev);
 849                goto err_arch_supported;
 850        }
 851
 852        ret = etm_perf_symlink(drvdata->csdev, true);
 853        if (ret) {
 854                coresight_unregister(drvdata->csdev);
 855                goto err_arch_supported;
 856        }
 857
 858        pm_runtime_put(&adev->dev);
 859        dev_info(dev, "%s initialized\n", (char *)id->data);
 860        if (boot_enable) {
 861                coresight_enable(drvdata->csdev);
 862                drvdata->boot_enable = true;
 863        }
 864
 865        return 0;
 866
 867err_arch_supported:
 868        if (--etm_count == 0) {
 869                cpuhp_remove_state_nocalls(CPUHP_AP_ARM_CORESIGHT_STARTING);
 870                if (hp_online)
 871                        cpuhp_remove_state_nocalls(hp_online);
 872        }
 873        return ret;
 874}
 875
 876#ifdef CONFIG_PM
 877static int etm_runtime_suspend(struct device *dev)
 878{
 879        struct etm_drvdata *drvdata = dev_get_drvdata(dev);
 880
 881        if (drvdata && !IS_ERR(drvdata->atclk))
 882                clk_disable_unprepare(drvdata->atclk);
 883
 884        return 0;
 885}
 886
 887static int etm_runtime_resume(struct device *dev)
 888{
 889        struct etm_drvdata *drvdata = dev_get_drvdata(dev);
 890
 891        if (drvdata && !IS_ERR(drvdata->atclk))
 892                clk_prepare_enable(drvdata->atclk);
 893
 894        return 0;
 895}
 896#endif
 897
 898static const struct dev_pm_ops etm_dev_pm_ops = {
 899        SET_RUNTIME_PM_OPS(etm_runtime_suspend, etm_runtime_resume, NULL)
 900};
 901
 902static const struct amba_id etm_ids[] = {
 903        {       /* ETM 3.3 */
 904                .id     = 0x000bb921,
 905                .mask   = 0x000fffff,
 906                .data   = "ETM 3.3",
 907        },
 908        {       /* ETM 3.5 - Cortex-A5 */
 909                .id     = 0x000bb955,
 910                .mask   = 0x000fffff,
 911                .data   = "ETM 3.5",
 912        },
 913        {       /* ETM 3.5 */
 914                .id     = 0x000bb956,
 915                .mask   = 0x000fffff,
 916                .data   = "ETM 3.5",
 917        },
 918        {       /* PTM 1.0 */
 919                .id     = 0x000bb950,
 920                .mask   = 0x000fffff,
 921                .data   = "PTM 1.0",
 922        },
 923        {       /* PTM 1.1 */
 924                .id     = 0x000bb95f,
 925                .mask   = 0x000fffff,
 926                .data   = "PTM 1.1",
 927        },
 928        {       /* PTM 1.1 Qualcomm */
 929                .id     = 0x000b006f,
 930                .mask   = 0x000fffff,
 931                .data   = "PTM 1.1",
 932        },
 933        { 0, 0},
 934};
 935
 936static struct amba_driver etm_driver = {
 937        .drv = {
 938                .name   = "coresight-etm3x",
 939                .owner  = THIS_MODULE,
 940                .pm     = &etm_dev_pm_ops,
 941                .suppress_bind_attrs = true,
 942        },
 943        .probe          = etm_probe,
 944        .id_table       = etm_ids,
 945};
 946builtin_amba_driver(etm_driver);
 947