linux/drivers/hwtracing/coresight/coresight-tmc.c
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
 *
 * Description: CoreSight Trace Memory Controller driver
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/device.h>
#include <linux/idr.h>
#include <linux/io.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/miscdevice.h>
#include <linux/mutex.h>
#include <linux/property.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>
#include <linux/spinlock.h>
#include <linux/pm_runtime.h>
#include <linux/of.h>
#include <linux/coresight.h>
#include <linux/amba/bus.h>

#include "coresight-priv.h"
#include "coresight-tmc.h"

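/* Device name pools used to generate the tmc_etbN/tmc_etfN/tmc_etrN names */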
DEFINE_CORESIGHT_DEVLIST(etb_devs, "tmc_etb");
DEFINE_CORESIGHT_DEVLIST(etf_devs, "tmc_etf");
DEFINE_CORESIGHT_DEVLIST(etr_devs, "tmc_etr");

void tmc_wait_for_tmcready(struct tmc_drvdata *drvdata)
{
        /* Ensure formatter, unformatter and hardware fifo are empty */
        if (coresight_timeout(drvdata->base,
                              TMC_STS, TMC_STS_TMCREADY_BIT, 1)) {
                dev_err(&drvdata->csdev->dev,
                        "timeout while waiting for TMC to be Ready\n");
        }
}

void tmc_flush_and_stop(struct tmc_drvdata *drvdata)
{
        u32 ffcr;

        ffcr = readl_relaxed(drvdata->base + TMC_FFCR);
        ffcr |= TMC_FFCR_STOP_ON_FLUSH;
        writel_relaxed(ffcr, drvdata->base + TMC_FFCR);
        ffcr |= BIT(TMC_FFCR_FLUSHMAN_BIT);
        writel_relaxed(ffcr, drvdata->base + TMC_FFCR);
        /* Ensure flush completes */
        if (coresight_timeout(drvdata->base,
                              TMC_FFCR, TMC_FFCR_FLUSHMAN_BIT, 0)) {
                dev_err(&drvdata->csdev->dev,
                "timeout while waiting for completion of Manual Flush\n");
        }

        tmc_wait_for_tmcready(drvdata);
}

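/* Start trace capture by setting the capture enable bit in TMC_CTL */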
void tmc_enable_hw(struct tmc_drvdata *drvdata)
{
        writel_relaxed(TMC_CTL_CAPT_EN, drvdata->base + TMC_CTL);
}

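/* Stop trace capture by clearing TMC_CTL */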
void tmc_disable_hw(struct tmc_drvdata *drvdata)
{
        writel_relaxed(0x0, drvdata->base + TMC_CTL);
}

u32 tmc_get_memwidth_mask(struct tmc_drvdata *drvdata)
{
        u32 mask = 0;

        /*
         * When moving RRP or an offset address forward, the new values must
         * be byte-address aligned to the width of the trace memory databus
         * _and_ to a frame boundary (16 byte), whichever is the biggest. For
         * example, for 32-bit, 64-bit and 128-bit wide trace memory, the four
         * LSBs must be 0s. For 256-bit wide trace memory, the five LSBs must
         * be 0s.
         */
        switch (drvdata->memwidth) {
        case TMC_MEM_INTF_WIDTH_32BITS:
        case TMC_MEM_INTF_WIDTH_64BITS:
        case TMC_MEM_INTF_WIDTH_128BITS:
                mask = GENMASK(31, 4);
                break;
        case TMC_MEM_INTF_WIDTH_256BITS:
                mask = GENMASK(31, 5);
                break;
        }

        return mask;
}

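/* Switch the TMC into a state where its buffered trace can be read out */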
static int tmc_read_prepare(struct tmc_drvdata *drvdata)
{
        int ret = 0;

        switch (drvdata->config_type) {
        case TMC_CONFIG_TYPE_ETB:
        case TMC_CONFIG_TYPE_ETF:
                ret = tmc_read_prepare_etb(drvdata);
                break;
        case TMC_CONFIG_TYPE_ETR:
                ret = tmc_read_prepare_etr(drvdata);
                break;
        default:
                ret = -EINVAL;
        }

        if (!ret)
                dev_dbg(&drvdata->csdev->dev, "TMC read start\n");

        return ret;
}

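/* Undo tmc_read_prepare() once the read session is over */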
static int tmc_read_unprepare(struct tmc_drvdata *drvdata)
{
        int ret = 0;

        switch (drvdata->config_type) {
        case TMC_CONFIG_TYPE_ETB:
        case TMC_CONFIG_TYPE_ETF:
                ret = tmc_read_unprepare_etb(drvdata);
                break;
        case TMC_CONFIG_TYPE_ETR:
                ret = tmc_read_unprepare_etr(drvdata);
                break;
        default:
                ret = -EINVAL;
        }

        if (!ret)
                dev_dbg(&drvdata->csdev->dev, "TMC read end\n");

        return ret;
}

static int tmc_open(struct inode *inode, struct file *file)
{
        int ret;
        struct tmc_drvdata *drvdata = container_of(file->private_data,
                                                   struct tmc_drvdata, miscdev);

        ret = tmc_read_prepare(drvdata);
        if (ret)
                return ret;

        nonseekable_open(inode, file);

        dev_dbg(&drvdata->csdev->dev, "%s: successfully opened\n", __func__);
        return 0;
}

static inline ssize_t tmc_get_sysfs_trace(struct tmc_drvdata *drvdata,
                                          loff_t pos, size_t len, char **bufpp)
{
        switch (drvdata->config_type) {
        case TMC_CONFIG_TYPE_ETB:
        case TMC_CONFIG_TYPE_ETF:
                return tmc_etb_get_sysfs_trace(drvdata, pos, len, bufpp);
        case TMC_CONFIG_TYPE_ETR:
                return tmc_etr_get_sysfs_trace(drvdata, pos, len, bufpp);
        }

        return -EINVAL;
}

static ssize_t tmc_read(struct file *file, char __user *data, size_t len,
                        loff_t *ppos)
{
        char *bufp;
        ssize_t actual;
        struct tmc_drvdata *drvdata = container_of(file->private_data,
                                                   struct tmc_drvdata, miscdev);
        actual = tmc_get_sysfs_trace(drvdata, *ppos, len, &bufp);
        if (actual <= 0)
                return 0;

        if (copy_to_user(data, bufp, actual)) {
                dev_dbg(&drvdata->csdev->dev,
                        "%s: copy_to_user failed\n", __func__);
                return -EFAULT;
        }

        *ppos += actual;
        dev_dbg(&drvdata->csdev->dev, "%zu bytes copied\n", actual);

        return actual;
}

static int tmc_release(struct inode *inode, struct file *file)
{
        int ret;
        struct tmc_drvdata *drvdata = container_of(file->private_data,
                                                   struct tmc_drvdata, miscdev);

        ret = tmc_read_unprepare(drvdata);
        if (ret)
                return ret;

        dev_dbg(&drvdata->csdev->dev, "%s: released\n", __func__);
        return 0;
}

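/* File operations for the per-TMC misc device used to read trace from user space */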
static const struct file_operations tmc_fops = {
        .owner          = THIS_MODULE,
        .open           = tmc_open,
        .read           = tmc_read,
        .release        = tmc_release,
        .llseek         = no_llseek,
};

static enum tmc_mem_intf_width tmc_get_memwidth(u32 devid)
{
        enum tmc_mem_intf_width memwidth;

        /*
         * Excerpt from the TRM:
         *
         * DEVID::MEMWIDTH[10:8]
         * 0x2 Memory interface databus is 32 bits wide.
         * 0x3 Memory interface databus is 64 bits wide.
         * 0x4 Memory interface databus is 128 bits wide.
         * 0x5 Memory interface databus is 256 bits wide.
         */
        switch (BMVAL(devid, 8, 10)) {
        case 0x2:
                memwidth = TMC_MEM_INTF_WIDTH_32BITS;
                break;
        case 0x3:
                memwidth = TMC_MEM_INTF_WIDTH_64BITS;
                break;
        case 0x4:
                memwidth = TMC_MEM_INTF_WIDTH_128BITS;
                break;
        case 0x5:
                memwidth = TMC_MEM_INTF_WIDTH_256BITS;
                break;
        default:
                memwidth = 0;
        }

        return memwidth;
}

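/* Read-only sysfs attributes exposing the raw TMC management registers (the "mgmt" group) */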
#define coresight_tmc_reg(name, offset)                 \
        coresight_simple_reg32(struct tmc_drvdata, name, offset)
#define coresight_tmc_reg64(name, lo_off, hi_off)       \
        coresight_simple_reg64(struct tmc_drvdata, name, lo_off, hi_off)

coresight_tmc_reg(rsz, TMC_RSZ);
coresight_tmc_reg(sts, TMC_STS);
coresight_tmc_reg(trg, TMC_TRG);
coresight_tmc_reg(ctl, TMC_CTL);
coresight_tmc_reg(ffsr, TMC_FFSR);
coresight_tmc_reg(ffcr, TMC_FFCR);
coresight_tmc_reg(mode, TMC_MODE);
coresight_tmc_reg(pscr, TMC_PSCR);
coresight_tmc_reg(axictl, TMC_AXICTL);
coresight_tmc_reg(authstatus, TMC_AUTHSTATUS);
coresight_tmc_reg(devid, CORESIGHT_DEVID);
coresight_tmc_reg64(rrp, TMC_RRP, TMC_RRPHI);
coresight_tmc_reg64(rwp, TMC_RWP, TMC_RWPHI);
coresight_tmc_reg64(dba, TMC_DBALO, TMC_DBAHI);

static struct attribute *coresight_tmc_mgmt_attrs[] = {
        &dev_attr_rsz.attr,
        &dev_attr_sts.attr,
        &dev_attr_rrp.attr,
        &dev_attr_rwp.attr,
        &dev_attr_trg.attr,
        &dev_attr_ctl.attr,
        &dev_attr_ffsr.attr,
        &dev_attr_ffcr.attr,
        &dev_attr_mode.attr,
        &dev_attr_pscr.attr,
        &dev_attr_devid.attr,
        &dev_attr_dba.attr,
        &dev_attr_axictl.attr,
        &dev_attr_authstatus.attr,
        NULL,
};

static ssize_t trigger_cntr_show(struct device *dev,
                                 struct device_attribute *attr, char *buf)
{
        struct tmc_drvdata *drvdata = dev_get_drvdata(dev->parent);
        unsigned long val = drvdata->trigger_cntr;

        return sprintf(buf, "%#lx\n", val);
}

static ssize_t trigger_cntr_store(struct device *dev,
                             struct device_attribute *attr,
                             const char *buf, size_t size)
{
        int ret;
        unsigned long val;
        struct tmc_drvdata *drvdata = dev_get_drvdata(dev->parent);

        ret = kstrtoul(buf, 16, &val);
        if (ret)
                return ret;

        drvdata->trigger_cntr = val;
        return size;
}
static DEVICE_ATTR_RW(trigger_cntr);

static ssize_t buffer_size_show(struct device *dev,
                                struct device_attribute *attr, char *buf)
{
        struct tmc_drvdata *drvdata = dev_get_drvdata(dev->parent);

        return sprintf(buf, "%#x\n", drvdata->size);
}

static ssize_t buffer_size_store(struct device *dev,
                                 struct device_attribute *attr,
                                 const char *buf, size_t size)
{
        int ret;
        unsigned long val;
        struct tmc_drvdata *drvdata = dev_get_drvdata(dev->parent);

        /* Only permitted for TMC-ETRs */
        if (drvdata->config_type != TMC_CONFIG_TYPE_ETR)
                return -EPERM;

        ret = kstrtoul(buf, 0, &val);
        if (ret)
                return ret;
        /* The buffer size should be page aligned */
        if (val & (PAGE_SIZE - 1))
                return -EINVAL;
        drvdata->size = val;
        return size;
}

static DEVICE_ATTR_RW(buffer_size);

static struct attribute *coresight_tmc_attrs[] = {
        &dev_attr_trigger_cntr.attr,
        &dev_attr_buffer_size.attr,
        NULL,
};

static const struct attribute_group coresight_tmc_group = {
        .attrs = coresight_tmc_attrs,
};

static const struct attribute_group coresight_tmc_mgmt_group = {
        .attrs = coresight_tmc_mgmt_attrs,
        .name = "mgmt",
};

static const struct attribute_group *coresight_tmc_groups[] = {
        &coresight_tmc_group,
        &coresight_tmc_mgmt_group,
        NULL,
};

static inline bool tmc_etr_can_use_sg(struct device *dev)
{
        return fwnode_property_present(dev->fwnode, "arm,scatter-gather");
}

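/* Check AUTHSTATUS to confirm non-secure accesses are permitted before using the ETR */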
static inline bool tmc_etr_has_non_secure_access(struct tmc_drvdata *drvdata)
{
        u32 auth = readl_relaxed(drvdata->base + TMC_AUTHSTATUS);

        return (auth & TMC_AUTH_NSID_MASK) == 0x3;
}

/* Detect and initialise the capabilities of a TMC ETR */
static int tmc_etr_setup_caps(struct device *parent, u32 devid, void *dev_caps)
{
        int rc;
        u32 dma_mask = 0;
        struct tmc_drvdata *drvdata = dev_get_drvdata(parent);

        if (!tmc_etr_has_non_secure_access(drvdata))
                return -EACCES;

        /* Set the unadvertised capabilities */
        tmc_etr_init_caps(drvdata, (u32)(unsigned long)dev_caps);

        if (!(devid & TMC_DEVID_NOSCAT) && tmc_etr_can_use_sg(parent))
                tmc_etr_set_cap(drvdata, TMC_ETR_SG);

        /* Check if the AXI address width is available */
        if (devid & TMC_DEVID_AXIAW_VALID)
                dma_mask = ((devid >> TMC_DEVID_AXIAW_SHIFT) &
                                TMC_DEVID_AXIAW_MASK);

        /*
         * Unless specified in the device configuration, ETR uses a 40-bit
         * AXI master in place of the embedded SRAM of ETB/ETF.
         */
        switch (dma_mask) {
        case 32:
        case 40:
        case 44:
        case 48:
        case 52:
                dev_info(parent, "Detected dma mask %dbits\n", dma_mask);
                break;
        default:
                dma_mask = 40;
        }

        rc = dma_set_mask_and_coherent(parent, DMA_BIT_MASK(dma_mask));
        if (rc)
                dev_err(parent, "Failed to setup DMA mask: %d\n", rc);
        return rc;
}

static u32 tmc_etr_get_default_buffer_size(struct device *dev)
{
        u32 size;

        if (fwnode_property_read_u32(dev->fwnode, "arm,buffer-size", &size))
                size = SZ_1M;
        return size;
}

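/*
 * Identify the TMC configuration (ETB, ETF or ETR) from DEVID, register the
 * device with the coresight framework and create the misc device used to
 * read trace data from user space.
 */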
static int tmc_probe(struct amba_device *adev, const struct amba_id *id)
{
        int ret = 0;
        u32 devid;
        void __iomem *base;
        struct device *dev = &adev->dev;
        struct coresight_platform_data *pdata = NULL;
        struct tmc_drvdata *drvdata;
        struct resource *res = &adev->res;
        struct coresight_desc desc = { 0 };
        struct coresight_dev_list *dev_list = NULL;

        ret = -ENOMEM;
        drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
        if (!drvdata)
                goto out;

        dev_set_drvdata(dev, drvdata);

        /* Validity for the resource is already checked by the AMBA core */
        base = devm_ioremap_resource(dev, res);
        if (IS_ERR(base)) {
                ret = PTR_ERR(base);
                goto out;
        }

        drvdata->base = base;

        spin_lock_init(&drvdata->spinlock);

        devid = readl_relaxed(drvdata->base + CORESIGHT_DEVID);
        drvdata->config_type = BMVAL(devid, 6, 7);
        drvdata->memwidth = tmc_get_memwidth(devid);
        /* This device is not associated with a session */
        drvdata->pid = -1;

        if (drvdata->config_type == TMC_CONFIG_TYPE_ETR)
                drvdata->size = tmc_etr_get_default_buffer_size(dev);
        else
                drvdata->size = readl_relaxed(drvdata->base + TMC_RSZ) * 4;

        desc.dev = dev;
        desc.groups = coresight_tmc_groups;

        switch (drvdata->config_type) {
        case TMC_CONFIG_TYPE_ETB:
                desc.type = CORESIGHT_DEV_TYPE_SINK;
                desc.subtype.sink_subtype = CORESIGHT_DEV_SUBTYPE_SINK_BUFFER;
                desc.ops = &tmc_etb_cs_ops;
                dev_list = &etb_devs;
                break;
        case TMC_CONFIG_TYPE_ETR:
                desc.type = CORESIGHT_DEV_TYPE_SINK;
                desc.subtype.sink_subtype = CORESIGHT_DEV_SUBTYPE_SINK_SYSMEM;
                desc.ops = &tmc_etr_cs_ops;
                ret = tmc_etr_setup_caps(dev, devid,
                                         coresight_get_uci_data(id));
                if (ret)
                        goto out;
                idr_init(&drvdata->idr);
                mutex_init(&drvdata->idr_mutex);
                dev_list = &etr_devs;
                break;
        case TMC_CONFIG_TYPE_ETF:
                desc.type = CORESIGHT_DEV_TYPE_LINKSINK;
                desc.subtype.sink_subtype = CORESIGHT_DEV_SUBTYPE_SINK_BUFFER;
                desc.subtype.link_subtype = CORESIGHT_DEV_SUBTYPE_LINK_FIFO;
                desc.ops = &tmc_etf_cs_ops;
                dev_list = &etf_devs;
                break;
        default:
                pr_err("%s: Unsupported TMC config\n", desc.name);
                ret = -EINVAL;
                goto out;
        }

        desc.name = coresight_alloc_device_name(dev_list, dev);
        if (!desc.name) {
                ret = -ENOMEM;
                goto out;
        }

        pdata = coresight_get_platform_data(dev);
        if (IS_ERR(pdata)) {
                ret = PTR_ERR(pdata);
                goto out;
        }
        adev->dev.platform_data = pdata;
        desc.pdata = pdata;

        drvdata->csdev = coresight_register(&desc);
        if (IS_ERR(drvdata->csdev)) {
                ret = PTR_ERR(drvdata->csdev);
                goto out;
        }

        drvdata->miscdev.name = desc.name;
        drvdata->miscdev.minor = MISC_DYNAMIC_MINOR;
        drvdata->miscdev.fops = &tmc_fops;
        ret = misc_register(&drvdata->miscdev);
        if (ret)
                coresight_unregister(drvdata->csdev);
        else
                pm_runtime_put(&adev->dev);
out:
        return ret;
}

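/* Quiesce an ETR that is still capturing so it stops writing to system memory */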
static void tmc_shutdown(struct amba_device *adev)
{
        unsigned long flags;
        struct tmc_drvdata *drvdata = amba_get_drvdata(adev);

        spin_lock_irqsave(&drvdata->spinlock, flags);

        if (drvdata->mode == CS_MODE_DISABLED)
                goto out;

        if (drvdata->config_type == TMC_CONFIG_TYPE_ETR)
                tmc_etr_disable_hw(drvdata);

        /*
         * Unlike the remove callback, which is required for making coresight
         * modular, we do not bother unregistering the coresight device here:
         * the system is going down after this.
         */
out:
        spin_unlock_irqrestore(&drvdata->spinlock, flags);
}

static const struct amba_id tmc_ids[] = {
        CS_AMBA_ID(0x000bb961),
        /* Coresight SoC 600 TMC-ETR/ETS */
        CS_AMBA_ID_DATA(0x000bb9e8, (unsigned long)CORESIGHT_SOC_600_ETR_CAPS),
        /* Coresight SoC 600 TMC-ETB */
        CS_AMBA_ID(0x000bb9e9),
        /* Coresight SoC 600 TMC-ETF */
        CS_AMBA_ID(0x000bb9ea),
        { 0, 0},
};

static struct amba_driver tmc_driver = {
        .drv = {
                .name   = "coresight-tmc",
                .owner  = THIS_MODULE,
                .suppress_bind_attrs = true,
        },
        .probe          = tmc_probe,
        .shutdown       = tmc_shutdown,
        .id_table       = tmc_ids,
};
builtin_amba_driver(tmc_driver);