linux/drivers/hwtracing/coresight/coresight-tmc-etf.c
/*
 * Copyright(C) 2016 Linaro Limited. All rights reserved.
 * Author: Mathieu Poirier <mathieu.poirier@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/circ_buf.h>
#include <linux/coresight.h>
#include <linux/perf_event.h>
#include <linux/slab.h>
#include "coresight-priv.h"
#include "coresight-tmc.h"

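/*
 * The TMC-ETB and TMC-ETF configurations handled here use the controller's
 * internal RAM: when operated as a sink the RAM holds trace in a circular
 * buffer, and when an ETF is used as a link the RAM runs as a hardware FIFO.
 */
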
static void tmc_etb_enable_hw(struct tmc_drvdata *drvdata)
{
        CS_UNLOCK(drvdata->base);

        /* Wait for TMCSReady bit to be set */
        tmc_wait_for_tmcready(drvdata);

        writel_relaxed(TMC_MODE_CIRCULAR_BUFFER, drvdata->base + TMC_MODE);
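        /*
         * Enable the formatter so the trace stream carries source IDs, allow
         * trigger packets to be inserted, flush on FlushIn and on a trigger
         * event, and treat the TrigIn signal as a trigger.
         */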
        writel_relaxed(TMC_FFCR_EN_FMT | TMC_FFCR_EN_TI |
                       TMC_FFCR_FON_FLIN | TMC_FFCR_FON_TRIG_EVT |
                       TMC_FFCR_TRIGON_TRIGIN,
                       drvdata->base + TMC_FFCR);

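        /* Program the trigger counter with the value configured through sysFS */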
        writel_relaxed(drvdata->trigger_cntr, drvdata->base + TMC_TRG);
        tmc_enable_hw(drvdata);

        CS_LOCK(drvdata->base);
}

static void tmc_etb_dump_hw(struct tmc_drvdata *drvdata)
{
        bool lost = false;
        char *bufp;
        const u32 *barrier;
        u32 read_data, status;
        int i;

        /*
         * Get a hold of the status register and see if a wrap around
         * has occurred.
         */
        status = readl_relaxed(drvdata->base + TMC_STS);
        if (status & TMC_STS_FULL)
                lost = true;

        bufp = drvdata->buf;
        drvdata->len = 0;
        barrier = barrier_pkt;
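        /*
         * Drain the trace RAM one memory-width beat at a time.  Reading
         * TMC_RRD returns 0xFFFFFFFF once all data has been read.  If trace
         * was lost because the RAM wrapped around, the first words copied
         * out are replaced with a barrier packet so decoders can
         * re-synchronise.
         */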
        while (1) {
                for (i = 0; i < drvdata->memwidth; i++) {
                        read_data = readl_relaxed(drvdata->base + TMC_RRD);
                        if (read_data == 0xFFFFFFFF)
                                return;

                        if (lost && *barrier) {
                                read_data = *barrier;
                                barrier++;
                        }

                        memcpy(bufp, &read_data, 4);
                        bufp += 4;
                        drvdata->len += 4;
                }
        }
}

static void tmc_etb_disable_hw(struct tmc_drvdata *drvdata)
{
        CS_UNLOCK(drvdata->base);

        tmc_flush_and_stop(drvdata);
        /*
         * When operating in sysFS mode the content of the buffer needs to be
         * read before the TMC is disabled.
         */
        if (drvdata->mode == CS_MODE_SYSFS)
                tmc_etb_dump_hw(drvdata);
        tmc_disable_hw(drvdata);

        CS_LOCK(drvdata->base);
}

static void tmc_etf_enable_hw(struct tmc_drvdata *drvdata)
{
        CS_UNLOCK(drvdata->base);

        /* Wait for TMCSReady bit to be set */
        tmc_wait_for_tmcready(drvdata);

        writel_relaxed(TMC_MODE_HARDWARE_FIFO, drvdata->base + TMC_MODE);
        writel_relaxed(TMC_FFCR_EN_FMT | TMC_FFCR_EN_TI,
                       drvdata->base + TMC_FFCR);
        writel_relaxed(0x0, drvdata->base + TMC_BUFWM);
        tmc_enable_hw(drvdata);

        CS_LOCK(drvdata->base);
}

static void tmc_etf_disable_hw(struct tmc_drvdata *drvdata)
{
        CS_UNLOCK(drvdata->base);

        tmc_flush_and_stop(drvdata);
        tmc_disable_hw(drvdata);

        CS_LOCK(drvdata->base);
}

static int tmc_enable_etf_sink_sysfs(struct coresight_device *csdev)
{
        int ret = 0;
        bool used = false;
        char *buf = NULL;
        unsigned long flags;
        struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

        /*
         * If we don't have a buffer, release the lock and allocate memory.
         * Otherwise keep the lock and move along.
         */
        spin_lock_irqsave(&drvdata->spinlock, flags);
        if (!drvdata->buf) {
                spin_unlock_irqrestore(&drvdata->spinlock, flags);

                /* Allocating the memory here while outside of the spinlock */
                buf = kzalloc(drvdata->size, GFP_KERNEL);
                if (!buf)
                        return -ENOMEM;

                /* Let's try again */
                spin_lock_irqsave(&drvdata->spinlock, flags);
        }

        if (drvdata->reading) {
                ret = -EBUSY;
                goto out;
        }

        /*
         * In sysFS mode we can have multiple writers per sink.  Since this
         * sink is already enabled no memory is needed and the HW need not be
         * touched.
         */
        if (drvdata->mode == CS_MODE_SYSFS)
                goto out;

        /*
         * If drvdata::buf isn't NULL, memory was allocated for a previous
         * trace run but wasn't read.  If so simply zero-out the memory.
         * Otherwise use the memory allocated above.
         *
         * The memory is freed when users read the buffer using the
         * /dev/xyz.{etf|etb} interface.  See tmc_read_unprepare_etb() for
         * details.
         */
        if (drvdata->buf) {
                memset(drvdata->buf, 0, drvdata->size);
        } else {
                used = true;
                drvdata->buf = buf;
        }

        drvdata->mode = CS_MODE_SYSFS;
        tmc_etb_enable_hw(drvdata);
out:
        spin_unlock_irqrestore(&drvdata->spinlock, flags);

        /* Free memory outside the spinlock if need be */
        if (!used)
                kfree(buf);

        return ret;
}

static int tmc_enable_etf_sink_perf(struct coresight_device *csdev)
{
        int ret = 0;
        unsigned long flags;
        struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

        spin_lock_irqsave(&drvdata->spinlock, flags);
        if (drvdata->reading) {
                ret = -EINVAL;
                goto out;
        }

        /*
         * In Perf mode there can be only one writer per sink.  There
         * is also no need to continue if the ETB/ETF is already operated
         * from sysFS.
         */
        if (drvdata->mode != CS_MODE_DISABLED) {
                ret = -EINVAL;
                goto out;
        }

        drvdata->mode = CS_MODE_PERF;
        tmc_etb_enable_hw(drvdata);
out:
        spin_unlock_irqrestore(&drvdata->spinlock, flags);

        return ret;
}

static int tmc_enable_etf_sink(struct coresight_device *csdev, u32 mode)
{
        int ret;
        struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

        switch (mode) {
        case CS_MODE_SYSFS:
                ret = tmc_enable_etf_sink_sysfs(csdev);
                break;
        case CS_MODE_PERF:
                ret = tmc_enable_etf_sink_perf(csdev);
                break;
        /* We shouldn't be here */
        default:
                ret = -EINVAL;
                break;
        }

        if (ret)
                return ret;

        dev_info(drvdata->dev, "TMC-ETB/ETF enabled\n");
        return 0;
}

static void tmc_disable_etf_sink(struct coresight_device *csdev)
{
        unsigned long flags;
        struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

        spin_lock_irqsave(&drvdata->spinlock, flags);
        if (drvdata->reading) {
                spin_unlock_irqrestore(&drvdata->spinlock, flags);
                return;
        }

        /* Disable the TMC only if it is currently enabled */
        if (drvdata->mode != CS_MODE_DISABLED) {
                tmc_etb_disable_hw(drvdata);
                drvdata->mode = CS_MODE_DISABLED;
        }

        spin_unlock_irqrestore(&drvdata->spinlock, flags);

        dev_info(drvdata->dev, "TMC-ETB/ETF disabled\n");
}

static int tmc_enable_etf_link(struct coresight_device *csdev,
                               int inport, int outport)
{
        unsigned long flags;
        struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

        spin_lock_irqsave(&drvdata->spinlock, flags);
        if (drvdata->reading) {
                spin_unlock_irqrestore(&drvdata->spinlock, flags);
                return -EBUSY;
        }

        tmc_etf_enable_hw(drvdata);
        drvdata->mode = CS_MODE_SYSFS;
        spin_unlock_irqrestore(&drvdata->spinlock, flags);

        dev_info(drvdata->dev, "TMC-ETF enabled\n");
        return 0;
}

static void tmc_disable_etf_link(struct coresight_device *csdev,
                                 int inport, int outport)
{
        unsigned long flags;
        struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

        spin_lock_irqsave(&drvdata->spinlock, flags);
        if (drvdata->reading) {
                spin_unlock_irqrestore(&drvdata->spinlock, flags);
                return;
        }

        tmc_etf_disable_hw(drvdata);
        drvdata->mode = CS_MODE_DISABLED;
        spin_unlock_irqrestore(&drvdata->spinlock, flags);

        dev_info(drvdata->dev, "TMC-ETF disabled\n");
}

static void *tmc_alloc_etf_buffer(struct coresight_device *csdev, int cpu,
                                  void **pages, int nr_pages, bool overwrite)
{
        int node;
        struct cs_buffers *buf;

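        /* A per-thread session is not bound to a CPU and comes in as cpu == -1 */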
        if (cpu == -1)
                cpu = smp_processor_id();
        node = cpu_to_node(cpu);

        /* Allocate memory structure for interaction with Perf */
        buf = kzalloc_node(sizeof(struct cs_buffers), GFP_KERNEL, node);
        if (!buf)
                return NULL;

        buf->snapshot = overwrite;
        buf->nr_pages = nr_pages;
        buf->data_pages = pages;

        return buf;
}

static void tmc_free_etf_buffer(void *config)
{
        struct cs_buffers *buf = config;

        kfree(buf);
}

static int tmc_set_etf_buffer(struct coresight_device *csdev,
                              struct perf_output_handle *handle,
                              void *sink_config)
{
        int ret = 0;
        unsigned long head;
        struct cs_buffers *buf = sink_config;

        /* wrap head around to the amount of space we have */
        head = handle->head & ((buf->nr_pages << PAGE_SHIFT) - 1);

        /* find the page to write to */
        buf->cur = head / PAGE_SIZE;

        /* and offset within that page */
        buf->offset = head % PAGE_SIZE;

        local_set(&buf->data_size, 0);

        return ret;
}

static unsigned long tmc_reset_etf_buffer(struct coresight_device *csdev,
                                          struct perf_output_handle *handle,
                                          void *sink_config)
{
        long size = 0;
        struct cs_buffers *buf = sink_config;

        if (buf) {
                /*
                 * In snapshot mode ->data_size holds the new address of the
                 * ring buffer's head.  The size itself is the whole address
                 * range since we want the latest information.
                 */
                if (buf->snapshot)
                        handle->head = local_xchg(&buf->data_size,
                                                  buf->nr_pages << PAGE_SHIFT);
                /*
                 * Tell the tracer PMU how much we got in this run and if
                 * something went wrong along the way.  Nobody else can use
                 * this cs_buffers instance until we are done.  As such
                 * resetting parameters here and squaring off with the ring
                 * buffer API in the tracer PMU is fine.
                 */
                size = local_xchg(&buf->data_size, 0);
        }

        return size;
}

static void tmc_update_etf_buffer(struct coresight_device *csdev,
                                  struct perf_output_handle *handle,
                                  void *sink_config)
{
        bool lost = false;
        int i, cur;
        const u32 *barrier;
        u32 *buf_ptr;
        u64 read_ptr, write_ptr;
        u32 status, to_read;
        unsigned long offset;
        struct cs_buffers *buf = sink_config;
        struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

        if (!buf)
                return;

        /* This shouldn't happen */
        if (WARN_ON_ONCE(drvdata->mode != CS_MODE_PERF))
                return;

        CS_UNLOCK(drvdata->base);

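        /* Stop trace collection so the RAM contents stay put while being drained */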
        tmc_flush_and_stop(drvdata);

        read_ptr = tmc_read_rrp(drvdata);
        write_ptr = tmc_read_rwp(drvdata);

        /*
         * Get a hold of the status register and see if a wrap around
         * has occurred.  If so adjust things accordingly.
         */
        status = readl_relaxed(drvdata->base + TMC_STS);
        if (status & TMC_STS_FULL) {
                lost = true;
                to_read = drvdata->size;
        } else {
                to_read = CIRC_CNT(write_ptr, read_ptr, drvdata->size);
        }

        /*
         * The TMC RAM buffer may be bigger than the space available in the
         * perf ring buffer (handle->size).  If so advance the RRP so that we
         * get the latest trace data.
         */
        if (to_read > handle->size) {
                u32 mask = 0;

                /*
                 * The value written to RRP must be byte-address aligned to
                 * the width of the trace memory databus _and_ to a frame
                 * boundary (16 byte), whichever is the biggest. For example,
                 * for 32-bit, 64-bit and 128-bit wide trace memory, the four
                 * LSBs must be 0s. For 256-bit wide trace memory, the five
                 * LSBs must be 0s.
                 */
                switch (drvdata->memwidth) {
                case TMC_MEM_INTF_WIDTH_32BITS:
                case TMC_MEM_INTF_WIDTH_64BITS:
                case TMC_MEM_INTF_WIDTH_128BITS:
                        mask = GENMASK(31, 4);
                        break;
                case TMC_MEM_INTF_WIDTH_256BITS:
                        mask = GENMASK(31, 5);
                        break;
                }

                /*
                 * Make sure the new size is aligned in accordance with the
                 * requirement explained above.
                 */
                to_read = handle->size & mask;
                /* Move the RAM read pointer up */
                read_ptr = (write_ptr + drvdata->size) - to_read;
                /* Make sure we are still within our limits */
                if (read_ptr > (drvdata->size - 1))
                        read_ptr -= drvdata->size;
                /* Tell the HW */
                tmc_write_rrp(drvdata, read_ptr);
                lost = true;
        }

        if (lost)
                perf_aux_output_flag(handle, PERF_AUX_FLAG_TRUNCATED);

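        /*
         * Copy the trace data into the perf ring buffer one 32-bit word at a
         * time, moving to the next page whenever a page boundary is crossed.
         * When trace was lost, the very first words are overwritten with a
         * barrier packet so decoders can re-synchronise.
         */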
        cur = buf->cur;
        offset = buf->offset;
        barrier = barrier_pkt;

        /* for every byte to read */
        for (i = 0; i < to_read; i += 4) {
                buf_ptr = buf->data_pages[cur] + offset;
                *buf_ptr = readl_relaxed(drvdata->base + TMC_RRD);

                if (lost && *barrier) {
                        *buf_ptr = *barrier;
                        barrier++;
                }

                offset += 4;
                if (offset >= PAGE_SIZE) {
                        offset = 0;
                        cur++;
                        /* wrap around at the end of the buffer */
                        cur &= buf->nr_pages - 1;
                }
        }

        /*
         * In snapshot mode all we have to do is communicate to
         * perf_aux_output_end() the address of the current head.  In full
         * trace mode the same function expects a size to move rb->aux_head
         * forward.
         */
        if (buf->snapshot)
                local_set(&buf->data_size, (cur * PAGE_SIZE) + offset);
        else
                local_add(to_read, &buf->data_size);

        CS_LOCK(drvdata->base);
}

static const struct coresight_ops_sink tmc_etf_sink_ops = {
        .enable         = tmc_enable_etf_sink,
        .disable        = tmc_disable_etf_sink,
        .alloc_buffer   = tmc_alloc_etf_buffer,
        .free_buffer    = tmc_free_etf_buffer,
        .set_buffer     = tmc_set_etf_buffer,
        .reset_buffer   = tmc_reset_etf_buffer,
        .update_buffer  = tmc_update_etf_buffer,
};

static const struct coresight_ops_link tmc_etf_link_ops = {
        .enable         = tmc_enable_etf_link,
        .disable        = tmc_disable_etf_link,
};

const struct coresight_ops tmc_etb_cs_ops = {
        .sink_ops       = &tmc_etf_sink_ops,
};

const struct coresight_ops tmc_etf_cs_ops = {
        .sink_ops       = &tmc_etf_sink_ops,
        .link_ops       = &tmc_etf_link_ops,
};

int tmc_read_prepare_etb(struct tmc_drvdata *drvdata)
{
        enum tmc_mode mode;
        int ret = 0;
        unsigned long flags;

        /* config types are set at boot time and never change */
        if (WARN_ON_ONCE(drvdata->config_type != TMC_CONFIG_TYPE_ETB &&
                         drvdata->config_type != TMC_CONFIG_TYPE_ETF))
                return -EINVAL;

        spin_lock_irqsave(&drvdata->spinlock, flags);

        if (drvdata->reading) {
                ret = -EBUSY;
                goto out;
        }

        /* There is no point in reading a TMC in HW FIFO mode */
        mode = readl_relaxed(drvdata->base + TMC_MODE);
        if (mode != TMC_MODE_CIRCULAR_BUFFER) {
                ret = -EINVAL;
                goto out;
        }

        /* Don't interfere if operated from Perf */
        if (drvdata->mode == CS_MODE_PERF) {
                ret = -EINVAL;
                goto out;
        }

        /* If drvdata::buf is NULL the trace data has been read already */
        if (drvdata->buf == NULL) {
                ret = -EINVAL;
                goto out;
        }

        /* Disable the TMC if need be */
        if (drvdata->mode == CS_MODE_SYSFS)
                tmc_etb_disable_hw(drvdata);

        drvdata->reading = true;
out:
        spin_unlock_irqrestore(&drvdata->spinlock, flags);

        return ret;
}

int tmc_read_unprepare_etb(struct tmc_drvdata *drvdata)
{
        char *buf = NULL;
        enum tmc_mode mode;
        unsigned long flags;

        /* config types are set at boot time and never change */
        if (WARN_ON_ONCE(drvdata->config_type != TMC_CONFIG_TYPE_ETB &&
                         drvdata->config_type != TMC_CONFIG_TYPE_ETF))
                return -EINVAL;

        spin_lock_irqsave(&drvdata->spinlock, flags);

        /* There is no point in reading a TMC in HW FIFO mode */
        mode = readl_relaxed(drvdata->base + TMC_MODE);
        if (mode != TMC_MODE_CIRCULAR_BUFFER) {
                spin_unlock_irqrestore(&drvdata->spinlock, flags);
                return -EINVAL;
        }

        /* Re-enable the TMC if need be */
        if (drvdata->mode == CS_MODE_SYSFS) {
                /*
                 * The trace run will continue with the same allocated trace
                 * buffer. As such zero-out the buffer so that we don't end
                 * up with stale data.
                 *
                 * Since the tracer is still enabled drvdata::buf
                 * can't be NULL.
                 */
                memset(drvdata->buf, 0, drvdata->size);
                tmc_etb_enable_hw(drvdata);
        } else {
                /*
                 * The ETB/ETF is not tracing and the buffer was just read.
                 * As such prepare to free the trace buffer.
                 */
                buf = drvdata->buf;
                drvdata->buf = NULL;
        }

        drvdata->reading = false;
        spin_unlock_irqrestore(&drvdata->spinlock, flags);

        /*
         * Free allocated memory outside of the spinlock.  There is no need
         * to assert the validity of 'buf' since calling kfree(NULL) is safe.
         */
        kfree(buf);

        return 0;
}