linux/drivers/hwtracing/coresight/coresight-etm-perf.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright(C) 2015 Linaro Limited. All rights reserved.
 * Author: Mathieu Poirier <mathieu.poirier@linaro.org>
 */

#include <linux/coresight.h>
#include <linux/coresight-pmu.h>
#include <linux/cpumask.h>
#include <linux/device.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/perf_event.h>
#include <linux/percpu-defs.h>
#include <linux/slab.h>
#include <linux/stringhash.h>
#include <linux/types.h>
#include <linux/workqueue.h>

#include "coresight-etm-perf.h"
#include "coresight-priv.h"

static struct pmu etm_pmu;
static bool etm_perf_up;

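/*
 * Per-CPU state for perf sessions: the AUX output handle used when driving
 * the sink, and the CoreSight source (tracer) feeding each CPU.
 */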
static DEFINE_PER_CPU(struct perf_output_handle, ctx_handle);
static DEFINE_PER_CPU(struct coresight_device *, csdev_src);

/* ETMv3.5/PTM's ETMCR is 'config' */
PMU_FORMAT_ATTR(cycacc,         "config:" __stringify(ETM_OPT_CYCACC));
PMU_FORMAT_ATTR(contextid,      "config:" __stringify(ETM_OPT_CTXTID));
PMU_FORMAT_ATTR(timestamp,      "config:" __stringify(ETM_OPT_TS));
PMU_FORMAT_ATTR(retstack,       "config:" __stringify(ETM_OPT_RETSTK));
/* Sink ID - same for all ETMs */
PMU_FORMAT_ATTR(sinkid,         "config2:0-31");

static struct attribute *etm_config_formats_attr[] = {
        &format_attr_cycacc.attr,
        &format_attr_contextid.attr,
        &format_attr_timestamp.attr,
        &format_attr_retstack.attr,
        &format_attr_sinkid.attr,
        NULL,
};

static const struct attribute_group etm_pmu_format_group = {
        .name   = "format",
        .attrs  = etm_config_formats_attr,
};

static struct attribute *etm_config_sinks_attr[] = {
        NULL,
};

static const struct attribute_group etm_pmu_sinks_group = {
        .name   = "sinks",
        .attrs  = etm_config_sinks_attr,
};

static const struct attribute_group *etm_pmu_attr_groups[] = {
        &etm_pmu_format_group,
        &etm_pmu_sinks_group,
        NULL,
};

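/* Accessors for the per-CPU source-to-sink path stored in the event data. */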
static inline struct list_head **
etm_event_cpu_path_ptr(struct etm_event_data *data, int cpu)
{
        return per_cpu_ptr(data->path, cpu);
}

static inline struct list_head *
etm_event_cpu_path(struct etm_event_data *data, int cpu)
{
        return *etm_event_cpu_path_ptr(data, cpu);
}

static void etm_event_read(struct perf_event *event) {}

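/*
 * Allocate the address filter state for this event.  Events created through
 * inheritance start with a copy of their parent's filters.
 */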
static int etm_addr_filters_alloc(struct perf_event *event)
{
        struct etm_filters *filters;
        int node = event->cpu == -1 ? -1 : cpu_to_node(event->cpu);

        filters = kzalloc_node(sizeof(struct etm_filters), GFP_KERNEL, node);
        if (!filters)
                return -ENOMEM;

        if (event->parent)
                memcpy(filters, event->parent->hw.addr_filters,
                       sizeof(*filters));

        event->hw.addr_filters = filters;

        return 0;
}

static void etm_event_destroy(struct perf_event *event)
{
        kfree(event->hw.addr_filters);
        event->hw.addr_filters = NULL;
}

static int etm_event_init(struct perf_event *event)
{
        int ret = 0;

        if (event->attr.type != etm_pmu.type) {
                ret = -ENOENT;
                goto out;
        }

        ret = etm_addr_filters_alloc(event);
        if (ret)
                goto out;

        event->destroy = etm_event_destroy;
out:
        return ret;
}

static void free_sink_buffer(struct etm_event_data *event_data)
{
        int cpu;
        cpumask_t *mask = &event_data->mask;
        struct coresight_device *sink;

        if (WARN_ON(cpumask_empty(mask)))
                return;

        if (!event_data->snk_config)
                return;

        cpu = cpumask_first(mask);
        sink = coresight_get_sink(etm_event_cpu_path(event_data, cpu));
        sink_ops(sink)->free_buffer(event_data->snk_config);
}

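/*
 * Deferred from etm_free_aux(): release the sink buffer, every per-CPU path
 * built for the session and finally the session data itself.
 */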
static void free_event_data(struct work_struct *work)
{
        int cpu;
        cpumask_t *mask;
        struct etm_event_data *event_data;

        event_data = container_of(work, struct etm_event_data, work);
        mask = &event_data->mask;

        /* Free the sink buffers, if there are any */
        free_sink_buffer(event_data);

        for_each_cpu(cpu, mask) {
                struct list_head **ppath;

                ppath = etm_event_cpu_path_ptr(event_data, cpu);
                if (!(IS_ERR_OR_NULL(*ppath)))
                        coresight_release_path(*ppath);
                *ppath = NULL;
        }

        free_percpu(event_data->path);
        kfree(event_data);
}

static void *alloc_event_data(int cpu)
{
        cpumask_t *mask;
        struct etm_event_data *event_data;

        /* First get memory for the session's data */
        event_data = kzalloc(sizeof(struct etm_event_data), GFP_KERNEL);
        if (!event_data)
                return NULL;

        mask = &event_data->mask;
        if (cpu != -1)
                cpumask_set_cpu(cpu, mask);
        else
                cpumask_copy(mask, cpu_present_mask);

        /*
         * Each CPU has a single path between source and destination.  As
         * such, allocate an array using CPU numbers as indexes.  That way a
         * path for any CPU can easily be accessed at any given time.  We
         * proceed the same way for sessions involving a single CPU.  The
         * cost of unused memory when dealing with single CPU trace scenarios
         * is small compared to the cost of searching through an optimized
         * array.
         */
        event_data->path = alloc_percpu(struct list_head *);

        if (!event_data->path) {
                kfree(event_data);
                return NULL;
        }

        return event_data;
}

static void etm_free_aux(void *data)
{
        struct etm_event_data *event_data = data;

        schedule_work(&event_data->work);
}

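/*
 * Build a source-to-sink path for every CPU in the session and have the
 * sink allocate its buffer over the AUX pages handed down by the perf core.
 */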
static void *etm_setup_aux(struct perf_event *event, void **pages,
                           int nr_pages, bool overwrite)
{
        u32 id;
        int cpu = event->cpu;
        cpumask_t *mask;
        struct coresight_device *sink;
        struct etm_event_data *event_data = NULL;

        event_data = alloc_event_data(cpu);
        if (!event_data)
                return NULL;
        INIT_WORK(&event_data->work, free_event_data);

        /* First get the selected sink from user space. */
        if (event->attr.config2) {
                id = (u32)event->attr.config2;
                sink = coresight_get_sink_by_id(id);
        } else {
                sink = coresight_get_enabled_sink(true);
        }

        if (!sink)
                goto err;

        mask = &event_data->mask;

        /*
         * Set up a path for each CPU in the trace session.  If we can't find
         * an ETM for a CPU, or fail to build a path for it, that CPU is
         * cleared from the mask and we continue with the rest.  Should
         * tracing ever be attempted on one of those CPUs, the session is
         * simply failed for it.
         */
        for_each_cpu(cpu, mask) {
                struct list_head *path;
                struct coresight_device *csdev;

                csdev = per_cpu(csdev_src, cpu);
                /*
                 * If there is no ETM associated with this CPU, clear it from
                 * the mask and continue with the rest.  If we ever try to
                 * trace on this CPU, we handle it accordingly.
                 */
                if (!csdev) {
                        cpumask_clear_cpu(cpu, mask);
                        continue;
                }

                /*
                 * Building a path doesn't enable it; it simply builds a
                 * list of devices from source to sink that can be
                 * referenced later, when the path is actually needed.
                 */
                path = coresight_build_path(csdev, sink);
                if (IS_ERR(path)) {
                        cpumask_clear_cpu(cpu, mask);
                        continue;
                }

                *etm_event_cpu_path_ptr(event_data, cpu) = path;
        }

        /* If we don't have any CPUs ready for tracing, abort */
        cpu = cpumask_first(mask);
        if (cpu >= nr_cpu_ids)
                goto err;

        if (!sink_ops(sink)->alloc_buffer || !sink_ops(sink)->free_buffer)
                goto err;

        /* Allocate the sink buffer for this session */
        event_data->snk_config =
                        sink_ops(sink)->alloc_buffer(sink, event, pages,
                                                     nr_pages, overwrite);
        if (!event_data->snk_config)
                goto err;

out:
        return event_data;

err:
        etm_free_aux(event_data);
        event_data = NULL;
        goto out;
}

static void etm_event_start(struct perf_event *event, int flags)
{
        int cpu = smp_processor_id();
        struct etm_event_data *event_data;
        struct perf_output_handle *handle = this_cpu_ptr(&ctx_handle);
        struct coresight_device *sink, *csdev = per_cpu(csdev_src, cpu);
        struct list_head *path;

        if (!csdev)
                goto fail;

        /*
         * Deal with the ring buffer API and get a handle on the
         * session's information.
         */
        event_data = perf_aux_output_begin(handle, event);
        if (!event_data)
                goto fail;

        path = etm_event_cpu_path(event_data, cpu);
        /* We need a sink; no need to continue without one */
        sink = coresight_get_sink(path);
        if (WARN_ON_ONCE(!sink))
                goto fail_end_stop;

        /* Nothing will happen without a path */
        if (coresight_enable_path(path, CS_MODE_PERF, handle))
                goto fail_end_stop;

        /* Tell the perf core the event is alive */
        event->hw.state = 0;

        /* Finally enable the tracer */
        if (source_ops(csdev)->enable(csdev, event, CS_MODE_PERF))
                goto fail_disable_path;

out:
        return;

fail_disable_path:
        coresight_disable_path(path);
fail_end_stop:
        perf_aux_output_flag(handle, PERF_AUX_FLAG_TRUNCATED);
        perf_aux_output_end(handle, 0);
fail:
        event->hw.state = PERF_HES_STOPPED;
        goto out;
}

static void etm_event_stop(struct perf_event *event, int mode)
{
        int cpu = smp_processor_id();
        unsigned long size;
        struct coresight_device *sink, *csdev = per_cpu(csdev_src, cpu);
        struct perf_output_handle *handle = this_cpu_ptr(&ctx_handle);
        struct etm_event_data *event_data = perf_get_aux(handle);
        struct list_head *path;

        if (event->hw.state == PERF_HES_STOPPED)
                return;

        if (!csdev)
                return;

        path = etm_event_cpu_path(event_data, cpu);
        if (!path)
                return;

        sink = coresight_get_sink(path);
        if (!sink)
                return;

        /* Stop the tracer */
        source_ops(csdev)->disable(csdev, event);

        /* Tell the perf core */
        event->hw.state = PERF_HES_STOPPED;

        if (mode & PERF_EF_UPDATE) {
                if (WARN_ON_ONCE(handle->event != event))
                        return;

                /* Update trace information */
                if (!sink_ops(sink)->update_buffer)
                        return;

                size = sink_ops(sink)->update_buffer(sink, handle,
                                              event_data->snk_config);
                perf_aux_output_end(handle, size);
        }

        /* Disabling the path makes its elements available to other sessions */
        coresight_disable_path(path);
}

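/*
 * Called by the perf core to add the event on the current CPU.  When
 * PERF_EF_START is set, tracing starts right away; otherwise the event is
 * left stopped until etm_event_start() is called.
 */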
static int etm_event_add(struct perf_event *event, int mode)
{
        int ret = 0;
        struct hw_perf_event *hwc = &event->hw;

        if (mode & PERF_EF_START) {
                etm_event_start(event, 0);
                if (hwc->state & PERF_HES_STOPPED)
                        ret = -EINVAL;
        } else {
                hwc->state = PERF_HES_STOPPED;
        }

        return ret;
}

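/* Remove the event from the CPU: stop the tracer and update the AUX buffer. */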
static void etm_event_del(struct perf_event *event, int mode)
{
        etm_event_stop(event, PERF_EF_UPDATE);
}

static int etm_addr_filters_validate(struct list_head *filters)
{
        bool range = false, address = false;
        int index = 0;
        struct perf_addr_filter *filter;

        list_for_each_entry(filter, filters, entry) {
                /*
                 * No need to go further if there's no more
                 * room for filters.
                 */
                if (++index > ETM_ADDR_CMP_MAX)
                        return -EOPNOTSUPP;

                /* filter::size==0 means single address trigger */
                if (filter->size) {
                        /*
                         * The existing code relies on START/STOP filters
                         * being address filters.
                         */
                        if (filter->action == PERF_ADDR_FILTER_ACTION_START ||
                            filter->action == PERF_ADDR_FILTER_ACTION_STOP)
                                return -EOPNOTSUPP;

                        range = true;
                } else
                        address = true;

                /*
                 * At this time we don't allow range and start/stop filtering
                 * to be used together; they have to be mutually exclusive.
                 */
                if (range && address)
                        return -EOPNOTSUPP;
        }

        return 0;
}

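/*
 * Translate the address filters tracked by the perf core into the range and
 * start/stop filter configuration handed to the tracers.
 */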
static void etm_addr_filters_sync(struct perf_event *event)
{
        struct perf_addr_filters_head *head = perf_event_addr_filters(event);
        unsigned long start, stop;
        struct perf_addr_filter_range *fr = event->addr_filter_ranges;
        struct etm_filters *filters = event->hw.addr_filters;
        struct etm_filter *etm_filter;
        struct perf_addr_filter *filter;
        int i = 0;

        list_for_each_entry(filter, &head->list, entry) {
                start = fr[i].start;
                stop = start + fr[i].size;
                etm_filter = &filters->etm_filter[i];

                switch (filter->action) {
                case PERF_ADDR_FILTER_ACTION_FILTER:
                        etm_filter->start_addr = start;
                        etm_filter->stop_addr = stop;
                        etm_filter->type = ETM_ADDR_TYPE_RANGE;
                        break;
                case PERF_ADDR_FILTER_ACTION_START:
                        etm_filter->start_addr = start;
                        etm_filter->type = ETM_ADDR_TYPE_START;
                        break;
                case PERF_ADDR_FILTER_ACTION_STOP:
                        etm_filter->stop_addr = stop;
                        etm_filter->type = ETM_ADDR_TYPE_STOP;
                        break;
                }
                i++;
        }

        filters->nr_filters = i;
}

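/*
 * Create or remove the "cpuN" sysfs link between the ETM PMU device and the
 * tracer for that CPU, and record the tracer in csdev_src so that perf
 * sessions can find it.
 */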
int etm_perf_symlink(struct coresight_device *csdev, bool link)
{
        char entry[sizeof("cpu9999999")];
        int ret = 0, cpu = source_ops(csdev)->cpu_id(csdev);
        struct device *pmu_dev = etm_pmu.dev;
        struct device *cs_dev = &csdev->dev;

        sprintf(entry, "cpu%d", cpu);

        if (!etm_perf_up)
                return -EPROBE_DEFER;

        if (link) {
                ret = sysfs_create_link(&pmu_dev->kobj, &cs_dev->kobj, entry);
                if (ret)
                        return ret;
                per_cpu(csdev_src, cpu) = csdev;
        } else {
                sysfs_remove_link(&pmu_dev->kobj, entry);
                per_cpu(csdev_src, cpu) = NULL;
        }

        return 0;
}

static ssize_t etm_perf_sink_name_show(struct device *dev,
                                       struct device_attribute *dattr,
                                       char *buf)
{
        struct dev_ext_attribute *ea;

        ea = container_of(dattr, struct dev_ext_attribute, attr);
        return scnprintf(buf, PAGE_SIZE, "0x%lx\n", (unsigned long)(ea->var));
}

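/*
 * Expose the sink under the PMU's "sinks" attribute group.  The file holds a
 * hash of the device name, which user space passes back in
 * perf_event_attr::config2 to pick this sink for a session.
 */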
int etm_perf_add_symlink_sink(struct coresight_device *csdev)
{
        int ret;
        unsigned long hash;
        const char *name;
        struct device *pmu_dev = etm_pmu.dev;
        struct device *pdev = csdev->dev.parent;
        struct dev_ext_attribute *ea;

        if (csdev->type != CORESIGHT_DEV_TYPE_SINK &&
            csdev->type != CORESIGHT_DEV_TYPE_LINKSINK)
                return -EINVAL;

        if (csdev->ea != NULL)
                return -EINVAL;

        if (!etm_perf_up)
                return -EPROBE_DEFER;

        ea = devm_kzalloc(pdev, sizeof(*ea), GFP_KERNEL);
        if (!ea)
                return -ENOMEM;

        name = dev_name(pdev);
        /* See coresight_get_sink_by_id() for where this hash is used */
        hash = hashlen_hash(hashlen_string(NULL, name));

        ea->attr.attr.name = devm_kstrdup(pdev, name, GFP_KERNEL);
        if (!ea->attr.attr.name)
                return -ENOMEM;

        ea->attr.attr.mode = 0444;
        ea->attr.show = etm_perf_sink_name_show;
        ea->var = (unsigned long *)hash;

        ret = sysfs_add_file_to_group(&pmu_dev->kobj,
                                      &ea->attr.attr, "sinks");

        if (!ret)
                csdev->ea = ea;

        return ret;
}

void etm_perf_del_symlink_sink(struct coresight_device *csdev)
{
        struct device *pmu_dev = etm_pmu.dev;
        struct dev_ext_attribute *ea = csdev->ea;

        if (csdev->type != CORESIGHT_DEV_TYPE_SINK &&
            csdev->type != CORESIGHT_DEV_TYPE_LINKSINK)
                return;

        if (!ea)
                return;

        sysfs_remove_file_from_group(&pmu_dev->kobj,
                                     &ea->attr.attr, "sinks");
        csdev->ea = NULL;
}

static int __init etm_perf_init(void)
{
        int ret;

        etm_pmu.capabilities            = (PERF_PMU_CAP_EXCLUSIVE |
                                           PERF_PMU_CAP_ITRACE);

        etm_pmu.attr_groups             = etm_pmu_attr_groups;
        etm_pmu.task_ctx_nr             = perf_sw_context;
        etm_pmu.read                    = etm_event_read;
        etm_pmu.event_init              = etm_event_init;
        etm_pmu.setup_aux               = etm_setup_aux;
        etm_pmu.free_aux                = etm_free_aux;
        etm_pmu.start                   = etm_event_start;
        etm_pmu.stop                    = etm_event_stop;
        etm_pmu.add                     = etm_event_add;
        etm_pmu.del                     = etm_event_del;
        etm_pmu.addr_filters_sync       = etm_addr_filters_sync;
        etm_pmu.addr_filters_validate   = etm_addr_filters_validate;
        etm_pmu.nr_addr_filters         = ETM_ADDR_CMP_MAX;

        ret = perf_pmu_register(&etm_pmu, CORESIGHT_ETM_PMU_NAME, -1);
        if (ret == 0)
                etm_perf_up = true;

        return ret;
}
device_initcall(etm_perf_init);