/* linux/tools/perf/builtin-ftrace.c */
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * builtin-ftrace.c
   4 *
   5 * Copyright (c) 2013  LG Electronics,  Namhyung Kim <namhyung@kernel.org>
   6 */
   7
   8#include "builtin.h"
   9#include "perf.h"
  10
  11#include <errno.h>
  12#include <unistd.h>
  13#include <signal.h>
  14#include <fcntl.h>
  15#include <poll.h>
  16
  17#include "debug.h"
  18#include <subcmd/parse-options.h>
  19#include <api/fs/tracing_path.h>
  20#include "evlist.h"
  21#include "target.h"
  22#include "cpumap.h"
  23#include "thread_map.h"
  24#include "util/config.h"
  25
  26
/* tracer used when none is given on the command line or in the perf config */
#define DEFAULT_TRACER  "function_graph"

/*
 * State for one 'perf ftrace' session: the target being traced and the
 * user-requested tracer configuration (filters are filter_entry lists).
 */
struct perf_ftrace {
	struct perf_evlist	*evlist;	/* thread/cpu maps for the target */
	struct target		target;		/* pid / cpu list / system-wide */
	const char		*tracer;	/* "function" or "function_graph" */
	struct list_head	filters;	/* -T: set_ftrace_filter entries */
	struct list_head	notrace;	/* -N: set_ftrace_notrace entries */
	struct list_head	graph_funcs;	/* -G: set_graph_function entries */
	struct list_head	nograph_funcs;	/* -g: set_graph_notrace entries */
	int			graph_depth;	/* -D: max_graph_depth, 0 = kernel default */
};
  39
/* one function-name filter; the name is stored inline (flexible array) */
struct filter_entry {
	struct list_head	list;
	char			name[];
};
  44
/* set from signal handlers to make the trace read loop stop */
static bool done;

static void sig_handler(int sig __maybe_unused)
{
	done = true;
}
  51
/*
 * perf_evlist__prepare_workload will send a SIGUSR1 if the fork fails, since
 * we asked by setting its exec_error to the function below,
 * ftrace__workload_exec_failed_signal.
 *
 * XXX We need to handle this more appropriately, emitting an error, etc.
 */
static void ftrace__workload_exec_failed_signal(int signo __maybe_unused,
						siginfo_t *info __maybe_unused,
						void *ucontext __maybe_unused)
{
	/* workload_exec_errno = info->si_value.sival_int; */
	done = true;	/* the workload never ran; end the session */
}
  66
  67static int __write_tracing_file(const char *name, const char *val, bool append)
  68{
  69        char *file;
  70        int fd, ret = -1;
  71        ssize_t size = strlen(val);
  72        int flags = O_WRONLY;
  73        char errbuf[512];
  74        char *val_copy;
  75
  76        file = get_tracing_file(name);
  77        if (!file) {
  78                pr_debug("cannot get tracing file: %s\n", name);
  79                return -1;
  80        }
  81
  82        if (append)
  83                flags |= O_APPEND;
  84        else
  85                flags |= O_TRUNC;
  86
  87        fd = open(file, flags);
  88        if (fd < 0) {
  89                pr_debug("cannot open tracing file: %s: %s\n",
  90                         name, str_error_r(errno, errbuf, sizeof(errbuf)));
  91                goto out;
  92        }
  93
  94        /*
  95         * Copy the original value and append a '\n'. Without this,
  96         * the kernel can hide possible errors.
  97         */
  98        val_copy = strdup(val);
  99        if (!val_copy)
 100                goto out_close;
 101        val_copy[size] = '\n';
 102
 103        if (write(fd, val_copy, size + 1) == size + 1)
 104                ret = 0;
 105        else
 106                pr_debug("write '%s' to tracing/%s failed: %s\n",
 107                         val, name, str_error_r(errno, errbuf, sizeof(errbuf)));
 108
 109        free(val_copy);
 110out_close:
 111        close(fd);
 112out:
 113        put_tracing_file(file);
 114        return ret;
 115}
 116
/* overwrite a tracefs control file with @val */
static int write_tracing_file(const char *name, const char *val)
{
	return __write_tracing_file(name, val, false);
}
 121
/* append @val to a tracefs control file (e.g. adding one more pid/filter) */
static int append_tracing_file(const char *name, const char *val)
{
	return __write_tracing_file(name, val, true);
}
 126
/* forward declarations: needed by reset_tracing_files() below */
static int reset_tracing_cpu(void);
static void reset_tracing_filters(void);
 129
 130static int reset_tracing_files(struct perf_ftrace *ftrace __maybe_unused)
 131{
 132        if (write_tracing_file("tracing_on", "0") < 0)
 133                return -1;
 134
 135        if (write_tracing_file("current_tracer", "nop") < 0)
 136                return -1;
 137
 138        if (write_tracing_file("set_ftrace_pid", " ") < 0)
 139                return -1;
 140
 141        if (reset_tracing_cpu() < 0)
 142                return -1;
 143
 144        if (write_tracing_file("max_graph_depth", "0") < 0)
 145                return -1;
 146
 147        reset_tracing_filters();
 148        return 0;
 149}
 150
 151static int set_tracing_pid(struct perf_ftrace *ftrace)
 152{
 153        int i;
 154        char buf[16];
 155
 156        if (target__has_cpu(&ftrace->target))
 157                return 0;
 158
 159        for (i = 0; i < thread_map__nr(ftrace->evlist->threads); i++) {
 160                scnprintf(buf, sizeof(buf), "%d",
 161                          ftrace->evlist->threads->map[i]);
 162                if (append_tracing_file("set_ftrace_pid", buf) < 0)
 163                        return -1;
 164        }
 165        return 0;
 166}
 167
 168static int set_tracing_cpumask(struct cpu_map *cpumap)
 169{
 170        char *cpumask;
 171        size_t mask_size;
 172        int ret;
 173        int last_cpu;
 174
 175        last_cpu = cpu_map__cpu(cpumap, cpumap->nr - 1);
 176        mask_size = last_cpu / 4 + 2; /* one more byte for EOS */
 177        mask_size += last_cpu / 32; /* ',' is needed for every 32th cpus */
 178
 179        cpumask = malloc(mask_size);
 180        if (cpumask == NULL) {
 181                pr_debug("failed to allocate cpu mask\n");
 182                return -1;
 183        }
 184
 185        cpu_map__snprint_mask(cpumap, cpumask, mask_size);
 186
 187        ret = write_tracing_file("tracing_cpumask", cpumask);
 188
 189        free(cpumask);
 190        return ret;
 191}
 192
 193static int set_tracing_cpu(struct perf_ftrace *ftrace)
 194{
 195        struct cpu_map *cpumap = ftrace->evlist->cpus;
 196
 197        if (!target__has_cpu(&ftrace->target))
 198                return 0;
 199
 200        return set_tracing_cpumask(cpumap);
 201}
 202
 203static int reset_tracing_cpu(void)
 204{
 205        struct cpu_map *cpumap = cpu_map__new(NULL);
 206        int ret;
 207
 208        ret = set_tracing_cpumask(cpumap);
 209        cpu_map__put(cpumap);
 210        return ret;
 211}
 212
/*
 * Append every function name in @funcs to the tracefs filter file
 * @filter_file.  Returns 0 on success, -1 on the first write failure.
 */
static int __set_tracing_filter(const char *filter_file, struct list_head *funcs)
{
	struct filter_entry *pos;

	list_for_each_entry(pos, funcs, list) {
		if (append_tracing_file(filter_file, pos->name) < 0)
			return -1;
	}

	return 0;
}
 224
 225static int set_tracing_filters(struct perf_ftrace *ftrace)
 226{
 227        int ret;
 228
 229        ret = __set_tracing_filter("set_ftrace_filter", &ftrace->filters);
 230        if (ret < 0)
 231                return ret;
 232
 233        ret = __set_tracing_filter("set_ftrace_notrace", &ftrace->notrace);
 234        if (ret < 0)
 235                return ret;
 236
 237        ret = __set_tracing_filter("set_graph_function", &ftrace->graph_funcs);
 238        if (ret < 0)
 239                return ret;
 240
 241        /* old kernels do not have this filter */
 242        __set_tracing_filter("set_graph_notrace", &ftrace->nograph_funcs);
 243
 244        return ret;
 245}
 246
 247static void reset_tracing_filters(void)
 248{
 249        write_tracing_file("set_ftrace_filter", " ");
 250        write_tracing_file("set_ftrace_notrace", " ");
 251        write_tracing_file("set_graph_function", " ");
 252        write_tracing_file("set_graph_notrace", " ");
 253}
 254
 255static int set_tracing_depth(struct perf_ftrace *ftrace)
 256{
 257        char buf[16];
 258
 259        if (ftrace->graph_depth == 0)
 260                return 0;
 261
 262        if (ftrace->graph_depth < 0) {
 263                pr_err("invalid graph depth: %d\n", ftrace->graph_depth);
 264                return -1;
 265        }
 266
 267        snprintf(buf, sizeof(buf), "%d", ftrace->graph_depth);
 268
 269        if (write_tracing_file("max_graph_depth", buf) < 0)
 270                return -1;
 271
 272        return 0;
 273}
 274
/*
 * Run one trace session: configure the tracefs files from @ftrace,
 * start the workload in @argv (if any), stream trace_pipe to stdout
 * until a signal sets 'done', then restore the tracefs state.
 * Returns 0 when the session ended via signal/workload completion,
 * -1 on setup error.
 */
static int __cmd_ftrace(struct perf_ftrace *ftrace, int argc, const char **argv)
{
	char *trace_file;
	int trace_fd;
	char buf[4096];
	struct pollfd pollfd = {
		.events = POLLIN,
	};

	/* writing the tracefs control files requires root */
	if (geteuid() != 0) {
		pr_err("ftrace only works for root!\n");
		return -1;
	}

	/* each of these ends the read loop below by setting 'done' */
	signal(SIGINT, sig_handler);
	signal(SIGUSR1, sig_handler);
	signal(SIGCHLD, sig_handler);
	signal(SIGPIPE, sig_handler);

	if (reset_tracing_files(ftrace) < 0) {
		pr_err("failed to reset ftrace\n");
		goto out;
	}

	/* reset ftrace buffer */
	if (write_tracing_file("trace", "0") < 0)
		goto out;

	/* fork the workload now; it is only kicked off after tracing_on */
	if (argc && perf_evlist__prepare_workload(ftrace->evlist,
				&ftrace->target, argv, false,
				ftrace__workload_exec_failed_signal) < 0) {
		goto out;
	}

	if (set_tracing_pid(ftrace) < 0) {
		pr_err("failed to set ftrace pid\n");
		goto out_reset;
	}

	if (set_tracing_cpu(ftrace) < 0) {
		pr_err("failed to set tracing cpumask\n");
		goto out_reset;
	}

	if (set_tracing_filters(ftrace) < 0) {
		pr_err("failed to set tracing filters\n");
		goto out_reset;
	}

	if (set_tracing_depth(ftrace) < 0) {
		pr_err("failed to set graph depth\n");
		goto out_reset;
	}

	/* select the tracer last, after pid/cpu/filter/depth are in place */
	if (write_tracing_file("current_tracer", ftrace->tracer) < 0) {
		pr_err("failed to set current_tracer to %s\n", ftrace->tracer);
		goto out_reset;
	}

	setup_pager();

	trace_file = get_tracing_file("trace_pipe");
	if (!trace_file) {
		pr_err("failed to open trace_pipe\n");
		goto out_reset;
	}

	trace_fd = open(trace_file, O_RDONLY);

	put_tracing_file(trace_file);

	if (trace_fd < 0) {
		pr_err("failed to open trace_pipe\n");
		goto out_reset;
	}

	/* non-blocking reads so the loop can notice 'done' promptly */
	fcntl(trace_fd, F_SETFL, O_NONBLOCK);
	pollfd.fd = trace_fd;

	if (write_tracing_file("tracing_on", "1") < 0) {
		pr_err("can't enable tracing\n");
		goto out_close_fd;
	}

	perf_evlist__start_workload(ftrace->evlist);

	/* copy trace_pipe to stdout until a signal requests shutdown */
	while (!done) {
		if (poll(&pollfd, 1, -1) < 0)
			break;

		if (pollfd.revents & POLLIN) {
			int n = read(trace_fd, buf, sizeof(buf));
			if (n < 0)
				break;
			if (fwrite(buf, n, 1, stdout) != 1)
				break;
		}
	}

	write_tracing_file("tracing_on", "0");

	/* read remaining buffer contents */
	while (true) {
		int n = read(trace_fd, buf, sizeof(buf));
		if (n <= 0)
			break;
		if (fwrite(buf, n, 1, stdout) != 1)
			break;
	}

out_close_fd:
	close(trace_fd);
out_reset:
	reset_tracing_files(ftrace);
out:
	return done ? 0 : -1;
}
 392
 393static int perf_ftrace_config(const char *var, const char *value, void *cb)
 394{
 395        struct perf_ftrace *ftrace = cb;
 396
 397        if (!strstarts(var, "ftrace."))
 398                return 0;
 399
 400        if (strcmp(var, "ftrace.tracer"))
 401                return -1;
 402
 403        if (!strcmp(value, "function_graph") ||
 404            !strcmp(value, "function")) {
 405                ftrace->tracer = value;
 406                return 0;
 407        }
 408
 409        pr_err("Please select \"function_graph\" (default) or \"function\"\n");
 410        return -1;
 411}
 412
 413static int parse_filter_func(const struct option *opt, const char *str,
 414                             int unset __maybe_unused)
 415{
 416        struct list_head *head = opt->value;
 417        struct filter_entry *entry;
 418
 419        entry = malloc(sizeof(*entry) + strlen(str) + 1);
 420        if (entry == NULL)
 421                return -ENOMEM;
 422
 423        strcpy(entry->name, str);
 424        list_add_tail(&entry->list, head);
 425
 426        return 0;
 427}
 428
 429static void delete_filter_func(struct list_head *head)
 430{
 431        struct filter_entry *pos, *tmp;
 432
 433        list_for_each_entry_safe(pos, tmp, head, list) {
 434                list_del_init(&pos->list);
 435                free(pos);
 436        }
 437}
 438
/*
 * Entry point for 'perf ftrace': read the ftrace.* perf config, parse
 * options, validate the target, build the thread/cpu maps and run the
 * trace session.  Returns 0 on success, negative on error.
 */
int cmd_ftrace(int argc, const char **argv)
{
	int ret;
	struct perf_ftrace ftrace = {
		.tracer = DEFAULT_TRACER,
		.target = { .uid = UINT_MAX, },
	};
	const char * const ftrace_usage[] = {
		"perf ftrace [<options>] [<command>]",
		"perf ftrace [<options>] -- <command> [<options>]",
		NULL
	};
	const struct option ftrace_options[] = {
	OPT_STRING('t', "tracer", &ftrace.tracer, "tracer",
		   "tracer to use: function_graph(default) or function"),
	OPT_STRING('p', "pid", &ftrace.target.pid, "pid",
		   "trace on existing process id"),
	OPT_INCR('v', "verbose", &verbose,
		 "be more verbose"),
	OPT_BOOLEAN('a', "all-cpus", &ftrace.target.system_wide,
		    "system-wide collection from all CPUs"),
	OPT_STRING('C', "cpu", &ftrace.target.cpu_list, "cpu",
		    "list of cpus to monitor"),
	OPT_CALLBACK('T', "trace-funcs", &ftrace.filters, "func",
		     "trace given functions only", parse_filter_func),
	OPT_CALLBACK('N', "notrace-funcs", &ftrace.notrace, "func",
		     "do not trace given functions", parse_filter_func),
	OPT_CALLBACK('G', "graph-funcs", &ftrace.graph_funcs, "func",
		     "Set graph filter on given functions", parse_filter_func),
	OPT_CALLBACK('g', "nograph-funcs", &ftrace.nograph_funcs, "func",
		     "Set nograph filter on given functions", parse_filter_func),
	OPT_INTEGER('D', "graph-depth", &ftrace.graph_depth,
		    "Max depth for function graph tracer"),
	OPT_END()
	};

	/* lists must be usable before parse_filter_func() appends to them */
	INIT_LIST_HEAD(&ftrace.filters);
	INIT_LIST_HEAD(&ftrace.notrace);
	INIT_LIST_HEAD(&ftrace.graph_funcs);
	INIT_LIST_HEAD(&ftrace.nograph_funcs);

	ret = perf_config(perf_ftrace_config, &ftrace);
	if (ret < 0)
		return -1;

	argc = parse_options(argc, argv, ftrace_options, ftrace_usage,
			    PARSE_OPT_STOP_AT_NON_OPTION);
	/* need either a command to run or an existing target (-p/-C/-a) */
	if (!argc && target__none(&ftrace.target))
		usage_with_options(ftrace_usage, ftrace_options);

	ret = target__validate(&ftrace.target);
	if (ret) {
		char errbuf[512];

		target__strerror(&ftrace.target, ret, errbuf, 512);
		pr_err("%s\n", errbuf);
		goto out_delete_filters;
	}

	ftrace.evlist = perf_evlist__new();
	if (ftrace.evlist == NULL) {
		ret = -ENOMEM;
		goto out_delete_filters;
	}

	ret = perf_evlist__create_maps(ftrace.evlist, &ftrace.target);
	if (ret < 0)
		goto out_delete_evlist;

	ret = __cmd_ftrace(&ftrace, argc, argv);

out_delete_evlist:
	perf_evlist__delete(ftrace.evlist);

out_delete_filters:
	delete_filter_func(&ftrace.filters);
	delete_filter_func(&ftrace.notrace);
	delete_filter_func(&ftrace.graph_funcs);
	delete_filter_func(&ftrace.nograph_funcs);

	return ret;
}
 521