linux/kernel/trace/trace_functions.c
/*
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 Nadia Yvette Chambers
 */
#include <linux/ring_buffer.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/slab.h>
#include <linux/fs.h>

#include "trace.h"

static void tracing_start_function_trace(struct trace_array *tr);
static void tracing_stop_function_trace(struct trace_array *tr);
static void
function_trace_call(unsigned long ip, unsigned long parent_ip,
                    struct ftrace_ops *op, struct pt_regs *pt_regs);
static void
function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
                          struct ftrace_ops *op, struct pt_regs *pt_regs);
static struct tracer_flags func_flags;

/* Our option */
enum {
        TRACE_FUNC_OPT_STACK    = 0x1,
};

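/*
 * Allocate a per-instance ftrace_ops for @tr and point it at the plain
 * (non-stack) callback. The ops is linked back to its trace_array via
 * ->private so the callback can find the instance it belongs to.
 */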
static int allocate_ftrace_ops(struct trace_array *tr)
{
        struct ftrace_ops *ops;

        ops = kzalloc(sizeof(*ops), GFP_KERNEL);
        if (!ops)
                return -ENOMEM;

        /* Currently only the non-stack version is supported */
        ops->func = function_trace_call;
        ops->flags = FTRACE_OPS_FL_RECURSION_SAFE;

        tr->ops = ops;
        ops->private = tr;
        return 0;
}
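/*
 * Create the function tracer's filter control files for an instance
 * under @parent. The top-level (global) trace_array is skipped here:
 * it uses global_ops and gets its files created at boot.
 */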
int ftrace_create_function_files(struct trace_array *tr,
                                 struct dentry *parent)
{
        int ret;

        /*
         * The top level array uses the "global_ops", and the files are
         * created on boot up.
         */
        if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
                return 0;

        ret = allocate_ftrace_ops(tr);
        if (ret)
                return ret;

        ftrace_create_filter_files(tr->ops, parent);

        return 0;
}

void ftrace_destroy_function_files(struct trace_array *tr)
{
        ftrace_destroy_filter_files(tr->ops);
        kfree(tr->ops);
        tr->ops = NULL;
}

static int function_trace_init(struct trace_array *tr)
{
        ftrace_func_t func;

        /*
         * Instance trace_arrays get their ops allocated at instance
         * creation, unless that allocation failed.
         */
        if (!tr->ops)
                return -ENOMEM;

        /* Currently only the global instance can do stack tracing */
        if (tr->flags & TRACE_ARRAY_FL_GLOBAL &&
            func_flags.val & TRACE_FUNC_OPT_STACK)
                func = function_stack_trace_call;
        else
                func = function_trace_call;

        ftrace_init_array_ops(tr, func);

        tr->trace_buffer.cpu = get_cpu();
        put_cpu();

        tracing_start_cmdline_record();
        tracing_start_function_trace(tr);
        return 0;
}

static void function_trace_reset(struct trace_array *tr)
{
        tracing_stop_function_trace(tr);
        tracing_stop_cmdline_record();
        ftrace_reset_array_ops(tr);
}

static void function_trace_start(struct trace_array *tr)
{
        tracing_reset_online_cpus(&tr->trace_buffer);
}

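/*
 * Hot-path callback invoked for every traced function. It bails out
 * quickly when the instance is disabled, guards against recursion
 * (the tracing machinery itself calls traceable functions), and only
 * then records the ip/parent_ip pair into the per-cpu ring buffer.
 */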
static void
function_trace_call(unsigned long ip, unsigned long parent_ip,
                    struct ftrace_ops *op, struct pt_regs *pt_regs)
{
        struct trace_array *tr = op->private;
        struct trace_array_cpu *data;
        unsigned long flags;
        int bit;
        int cpu;
        int pc;

        if (unlikely(!tr->function_enabled))
                return;

        pc = preempt_count();
        preempt_disable_notrace();

        bit = trace_test_and_set_recursion(TRACE_FTRACE_START, TRACE_FTRACE_MAX);
        if (bit < 0)
                goto out;

        cpu = smp_processor_id();
        data = per_cpu_ptr(tr->trace_buffer.data, cpu);
        if (!atomic_read(&data->disabled)) {
                local_save_flags(flags);
                trace_function(tr, ip, parent_ip, flags, pc);
        }
        trace_clear_recursion(bit);

 out:
        preempt_enable_notrace();
}

static void
function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
                          struct ftrace_ops *op, struct pt_regs *pt_regs)
{
        struct trace_array *tr = op->private;
        struct trace_array_cpu *data;
        unsigned long flags;
        long disabled;
        int cpu;
        int pc;

        if (unlikely(!tr->function_enabled))
                return;

        /*
         * Need to use raw_smp_processor_id(), since this runs before
         * the recursion protection is in place.
         */
        local_irq_save(flags);
        cpu = raw_smp_processor_id();
        data = per_cpu_ptr(tr->trace_buffer.data, cpu);
        disabled = atomic_inc_return(&data->disabled);

        if (likely(disabled == 1)) {
                pc = preempt_count();
                trace_function(tr, ip, parent_ip, flags, pc);
                /*
                 * Skip over 5 funcs:
                 *    __ftrace_trace_stack,
                 *    __trace_stack,
                 *    function_stack_trace_call,
                 *    ftrace_list_func,
                 *    ftrace_call
                 */
                __trace_stack(tr, flags, 5, pc);
        }

        atomic_dec(&data->disabled);
        local_irq_restore(flags);
}

static struct tracer_opt func_opts[] = {
#ifdef CONFIG_STACKTRACE
        { TRACER_OPT(func_stack_trace, TRACE_FUNC_OPT_STACK) },
#endif
        { } /* Always set a last empty entry */
};

static struct tracer_flags func_flags = {
        .val = 0, /* By default: all flags disabled */
        .opts = func_opts
};

static void tracing_start_function_trace(struct trace_array *tr)
{
        tr->function_enabled = 0;
        register_ftrace_function(tr->ops);
        tr->function_enabled = 1;
}

static void tracing_stop_function_trace(struct trace_array *tr)
{
        tr->function_enabled = 0;
        unregister_ftrace_function(tr->ops);
}

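/*
 * Handle flips of the func_stack_trace option. Usage sketch (assuming
 * the tracing directory is mounted at /sys/kernel/debug/tracing):
 *
 *   echo function > current_tracer
 *   echo 1 > options/func_stack_trace
 *
 * The ops is unregistered before its ->func is swapped between the
 * plain and stack-tracing callbacks, then registered again.
 */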
static int
func_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
        switch (bit) {
        case TRACE_FUNC_OPT_STACK:
                /* do nothing if already set */
                if (!!set == !!(func_flags.val & TRACE_FUNC_OPT_STACK))
                        break;

                unregister_ftrace_function(tr->ops);

                if (set) {
                        tr->ops->func = function_stack_trace_call;
                        register_ftrace_function(tr->ops);
                } else {
                        tr->ops->func = function_trace_call;
                        register_ftrace_function(tr->ops);
                }

                break;
        default:
                return -EINVAL;
        }

        return 0;
}
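/*
 * The tracer definition itself. Selected from user space with:
 *
 *   echo function > current_tracer
 *
 * allow_instances lets this tracer be used inside trace instances as well.
 */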
static struct tracer function_trace __tracer_data =
{
        .name           = "function",
        .init           = function_trace_init,
        .reset          = function_trace_reset,
        .start          = function_trace_start,
        .flags          = &func_flags,
        .set_flag       = func_set_flag,
        .allow_instances = true,
#ifdef CONFIG_FTRACE_SELFTEST
        .selftest       = trace_selftest_startup_function,
#endif
};
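/*
 * What follows are the "function command" probes, available only with
 * dynamic ftrace. They are armed through set_ftrace_filter, e.g.:
 *
 *   echo 'schedule:traceoff' > set_ftrace_filter
 *   echo 'schedule:traceon:5' > set_ftrace_filter
 *   echo '!schedule:traceoff' > set_ftrace_filter
 *
 * (the function name is just an example; any filterable function works).
 */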
#ifdef CONFIG_DYNAMIC_FTRACE
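/*
 * Shared countdown helper for the probes below. The count lives in the
 * probe's data pointer: -1 means "unlimited" (never decremented), 0 means
 * the probe is spent, anything else is decremented once per hit.
 */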
static int update_count(void **data)
{
        unsigned long *count = (unsigned long *)data;

        if (!*count)
                return 0;

        if (*count != -1)
                (*count)--;

        return 1;
}

static void
ftrace_traceon_count(unsigned long ip, unsigned long parent_ip, void **data)
{
        if (tracing_is_on())
                return;

        if (update_count(data))
                tracing_on();
}

static void
ftrace_traceoff_count(unsigned long ip, unsigned long parent_ip, void **data)
{
        if (!tracing_is_on())
                return;

        if (update_count(data))
                tracing_off();
}

static void
ftrace_traceon(unsigned long ip, unsigned long parent_ip, void **data)
{
        if (tracing_is_on())
                return;

        tracing_on();
}

static void
ftrace_traceoff(unsigned long ip, unsigned long parent_ip, void **data)
{
        if (!tracing_is_on())
                return;

        tracing_off();
}

/*
 * Skip 4:
 *   ftrace_stacktrace()
 *   function_trace_probe_call()
 *   ftrace_ops_list_func()
 *   ftrace_call()
 */
#define STACK_SKIP 4

static void
ftrace_stacktrace(unsigned long ip, unsigned long parent_ip, void **data)
{
        trace_dump_stack(STACK_SKIP);
}

static void
ftrace_stacktrace_count(unsigned long ip, unsigned long parent_ip, void **data)
{
        if (!tracing_is_on())
                return;

        if (update_count(data))
                trace_dump_stack(STACK_SKIP);
}

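/* Dump every CPU's trace buffer to the console, at most @data times. */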
static void
ftrace_dump_probe(unsigned long ip, unsigned long parent_ip, void **data)
{
        if (update_count(data))
                ftrace_dump(DUMP_ALL);
}

/* Only dump the current CPU buffer. */
static void
ftrace_cpudump_probe(unsigned long ip, unsigned long parent_ip, void **data)
{
        if (update_count(data))
                ftrace_dump(DUMP_ORIG);
}

static int
ftrace_probe_print(const char *name, struct seq_file *m,
                   unsigned long ip, void *data)
{
        long count = (long)data;

        seq_printf(m, "%ps:%s", (void *)ip, name);

        if (count == -1)
                seq_printf(m, ":unlimited\n");
        else
                seq_printf(m, ":count=%ld\n", count);

        return 0;
}

static int
ftrace_traceon_print(struct seq_file *m, unsigned long ip,
                         struct ftrace_probe_ops *ops, void *data)
{
        return ftrace_probe_print("traceon", m, ip, data);
}

static int
ftrace_traceoff_print(struct seq_file *m, unsigned long ip,
                         struct ftrace_probe_ops *ops, void *data)
{
        return ftrace_probe_print("traceoff", m, ip, data);
}

static int
ftrace_stacktrace_print(struct seq_file *m, unsigned long ip,
                        struct ftrace_probe_ops *ops, void *data)
{
        return ftrace_probe_print("stacktrace", m, ip, data);
}

static int
ftrace_dump_print(struct seq_file *m, unsigned long ip,
                        struct ftrace_probe_ops *ops, void *data)
{
        return ftrace_probe_print("dump", m, ip, data);
}

static int
ftrace_cpudump_print(struct seq_file *m, unsigned long ip,
                        struct ftrace_probe_ops *ops, void *data)
{
        return ftrace_probe_print("cpudump", m, ip, data);
}

static struct ftrace_probe_ops traceon_count_probe_ops = {
        .func                   = ftrace_traceon_count,
        .print                  = ftrace_traceon_print,
};

static struct ftrace_probe_ops traceoff_count_probe_ops = {
        .func                   = ftrace_traceoff_count,
        .print                  = ftrace_traceoff_print,
};

static struct ftrace_probe_ops stacktrace_count_probe_ops = {
        .func                   = ftrace_stacktrace_count,
        .print                  = ftrace_stacktrace_print,
};

static struct ftrace_probe_ops dump_probe_ops = {
        .func                   = ftrace_dump_probe,
        .print                  = ftrace_dump_print,
};

static struct ftrace_probe_ops cpudump_probe_ops = {
        .func                   = ftrace_cpudump_probe,
        .print                  = ftrace_cpudump_print,
};

static struct ftrace_probe_ops traceon_probe_ops = {
        .func                   = ftrace_traceon,
        .print                  = ftrace_traceon_print,
};

static struct ftrace_probe_ops traceoff_probe_ops = {
        .func                   = ftrace_traceoff,
        .print                  = ftrace_traceoff_print,
};

static struct ftrace_probe_ops stacktrace_probe_ops = {
        .func                   = ftrace_stacktrace,
        .print                  = ftrace_stacktrace_print,
};

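/*
 * Common parsing for the probe commands. The input from set_ftrace_filter
 * has the form <glob>:<cmd>[:<count>]; a leading '!' unregisters a
 * previously set probe instead. A missing or empty count registers the
 * probe as unlimited (count of -1).
 */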
static int
ftrace_trace_probe_callback(struct ftrace_probe_ops *ops,
                            struct ftrace_hash *hash, char *glob,
                            char *cmd, char *param, int enable)
{
        void *count = (void *)-1;
        char *number;
        int ret;

        /* hash funcs only work with set_ftrace_filter */
        if (!enable)
                return -EINVAL;

        if (glob[0] == '!') {
                unregister_ftrace_function_probe_func(glob+1, ops);
                return 0;
        }

        if (!param)
                goto out_reg;

        number = strsep(&param, ":");

        if (!strlen(number))
                goto out_reg;

        /*
         * We use the callback data field (which is a pointer)
         * as our counter.
         */
        ret = kstrtoul(number, 0, (unsigned long *)&count);
        if (ret)
                return ret;

 out_reg:
        ret = register_ftrace_function_probe(glob, ops, count);

        return ret < 0 ? ret : 0;
}

static int
ftrace_trace_onoff_callback(struct ftrace_hash *hash,
                            char *glob, char *cmd, char *param, int enable)
{
        struct ftrace_probe_ops *ops;

        /* we register both traceon and traceoff to this callback */
        if (strcmp(cmd, "traceon") == 0)
                ops = param ? &traceon_count_probe_ops : &traceon_probe_ops;
        else
                ops = param ? &traceoff_count_probe_ops : &traceoff_probe_ops;

        return ftrace_trace_probe_callback(ops, hash, glob, cmd,
                                           param, enable);
}

static int
ftrace_stacktrace_callback(struct ftrace_hash *hash,
                           char *glob, char *cmd, char *param, int enable)
{
        struct ftrace_probe_ops *ops;

        ops = param ? &stacktrace_count_probe_ops : &stacktrace_probe_ops;

        return ftrace_trace_probe_callback(ops, hash, glob, cmd,
                                           param, enable);
}

static int
ftrace_dump_callback(struct ftrace_hash *hash,
                           char *glob, char *cmd, char *param, int enable)
{
        struct ftrace_probe_ops *ops;

        ops = &dump_probe_ops;

        /* Only dump once. */
        return ftrace_trace_probe_callback(ops, hash, glob, cmd,
                                           "1", enable);
}

static int
ftrace_cpudump_callback(struct ftrace_hash *hash,
                           char *glob, char *cmd, char *param, int enable)
{
        struct ftrace_probe_ops *ops;

        ops = &cpudump_probe_ops;

        /* Only dump once. */
        return ftrace_trace_probe_callback(ops, hash, glob, cmd,
                                           "1", enable);
}

static struct ftrace_func_command ftrace_traceon_cmd = {
        .name                   = "traceon",
        .func                   = ftrace_trace_onoff_callback,
};

static struct ftrace_func_command ftrace_traceoff_cmd = {
        .name                   = "traceoff",
        .func                   = ftrace_trace_onoff_callback,
};

static struct ftrace_func_command ftrace_stacktrace_cmd = {
        .name                   = "stacktrace",
        .func                   = ftrace_stacktrace_callback,
};

static struct ftrace_func_command ftrace_dump_cmd = {
        .name                   = "dump",
        .func                   = ftrace_dump_callback,
};

static struct ftrace_func_command ftrace_cpudump_cmd = {
        .name                   = "cpudump",
        .func                   = ftrace_cpudump_callback,
};

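/*
 * Register the five probe commands at boot, unwinding the commands
 * already registered if a later registration fails.
 */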
static int __init init_func_cmd_traceon(void)
{
        int ret;

        ret = register_ftrace_command(&ftrace_traceoff_cmd);
        if (ret)
                return ret;

        ret = register_ftrace_command(&ftrace_traceon_cmd);
        if (ret)
                goto out_free_traceoff;

        ret = register_ftrace_command(&ftrace_stacktrace_cmd);
        if (ret)
                goto out_free_traceon;

        ret = register_ftrace_command(&ftrace_dump_cmd);
        if (ret)
                goto out_free_stacktrace;

        ret = register_ftrace_command(&ftrace_cpudump_cmd);
        if (ret)
                goto out_free_dump;

        return 0;

 out_free_dump:
        unregister_ftrace_command(&ftrace_dump_cmd);
 out_free_stacktrace:
        unregister_ftrace_command(&ftrace_stacktrace_cmd);
 out_free_traceon:
        unregister_ftrace_command(&ftrace_traceon_cmd);
 out_free_traceoff:
        unregister_ftrace_command(&ftrace_traceoff_cmd);

        return ret;
}
#else
static inline int init_func_cmd_traceon(void)
{
        return 0;
}
#endif /* CONFIG_DYNAMIC_FTRACE */

static __init int init_function_trace(void)
{
        init_func_cmd_traceon();
        return register_tracer(&function_trace);
}
core_initcall(init_function_trace);