linux/kernel/trace/trace_functions.c
/*
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 Nadia Yvette Chambers
 */
#include <linux/ring_buffer.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/slab.h>
#include <linux/fs.h>

#include "trace.h"

static void tracing_start_function_trace(struct trace_array *tr);
static void tracing_stop_function_trace(struct trace_array *tr);
static void
function_trace_call(unsigned long ip, unsigned long parent_ip,
                    struct ftrace_ops *op, struct pt_regs *pt_regs);
static void
function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
                          struct ftrace_ops *op, struct pt_regs *pt_regs);
static struct tracer_flags func_flags;

/* Our option */
enum {
        TRACE_FUNC_OPT_STACK    = 0x1,
};

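/*
 * Allocate the ftrace_ops that an instance trace_array hangs its
 * filter files off of. The ops and the trace_array point at each
 * other, so the callback can find its instance via op->private.
 */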
static int allocate_ftrace_ops(struct trace_array *tr)
{
        struct ftrace_ops *ops;

        ops = kzalloc(sizeof(*ops), GFP_KERNEL);
        if (!ops)
                return -ENOMEM;

        /* Currently only the non-stack version is supported */
        ops->func = function_trace_call;
        ops->flags = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_PID;

        tr->ops = ops;
        ops->private = tr;
        return 0;
}

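/*
 * Called when an instance directory is created: allocate the
 * instance's ftrace_ops and create its function filter files under
 * @parent. The top level array is skipped because its files were
 * created on boot up.
 */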
int ftrace_create_function_files(struct trace_array *tr,
                                 struct dentry *parent)
{
        int ret;

        /*
         * The top level array uses the "global_ops", and the files are
         * created on boot up.
         */
        if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
                return 0;

        ret = allocate_ftrace_ops(tr);
        if (ret)
                return ret;

        ftrace_create_filter_files(tr->ops, parent);

        return 0;
}

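/*
 * Tear-down counterpart of ftrace_create_function_files(): remove the
 * filter files and free the instance's ftrace_ops.
 */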
void ftrace_destroy_function_files(struct trace_array *tr)
{
        ftrace_destroy_filter_files(tr->ops);
        kfree(tr->ops);
        tr->ops = NULL;
}

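/*
 * Tracer ->init callback: pick the plain or stack-tracing variant of
 * the function callback, bind it to this trace_array, and start
 * recording.
 */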
static int function_trace_init(struct trace_array *tr)
{
        ftrace_func_t func;

        /*
         * Instance trace_arrays get their ops allocated at instance
         * creation, unless that allocation failed.
         */
        if (!tr->ops)
                return -ENOMEM;

        /* Currently only the global instance can do stack tracing */
        if (tr->flags & TRACE_ARRAY_FL_GLOBAL &&
            func_flags.val & TRACE_FUNC_OPT_STACK)
                func = function_stack_trace_call;
        else
                func = function_trace_call;

        ftrace_init_array_ops(tr, func);

        tr->trace_buffer.cpu = get_cpu();
        put_cpu();

        tracing_start_cmdline_record();
        tracing_start_function_trace(tr);
        return 0;
}

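/*
 * Tracer ->reset callback: stop the callbacks and drop this
 * trace_array's binding set up by function_trace_init().
 */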
static void function_trace_reset(struct trace_array *tr)
{
        tracing_stop_function_trace(tr);
        tracing_stop_cmdline_record();
        ftrace_reset_array_ops(tr);
}

static void function_trace_start(struct trace_array *tr)
{
        tracing_reset_online_cpus(&tr->trace_buffer);
}

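/*
 * The function callback for the plain "function" tracer. This runs on
 * (nearly) every function entry, so it guards against recursion and
 * only records an event when the per-cpu buffer is not disabled.
 */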
static void
function_trace_call(unsigned long ip, unsigned long parent_ip,
                    struct ftrace_ops *op, struct pt_regs *pt_regs)
{
        struct trace_array *tr = op->private;
        struct trace_array_cpu *data;
        unsigned long flags;
        int bit;
        int cpu;
        int pc;

        if (unlikely(!tr->function_enabled))
                return;

        pc = preempt_count();
        preempt_disable_notrace();

        bit = trace_test_and_set_recursion(TRACE_FTRACE_START, TRACE_FTRACE_MAX);
        if (bit < 0)
                goto out;

        cpu = smp_processor_id();
        data = per_cpu_ptr(tr->trace_buffer.data, cpu);
        if (!atomic_read(&data->disabled)) {
                local_save_flags(flags);
                trace_function(tr, ip, parent_ip, flags, pc);
        }
        trace_clear_recursion(bit);

 out:
        preempt_enable_notrace();
}

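/*
 * Same as function_trace_call() but also records a stack trace.
 * Interrupts are disabled across the event, so the per-cpu disabled
 * counter is enough to keep the callback from nesting on one CPU.
 */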
static void
function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
                          struct ftrace_ops *op, struct pt_regs *pt_regs)
{
        struct trace_array *tr = op->private;
        struct trace_array_cpu *data;
        unsigned long flags;
        long disabled;
        int cpu;
        int pc;

        if (unlikely(!tr->function_enabled))
                return;

        /*
         * Need to use raw, since this must be called before the
         * recursive protection is performed.
         */
        local_irq_save(flags);
        cpu = raw_smp_processor_id();
        data = per_cpu_ptr(tr->trace_buffer.data, cpu);
        disabled = atomic_inc_return(&data->disabled);

        if (likely(disabled == 1)) {
                pc = preempt_count();
                trace_function(tr, ip, parent_ip, flags, pc);
                /*
                 * skip over 5 funcs:
                 *    __ftrace_trace_stack,
                 *    __trace_stack,
                 *    function_stack_trace_call
                 *    ftrace_list_func
                 *    ftrace_call
                 */
                __trace_stack(tr, flags, 5, pc);
        }

        atomic_dec(&data->disabled);
        local_irq_restore(flags);
}

static struct tracer_opt func_opts[] = {
#ifdef CONFIG_STACKTRACE
        { TRACER_OPT(func_stack_trace, TRACE_FUNC_OPT_STACK) },
#endif
        { } /* Always set a last empty entry */
};

static struct tracer_flags func_flags = {
        .val = 0, /* By default: all flags disabled */
        .opts = func_opts
};

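/*
 * function_enabled is cleared before registering the ops so that the
 * callback does not log events for this trace_array until it is fully
 * set up.
 */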
static void tracing_start_function_trace(struct trace_array *tr)
{
        tr->function_enabled = 0;
        register_ftrace_function(tr->ops);
        tr->function_enabled = 1;
}

static void tracing_stop_function_trace(struct trace_array *tr)
{
        tr->function_enabled = 0;
        unregister_ftrace_function(tr->ops);
}

static struct tracer function_trace;

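/*
 * Tracer ->set_flag callback: toggling func_stack_trace requires
 * swapping the live ftrace callback, which can only be done by
 * unregistering and re-registering the ops.
 */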
static int
func_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
        switch (bit) {
        case TRACE_FUNC_OPT_STACK:
                /* do nothing if already set */
                if (!!set == !!(func_flags.val & TRACE_FUNC_OPT_STACK))
                        break;

                /* We can change this flag when not running. */
                if (tr->current_trace != &function_trace)
                        break;

                unregister_ftrace_function(tr->ops);

                if (set) {
                        tr->ops->func = function_stack_trace_call;
                        register_ftrace_function(tr->ops);
                } else {
                        tr->ops->func = function_trace_call;
                        register_ftrace_function(tr->ops);
                }

                break;
        default:
                return -EINVAL;
        }

        return 0;
}

static struct tracer function_trace __tracer_data =
{
        .name           = "function",
        .init           = function_trace_init,
        .reset          = function_trace_reset,
        .start          = function_trace_start,
        .flags          = &func_flags,
        .set_flag       = func_set_flag,
        .allow_instances = true,
#ifdef CONFIG_FTRACE_SELFTEST
        .selftest       = trace_selftest_startup_function,
#endif
};

#ifdef CONFIG_DYNAMIC_FTRACE
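/*
 * What follows implements the set_ftrace_filter probe commands
 * (traceon, traceoff, stacktrace, dump, cpudump), which trigger an
 * action the first N times a given function is hit.
 */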
static void update_traceon_count(void **data, bool on)
{
        long *count = (long *)data;
        long old_count = *count;

        /*
         * Tracing gets disabled (or enabled) once per count.
         * This function can be called at the same time on multiple CPUs.
         * It is fine if both disable (or enable) tracing, as disabling
         * (or enabling) the second time doesn't do anything as the
         * state of the tracer is already disabled (or enabled).
         * What needs to be synchronized in this case is that the count
         * only gets decremented once, even if the tracer is disabled
         * (or enabled) twice, as the second one is really a nop.
         *
         * The memory barriers guarantee that we only decrement the
         * counter once. First the count is read to a local variable
         * and a read barrier is used to make sure that it is loaded
         * before checking if the tracer is in the state we want.
         * If the tracer is not in the state we want, then the count
         * is guaranteed to be the old count.
         *
         * Next the tracer is set to the state we want (disabled or enabled)
         * then a write memory barrier is used to make sure that
         * the new state is visible before changing the counter by
         * one minus the old counter. This guarantees that another CPU
         * executing this code will see the new state before seeing
         * the new counter value, and would not do anything if the new
         * counter is seen.
         *
         * Note, there is no synchronization between this and a user
         * setting the tracing_on file. But we currently don't care
         * about that.
         */
        if (!old_count)
                return;

        /* Make sure we see count before checking tracing state */
        smp_rmb();

        if (on == !!tracing_is_on())
                return;

        if (on)
                tracing_on();
        else
                tracing_off();

        /* unlimited? */
        if (old_count == -1)
                return;

        /* Make sure tracing state is visible before updating count */
        smp_wmb();

        *count = old_count - 1;
}

static void
ftrace_traceon_count(unsigned long ip, unsigned long parent_ip, void **data)
{
        update_traceon_count(data, 1);
}

static void
ftrace_traceoff_count(unsigned long ip, unsigned long parent_ip, void **data)
{
        update_traceon_count(data, 0);
}

static void
ftrace_traceon(unsigned long ip, unsigned long parent_ip, void **data)
{
        if (tracing_is_on())
                return;

        tracing_on();
}

static void
ftrace_traceoff(unsigned long ip, unsigned long parent_ip, void **data)
{
        if (!tracing_is_on())
                return;

        tracing_off();
}

/*
 * Skip 4:
 *   ftrace_stacktrace()
 *   function_trace_probe_call()
 *   ftrace_ops_list_func()
 *   ftrace_call()
 */
#define STACK_SKIP 4

static void
ftrace_stacktrace(unsigned long ip, unsigned long parent_ip, void **data)
{
        trace_dump_stack(STACK_SKIP);
}

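/*
 * Like update_traceon_count() but for stack traces: cmpxchg() is used
 * so that concurrent CPUs hitting the probe decrement the counter
 * exactly once per dumped stack.
 */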
static void
ftrace_stacktrace_count(unsigned long ip, unsigned long parent_ip, void **data)
{
        long *count = (long *)data;
        long old_count;
        long new_count;

        /*
         * Stack traces should only execute the number of times the
         * user specified in the counter.
         */
        do {

                if (!tracing_is_on())
                        return;

                old_count = *count;

                if (!old_count)
                        return;

                /* unlimited? */
                if (old_count == -1) {
                        trace_dump_stack(STACK_SKIP);
                        return;
                }

                new_count = old_count - 1;
                new_count = cmpxchg(count, old_count, new_count);
                if (new_count == old_count)
                        trace_dump_stack(STACK_SKIP);

        } while (new_count != old_count);
}

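/*
 * Decrement the probe's countdown. Returns 1 if the action should
 * fire (count not yet exhausted, or -1 for unlimited), 0 once the
 * count has hit zero.
 */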
static int update_count(void **data)
{
        unsigned long *count = (unsigned long *)data;

        if (!*count)
                return 0;

        if (*count != -1)
                (*count)--;

        return 1;
}

static void
ftrace_dump_probe(unsigned long ip, unsigned long parent_ip, void **data)
{
        if (update_count(data))
                ftrace_dump(DUMP_ALL);
}

/* Only dump the current CPU buffer. */
static void
ftrace_cpudump_probe(unsigned long ip, unsigned long parent_ip, void **data)
{
        if (update_count(data))
                ftrace_dump(DUMP_ORIG);
}

static int
ftrace_probe_print(const char *name, struct seq_file *m,
                   unsigned long ip, void *data)
{
        long count = (long)data;

        seq_printf(m, "%ps:%s", (void *)ip, name);

        if (count == -1)
                seq_puts(m, ":unlimited\n");
        else
                seq_printf(m, ":count=%ld\n", count);

        return 0;
}

static int
ftrace_traceon_print(struct seq_file *m, unsigned long ip,
                         struct ftrace_probe_ops *ops, void *data)
{
        return ftrace_probe_print("traceon", m, ip, data);
}

static int
ftrace_traceoff_print(struct seq_file *m, unsigned long ip,
                         struct ftrace_probe_ops *ops, void *data)
{
        return ftrace_probe_print("traceoff", m, ip, data);
}

static int
ftrace_stacktrace_print(struct seq_file *m, unsigned long ip,
                        struct ftrace_probe_ops *ops, void *data)
{
        return ftrace_probe_print("stacktrace", m, ip, data);
}

static int
ftrace_dump_print(struct seq_file *m, unsigned long ip,
                        struct ftrace_probe_ops *ops, void *data)
{
        return ftrace_probe_print("dump", m, ip, data);
}

static int
ftrace_cpudump_print(struct seq_file *m, unsigned long ip,
                        struct ftrace_probe_ops *ops, void *data)
{
        return ftrace_probe_print("cpudump", m, ip, data);
}

static struct ftrace_probe_ops traceon_count_probe_ops = {
        .func                   = ftrace_traceon_count,
        .print                  = ftrace_traceon_print,
};

static struct ftrace_probe_ops traceoff_count_probe_ops = {
        .func                   = ftrace_traceoff_count,
        .print                  = ftrace_traceoff_print,
};

static struct ftrace_probe_ops stacktrace_count_probe_ops = {
        .func                   = ftrace_stacktrace_count,
        .print                  = ftrace_stacktrace_print,
};

static struct ftrace_probe_ops dump_probe_ops = {
        .func                   = ftrace_dump_probe,
        .print                  = ftrace_dump_print,
};

static struct ftrace_probe_ops cpudump_probe_ops = {
        .func                   = ftrace_cpudump_probe,
        .print                  = ftrace_cpudump_print,
};

static struct ftrace_probe_ops traceon_probe_ops = {
        .func                   = ftrace_traceon,
        .print                  = ftrace_traceon_print,
};

static struct ftrace_probe_ops traceoff_probe_ops = {
        .func                   = ftrace_traceoff,
        .print                  = ftrace_traceoff_print,
};

static struct ftrace_probe_ops stacktrace_probe_ops = {
        .func                   = ftrace_stacktrace,
        .print                  = ftrace_stacktrace_print,
};

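/*
 * Common parsing for the probe commands written to set_ftrace_filter,
 * e.g.:
 *
 *      echo 'schedule:traceoff:5' > set_ftrace_filter
 *
 * The glob picks the functions, an optional :count limits how many
 * times the action fires, and a leading '!' removes the probe.
 */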
static int
ftrace_trace_probe_callback(struct ftrace_probe_ops *ops,
                            struct ftrace_hash *hash, char *glob,
                            char *cmd, char *param, int enable)
{
        void *count = (void *)-1;
        char *number;
        int ret;

        /* hash funcs only work with set_ftrace_filter */
        if (!enable)
                return -EINVAL;

        if (glob[0] == '!') {
                unregister_ftrace_function_probe_func(glob+1, ops);
                return 0;
        }

        if (!param)
                goto out_reg;

        number = strsep(&param, ":");

        if (!strlen(number))
                goto out_reg;

        /*
         * We use the callback data field (which is a pointer)
         * as our counter.
         */
        ret = kstrtoul(number, 0, (unsigned long *)&count);
        if (ret)
                return ret;

 out_reg:
        ret = register_ftrace_function_probe(glob, ops, count);

        return ret < 0 ? ret : 0;
}

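/*
 * Both "traceon" and "traceoff" land here; the presence of a count
 * parameter selects the counted flavor of the probe ops.
 */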
static int
ftrace_trace_onoff_callback(struct ftrace_hash *hash,
                            char *glob, char *cmd, char *param, int enable)
{
        struct ftrace_probe_ops *ops;

        /* we register both traceon and traceoff to this callback */
        if (strcmp(cmd, "traceon") == 0)
                ops = param ? &traceon_count_probe_ops : &traceon_probe_ops;
        else
                ops = param ? &traceoff_count_probe_ops : &traceoff_probe_ops;

        return ftrace_trace_probe_callback(ops, hash, glob, cmd,
                                           param, enable);
}

static int
ftrace_stacktrace_callback(struct ftrace_hash *hash,
                           char *glob, char *cmd, char *param, int enable)
{
        struct ftrace_probe_ops *ops;

        ops = param ? &stacktrace_count_probe_ops : &stacktrace_probe_ops;

        return ftrace_trace_probe_callback(ops, hash, glob, cmd,
                                           param, enable);
}

static int
ftrace_dump_callback(struct ftrace_hash *hash,
                           char *glob, char *cmd, char *param, int enable)
{
        struct ftrace_probe_ops *ops;

        ops = &dump_probe_ops;

        /* Only dump once. */
        return ftrace_trace_probe_callback(ops, hash, glob, cmd,
                                           "1", enable);
}

static int
ftrace_cpudump_callback(struct ftrace_hash *hash,
                           char *glob, char *cmd, char *param, int enable)
{
        struct ftrace_probe_ops *ops;

        ops = &cpudump_probe_ops;

        /* Only dump once. */
        return ftrace_trace_probe_callback(ops, hash, glob, cmd,
                                           "1", enable);
}

static struct ftrace_func_command ftrace_traceon_cmd = {
        .name                   = "traceon",
        .func                   = ftrace_trace_onoff_callback,
};

static struct ftrace_func_command ftrace_traceoff_cmd = {
        .name                   = "traceoff",
        .func                   = ftrace_trace_onoff_callback,
};

static struct ftrace_func_command ftrace_stacktrace_cmd = {
        .name                   = "stacktrace",
        .func                   = ftrace_stacktrace_callback,
};

static struct ftrace_func_command ftrace_dump_cmd = {
        .name                   = "dump",
        .func                   = ftrace_dump_callback,
};

static struct ftrace_func_command ftrace_cpudump_cmd = {
        .name                   = "cpudump",
        .func                   = ftrace_cpudump_callback,
};

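/*
 * Register the probe commands, unwinding the ones already registered
 * if any later registration fails.
 */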
static int __init init_func_cmd_traceon(void)
{
        int ret;

        ret = register_ftrace_command(&ftrace_traceoff_cmd);
        if (ret)
                return ret;

        ret = register_ftrace_command(&ftrace_traceon_cmd);
        if (ret)
                goto out_free_traceoff;

        ret = register_ftrace_command(&ftrace_stacktrace_cmd);
        if (ret)
                goto out_free_traceon;

        ret = register_ftrace_command(&ftrace_dump_cmd);
        if (ret)
                goto out_free_stacktrace;

        ret = register_ftrace_command(&ftrace_cpudump_cmd);
        if (ret)
                goto out_free_dump;

        return 0;

 out_free_dump:
        unregister_ftrace_command(&ftrace_dump_cmd);
 out_free_stacktrace:
        unregister_ftrace_command(&ftrace_stacktrace_cmd);
 out_free_traceon:
        unregister_ftrace_command(&ftrace_traceon_cmd);
 out_free_traceoff:
        unregister_ftrace_command(&ftrace_traceoff_cmd);

        return ret;
}
#else
static inline int init_func_cmd_traceon(void)
{
        return 0;
}
#endif /* CONFIG_DYNAMIC_FTRACE */

static __init int init_function_trace(void)
{
        init_func_cmd_traceon();
        return register_tracer(&function_trace);
}
core_initcall(init_function_trace);