linux/kernel/trace/trace_functions_graph.c
/*
 *
 * Function graph tracer.
 * Copyright (c) 2008-2009 Frederic Weisbecker <fweisbec@gmail.com>
 * Mostly borrowed from function tracer which
 * is Copyright (c) Steven Rostedt <srostedt@redhat.com>
 *
 */
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/fs.h>

#include "trace.h"
#include "trace_output.h"

/* Per-cpu output state: pid and call depth of the last entry printed */
struct fgraph_data {
        pid_t           last_pid;
        int             depth;
};

#define TRACE_GRAPH_INDENT      2

/* Flag options */
#define TRACE_GRAPH_PRINT_OVERRUN       0x1
#define TRACE_GRAPH_PRINT_CPU           0x2
#define TRACE_GRAPH_PRINT_OVERHEAD      0x4
#define TRACE_GRAPH_PRINT_PROC          0x8
#define TRACE_GRAPH_PRINT_DURATION      0x10
#define TRACE_GRAPH_PRINT_ABS_TIME      0x20

static struct tracer_opt trace_opts[] = {
        /* Display overruns? (for self-debug purposes) */
        { TRACER_OPT(funcgraph-overrun, TRACE_GRAPH_PRINT_OVERRUN) },
        /* Display CPU? */
        { TRACER_OPT(funcgraph-cpu, TRACE_GRAPH_PRINT_CPU) },
        /* Display overhead? */
        { TRACER_OPT(funcgraph-overhead, TRACE_GRAPH_PRINT_OVERHEAD) },
        /* Display proc name/pid */
        { TRACER_OPT(funcgraph-proc, TRACE_GRAPH_PRINT_PROC) },
        /* Display duration of execution */
        { TRACER_OPT(funcgraph-duration, TRACE_GRAPH_PRINT_DURATION) },
        /* Display absolute time of an entry */
        { TRACER_OPT(funcgraph-abstime, TRACE_GRAPH_PRINT_ABS_TIME) },
        { } /* Empty entry */
};

static struct tracer_flags tracer_flags = {
        /* Don't display overruns, proc, or absolute time by default */
        .val = TRACE_GRAPH_PRINT_CPU | TRACE_GRAPH_PRINT_OVERHEAD |
               TRACE_GRAPH_PRINT_DURATION,
        .opts = trace_opts
};

static struct trace_array *graph_array;

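/*
 * The function graph tracer records both the entry and the exit of a
 * function.  To catch the exit, the arch entry code saves the real
 * return address on a per-task "shadow" stack (current->ret_stack)
 * and rewrites the on-stack return address to point at a trampoline
 * (return_to_handler).  The two helpers below maintain that shadow
 * stack.
 */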
/* Add a function return address to the per-task return trace stack. */
int
ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth,
                         unsigned long frame_pointer)
{
        unsigned long long calltime;
        int index;

        if (!current->ret_stack)
                return -EBUSY;

        /*
         * We must make sure the ret_stack is tested before we read
         * anything else (this pairs with the write barrier issued
         * when the ret_stack is allocated).
         */
        smp_rmb();

        /* The return trace stack is full */
        if (current->curr_ret_stack == FTRACE_RETFUNC_DEPTH - 1) {
                atomic_inc(&current->trace_overrun);
                return -EBUSY;
        }

        calltime = trace_clock_local();

        /*
         * Reserve the slot by bumping the index before filling it in,
         * so that an interrupt pushing its own entries cannot clobber
         * ours; barrier() keeps the compiler from reordering the
         * stores above the increment.
         */
        index = ++current->curr_ret_stack;
        barrier();
        current->ret_stack[index].ret = ret;
        current->ret_stack[index].func = func;
        current->ret_stack[index].calltime = calltime;
        current->ret_stack[index].subtime = 0;
        current->ret_stack[index].fp = frame_pointer;
        *depth = index;

        return 0;
}

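/*
 * Illustrative sketch (not part of this file) of how an arch-side hook
 * is expected to call the push operation; the real code lives in arch
 * code such as arch/x86/kernel/ftrace.c.  If the shadow stack is full
 * (-EBUSY), the return path is simply left undiverted:
 *
 *      void prepare_ftrace_return(unsigned long *parent, unsigned long self,
 *                                 unsigned long fp)
 *      {
 *              struct ftrace_graph_ent trace;
 *
 *              if (ftrace_push_return_trace(*parent, self,
 *                                           &trace.depth, fp) == -EBUSY)
 *                      return;
 *              *parent = (unsigned long)&return_to_handler;
 *      }
 */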
/* Retrieve a function return address from the per-task return trace stack. */
static void
ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret,
                        unsigned long frame_pointer)
{
        int index;

        index = current->curr_ret_stack;

        if (unlikely(index < 0)) {
                ftrace_graph_stop();
                WARN_ON(1);
                /* Might as well panic, otherwise we have nowhere to go */
                *ret = (unsigned long)panic;
                return;
        }

#ifdef CONFIG_HAVE_FUNCTION_GRAPH_FP_TEST
        /*
         * The arch may choose to record the frame pointer used
         * and check it here to make sure that it is what we expect it
         * to be. If gcc does not set the place holder of the return
         * address in the frame pointer, and does a copy instead, then
         * the function graph trace will fail. This test detects this
         * case.
         *
         * Currently, x86_32 compiled with -Os (optimize for size)
         * makes the latest gcc do the above.
         */
        if (unlikely(current->ret_stack[index].fp != frame_pointer)) {
                ftrace_graph_stop();
                WARN(1, "Bad frame pointer: expected %lx, received %lx\n"
                     "  from func %ps return to %lx\n",
                     current->ret_stack[index].fp,
                     frame_pointer,
                     (void *)current->ret_stack[index].func,
                     current->ret_stack[index].ret);
                *ret = (unsigned long)panic;
                return;
        }
#endif

        *ret = current->ret_stack[index].ret;
        trace->func = current->ret_stack[index].func;
        trace->calltime = current->ret_stack[index].calltime;
        trace->overrun = atomic_read(&current->trace_overrun);
        trace->depth = index;
}

/*
 * Send the trace to the ring-buffer.
 * @return the original return address.
 */
unsigned long ftrace_return_to_handler(unsigned long frame_pointer)
{
        struct ftrace_graph_ret trace;
        unsigned long ret;

        ftrace_pop_return_trace(&trace, &ret, frame_pointer);
        trace.rettime = trace_clock_local();
        ftrace_graph_return(&trace);
        /*
         * Make sure the shadow stack entry has been fully consumed
         * before it is released by the decrement below.
         */
        barrier();
        current->curr_ret_stack--;

        if (unlikely(!ret)) {
                ftrace_graph_stop();
                WARN_ON(1);
                /* Might as well panic. What else to do? */
                ret = (unsigned long)panic;
        }

        return ret;
}

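/*
 * The matching arch-side exit trampoline (return_to_handler, written
 * in assembly) is expected to save the scratch registers, call
 * ftrace_return_to_handler() to log the exit and recover the original
 * return address, then restore the registers and jump to that address.
 */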
static int __trace_graph_entry(struct trace_array *tr,
                                struct ftrace_graph_ent *trace,
                                unsigned long flags,
                                int pc)
{
        struct ftrace_event_call *call = &event_funcgraph_entry;
        struct ring_buffer_event *event;
        struct ring_buffer *buffer = tr->buffer;
        struct ftrace_graph_ent_entry *entry;

        if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled))))
                return 0;

        event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_ENT,
                                          sizeof(*entry), flags, pc);
        if (!event)
                return 0;
        entry = ring_buffer_event_data(event);
        entry->graph_ent = *trace;
        if (!filter_current_check_discard(buffer, call, entry, event))
                ring_buffer_unlock_commit(buffer, event);

        return 1;
}

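/*
 * Entry callback invoked through the arch hook.  It filters out tasks
 * and functions that are not being traced, and uses data->disabled as
 * a per-cpu nesting counter so that only the outermost caller on a
 * CPU writes to the ring buffer.
 */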
int trace_graph_entry(struct ftrace_graph_ent *trace)
{
        struct trace_array *tr = graph_array;
        struct trace_array_cpu *data;
        unsigned long flags;
        long disabled;
        int ret;
        int cpu;
        int pc;

        if (unlikely(!tr))
                return 0;

        if (!ftrace_trace_task(current))
                return 0;

        if (!ftrace_graph_addr(trace->func))
                return 0;

        local_irq_save(flags);
        cpu = raw_smp_processor_id();
        data = tr->data[cpu];
        disabled = atomic_inc_return(&data->disabled);
        if (likely(disabled == 1)) {
                pc = preempt_count();
                ret = __trace_graph_entry(tr, trace, flags, pc);
        } else {
                ret = 0;
        }
        /* Only do the atomic if it is not already set */
        if (!test_tsk_trace_graph(current))
                set_tsk_trace_graph(current);

        atomic_dec(&data->disabled);
        local_irq_restore(flags);

        return ret;
}

static void __trace_graph_return(struct trace_array *tr,
                                struct ftrace_graph_ret *trace,
                                unsigned long flags,
                                int pc)
{
        struct ftrace_event_call *call = &event_funcgraph_exit;
        struct ring_buffer_event *event;
        struct ring_buffer *buffer = tr->buffer;
        struct ftrace_graph_ret_entry *entry;

        if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled))))
                return;

        event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_RET,
                                          sizeof(*entry), flags, pc);
        if (!event)
                return;
        entry = ring_buffer_event_data(event);
        entry->ret = *trace;
        if (!filter_current_check_discard(buffer, call, entry, event))
                ring_buffer_unlock_commit(buffer, event);
}

void trace_graph_return(struct ftrace_graph_ret *trace)
{
        struct trace_array *tr = graph_array;
        struct trace_array_cpu *data;
        unsigned long flags;
        long disabled;
        int cpu;
        int pc;

        local_irq_save(flags);
        cpu = raw_smp_processor_id();
        data = tr->data[cpu];
        disabled = atomic_inc_return(&data->disabled);
        if (likely(disabled == 1)) {
                pc = preempt_count();
                __trace_graph_return(tr, trace, flags, pc);
        }
        /*
         * The outermost traced function has returned: clear the flag
         * that was set on entry.
         */
        if (!trace->depth)
                clear_tsk_trace_graph(current);
        atomic_dec(&data->disabled);
        local_irq_restore(flags);
}

static int graph_trace_init(struct trace_array *tr)
{
        int ret;

        graph_array = tr;
        ret = register_ftrace_graph(&trace_graph_return,
                                    &trace_graph_entry);
        if (ret)
                return ret;
        tracing_start_cmdline_record();

        return 0;
}

void set_graph_array(struct trace_array *tr)
{
        graph_array = tr;
}

static void graph_trace_reset(struct trace_array *tr)
{
        tracing_stop_cmdline_record();
        unregister_ftrace_graph();
}

static int max_bytes_for_cpu;

static enum print_line_t
print_graph_cpu(struct trace_seq *s, int cpu)
{
        int ret;

        /*
         * Start with a space character - to make it stand out
         * to the right a bit when trace output is pasted into
         * email:
         */
        ret = trace_seq_printf(s, " %*d) ", max_bytes_for_cpu, cpu);
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        return TRACE_TYPE_HANDLED;
}

#define TRACE_GRAPH_PROCINFO_LENGTH     14

static enum print_line_t
print_graph_proc(struct trace_seq *s, pid_t pid)
{
        char comm[TASK_COMM_LEN];
        /* sign + log10(MAX_INT) + '\0' */
        char pid_str[11];
        int spaces = 0;
        int ret;
        int len;
        int i;

        trace_find_cmdline(pid, comm);
        /* Truncate the comm so "comm-pid" fits the field */
        comm[7] = '\0';
        sprintf(pid_str, "%d", pid);

        /* 1 stands for the "-" character */
        len = strlen(comm) + strlen(pid_str) + 1;

        if (len < TRACE_GRAPH_PROCINFO_LENGTH)
                spaces = TRACE_GRAPH_PROCINFO_LENGTH - len;

        /* Leading spaces to center the field */
        for (i = 0; i < spaces / 2; i++) {
                ret = trace_seq_printf(s, " ");
                if (!ret)
                        return TRACE_TYPE_PARTIAL_LINE;
        }

        ret = trace_seq_printf(s, "%s-%s", comm, pid_str);
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        /* Trailing spaces to center the field */
        for (i = 0; i < spaces - (spaces / 2); i++) {
                ret = trace_seq_printf(s, " ");
                if (!ret)
                        return TRACE_TYPE_PARTIAL_LINE;
        }
        return TRACE_TYPE_HANDLED;
}
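
/* Output example: pid 1755 of "sshd" prints as "  sshd-1755   " (14 wide) */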

static enum print_line_t
print_graph_lat_fmt(struct trace_seq *s, struct trace_entry *entry)
{
        if (!trace_seq_putc(s, ' '))
                return 0;

        return trace_print_lat_fmt(s, entry);
}

/* If the pid changed since the last trace, output this event */
static enum print_line_t
verif_pid(struct trace_seq *s, pid_t pid, int cpu, struct fgraph_data *data)
{
        pid_t prev_pid;
        pid_t *last_pid;
        int ret;

        if (!data)
                return TRACE_TYPE_HANDLED;

        last_pid = &(per_cpu_ptr(data, cpu)->last_pid);

        if (*last_pid == pid)
                return TRACE_TYPE_HANDLED;

        prev_pid = *last_pid;
        *last_pid = pid;

        if (prev_pid == -1)
                return TRACE_TYPE_HANDLED;
/*
 * Context-switch trace line:

 ------------------------------------------
 | 1)  migration/0--1  =>  sshd-1755
 ------------------------------------------

 */
        ret = trace_seq_printf(s,
                " ------------------------------------------\n");
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        ret = print_graph_cpu(s, cpu);
        if (ret == TRACE_TYPE_PARTIAL_LINE)
                return TRACE_TYPE_PARTIAL_LINE;

        ret = print_graph_proc(s, prev_pid);
        if (ret == TRACE_TYPE_PARTIAL_LINE)
                return TRACE_TYPE_PARTIAL_LINE;

        ret = trace_seq_printf(s, " => ");
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        ret = print_graph_proc(s, pid);
        if (ret == TRACE_TYPE_PARTIAL_LINE)
                return TRACE_TYPE_PARTIAL_LINE;

        ret = trace_seq_printf(s,
                "\n ------------------------------------------\n\n");
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        return TRACE_TYPE_HANDLED;
}

static struct ftrace_graph_ret_entry *
get_return_for_leaf(struct trace_iterator *iter,
                struct ftrace_graph_ent_entry *curr)
{
        struct ring_buffer_iter *ring_iter;
        struct ring_buffer_event *event;
        struct ftrace_graph_ret_entry *next;

        ring_iter = iter->buffer_iter[iter->cpu];

        /* First peek to compare current entry and the next one */
        if (ring_iter)
                event = ring_buffer_iter_peek(ring_iter, NULL);
        else {
                /* We need to consume the current entry to see the next one */
                ring_buffer_consume(iter->tr->buffer, iter->cpu, NULL);
                event = ring_buffer_peek(iter->tr->buffer, iter->cpu,
                                        NULL);
        }

        if (!event)
                return NULL;

        next = ring_buffer_event_data(event);

        if (next->ent.type != TRACE_GRAPH_RET)
                return NULL;

        if (curr->ent.pid != next->ent.pid ||
                        curr->graph_ent.func != next->ret.func)
                return NULL;

        /* This is a leaf, now advance the iterator */
        if (ring_iter)
                ring_buffer_read(ring_iter, NULL);

        return next;
}

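/*
 * When an entry event is immediately followed by its matching return
 * (same pid, same function), the pair is rendered as a single "leaf"
 * line, e.g. "do_IRQ();" instead of a "do_IRQ() {" ... "}" pair.
 */
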
/* Flag the execution-time overhead of a function in the output */
static int
print_graph_overhead(unsigned long long duration, struct trace_seq *s)
{
        /* If the duration column is disabled, we don't need anything */
        if (!(tracer_flags.val & TRACE_GRAPH_PRINT_DURATION))
                return 1;

        /* Non-nested entry or return */
        if (duration == -1)
                return trace_seq_printf(s, "  ");

        if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERHEAD) {
                /* Duration exceeded 100 usecs (duration is in ns) */
                if (duration > 100000ULL)
                        return trace_seq_printf(s, "! ");

                /* Duration exceeded 10 usecs */
                if (duration > 10000ULL)
                        return trace_seq_printf(s, "+ ");
        }

        return trace_seq_printf(s, "  ");
}
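
/* e.g. a 50 us (50000 ns) duration gets "+ ", a 500 us one gets "! " */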

static int print_graph_abs_time(u64 t, struct trace_seq *s)
{
        unsigned long usecs_rem;

        /* Split t (ns) into seconds and the remainder in usecs */
        usecs_rem = do_div(t, NSEC_PER_SEC);
        usecs_rem /= 1000;

        return trace_seq_printf(s, "%5lu.%06lu |  ",
                        (unsigned long)t, usecs_rem);
}

static enum print_line_t
print_graph_irq(struct trace_iterator *iter, unsigned long addr,
                enum trace_type type, int cpu, pid_t pid)
{
        int ret;
        struct trace_seq *s = &iter->seq;

        /* Only annotate addresses inside the irq entry text section */
        if (addr < (unsigned long)__irqentry_text_start ||
                addr >= (unsigned long)__irqentry_text_end)
                return TRACE_TYPE_UNHANDLED;

        /* Absolute time */
        if (tracer_flags.val & TRACE_GRAPH_PRINT_ABS_TIME) {
                ret = print_graph_abs_time(iter->ts, s);
                if (!ret)
                        return TRACE_TYPE_PARTIAL_LINE;
        }

        /* Cpu */
        if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU) {
                ret = print_graph_cpu(s, cpu);
                if (ret == TRACE_TYPE_PARTIAL_LINE)
                        return TRACE_TYPE_PARTIAL_LINE;
        }

        /* Proc */
        if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC) {
                ret = print_graph_proc(s, pid);
                if (ret == TRACE_TYPE_PARTIAL_LINE)
                        return TRACE_TYPE_PARTIAL_LINE;
                ret = trace_seq_printf(s, " | ");
                if (!ret)
                        return TRACE_TYPE_PARTIAL_LINE;
        }

        /* No overhead */
        ret = print_graph_overhead(-1, s);
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        if (type == TRACE_GRAPH_ENT)
                ret = trace_seq_printf(s, "==========>");
        else
                ret = trace_seq_printf(s, "<==========");

        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        /* Don't close the duration column if we don't have one */
        if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION)
                trace_seq_printf(s, " |");
        ret = trace_seq_printf(s, "\n");

        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;
        return TRACE_TYPE_HANDLED;
}

enum print_line_t
trace_print_graph_duration(unsigned long long duration, struct trace_seq *s)
{
        /* duration arrives in ns; after do_div() it holds usecs */
        unsigned long nsecs_rem = do_div(duration, 1000);
        /* log10(ULONG_MAX) + '\0' */
        char msecs_str[21];
        char nsecs_str[5];
        int ret, len;
        int i;

        sprintf(msecs_str, "%lu", (unsigned long) duration);

        /* Print usecs (despite its name, msecs_str holds usecs here) */
        ret = trace_seq_printf(s, "%s", msecs_str);
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        len = strlen(msecs_str);

        /* Print nsecs (we don't want to exceed 7 digits) */
        if (len < 7) {
                snprintf(nsecs_str, 8 - len, "%03lu", nsecs_rem);
                ret = trace_seq_printf(s, ".%s", nsecs_str);
                if (!ret)
                        return TRACE_TYPE_PARTIAL_LINE;
                len += strlen(nsecs_str);
        }

        ret = trace_seq_printf(s, " us ");
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        /* Print remaining spaces to fit the row's width */
        for (i = len; i < 7; i++) {
                ret = trace_seq_printf(s, " ");
                if (!ret)
                        return TRACE_TYPE_PARTIAL_LINE;
        }
        return TRACE_TYPE_HANDLED;
}
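
/* e.g. a 1234567 ns duration prints as "1234.567 us " */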

static enum print_line_t
print_graph_duration(unsigned long long duration, struct trace_seq *s)
{
        int ret;

        ret = trace_print_graph_duration(duration, s);
        if (ret != TRACE_TYPE_HANDLED)
                return ret;

        ret = trace_seq_printf(s, "|  ");
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        return TRACE_TYPE_HANDLED;
}

/* Case of a leaf function on its call entry */
static enum print_line_t
print_graph_entry_leaf(struct trace_iterator *iter,
                struct ftrace_graph_ent_entry *entry,
                struct ftrace_graph_ret_entry *ret_entry, struct trace_seq *s)
{
        struct fgraph_data *data = iter->private;
        struct ftrace_graph_ret *graph_ret;
        struct ftrace_graph_ent *call;
        unsigned long long duration;
        int ret;
        int i;

        graph_ret = &ret_entry->ret;
        call = &entry->graph_ent;
        duration = graph_ret->rettime - graph_ret->calltime;

        if (data) {
                int cpu = iter->cpu;
                int *depth = &(per_cpu_ptr(data, cpu)->depth);

                /*
                 * Comments are displayed at depth + 1. Since this
                 * is a leaf function, keep the comments at the same
                 * depth as the call itself.
                 */
                *depth = call->depth - 1;
        }

        /* Overhead */
        ret = print_graph_overhead(duration, s);
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        /* Duration */
        if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION) {
                ret = print_graph_duration(duration, s);
                if (ret == TRACE_TYPE_PARTIAL_LINE)
                        return TRACE_TYPE_PARTIAL_LINE;
        }

        /* Function */
        for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) {
                ret = trace_seq_printf(s, " ");
                if (!ret)
                        return TRACE_TYPE_PARTIAL_LINE;
        }

        ret = trace_seq_printf(s, "%ps();\n", (void *)call->func);
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        return TRACE_TYPE_HANDLED;
}

static enum print_line_t
print_graph_entry_nested(struct trace_iterator *iter,
                         struct ftrace_graph_ent_entry *entry,
                         struct trace_seq *s, int cpu)
{
        struct ftrace_graph_ent *call = &entry->graph_ent;
        struct fgraph_data *data = iter->private;
        int ret;
        int i;

        if (data) {
                int cpu = iter->cpu;
                int *depth = &(per_cpu_ptr(data, cpu)->depth);

                *depth = call->depth;
        }

        /* No overhead */
        ret = print_graph_overhead(-1, s);
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        /* No time */
        if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION) {
                ret = trace_seq_printf(s, "            |  ");
                if (!ret)
                        return TRACE_TYPE_PARTIAL_LINE;
        }

        /* Function */
        for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) {
                ret = trace_seq_printf(s, " ");
                if (!ret)
                        return TRACE_TYPE_PARTIAL_LINE;
        }

        ret = trace_seq_printf(s, "%ps() {\n", (void *)call->func);
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        /*
         * We already consumed the current entry to check the next one
         * and see if this is a leaf.
         */
        return TRACE_TYPE_NO_CONSUME;
}

static enum print_line_t
print_graph_prologue(struct trace_iterator *iter, struct trace_seq *s,
                     int type, unsigned long addr)
{
        struct fgraph_data *data = iter->private;
        struct trace_entry *ent = iter->ent;
        int cpu = iter->cpu;
        int ret;

        /* Pid */
        if (verif_pid(s, ent->pid, cpu, data) == TRACE_TYPE_PARTIAL_LINE)
                return TRACE_TYPE_PARTIAL_LINE;

        if (type) {
                /* Interrupt */
                ret = print_graph_irq(iter, addr, type, cpu, ent->pid);
                if (ret == TRACE_TYPE_PARTIAL_LINE)
                        return TRACE_TYPE_PARTIAL_LINE;
        }

        /* Absolute time */
        if (tracer_flags.val & TRACE_GRAPH_PRINT_ABS_TIME) {
                ret = print_graph_abs_time(iter->ts, s);
                if (!ret)
                        return TRACE_TYPE_PARTIAL_LINE;
        }

        /* Cpu */
        if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU) {
                ret = print_graph_cpu(s, cpu);
                if (ret == TRACE_TYPE_PARTIAL_LINE)
                        return TRACE_TYPE_PARTIAL_LINE;
        }

        /* Proc */
        if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC) {
                ret = print_graph_proc(s, ent->pid);
                if (ret == TRACE_TYPE_PARTIAL_LINE)
                        return TRACE_TYPE_PARTIAL_LINE;

                ret = trace_seq_printf(s, " | ");
                if (!ret)
                        return TRACE_TYPE_PARTIAL_LINE;
        }

        /* Latency format */
        if (trace_flags & TRACE_ITER_LATENCY_FMT) {
                ret = print_graph_lat_fmt(s, ent);
                if (ret == TRACE_TYPE_PARTIAL_LINE)
                        return TRACE_TYPE_PARTIAL_LINE;
        }

        return 0;
}

static enum print_line_t
print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s,
                        struct trace_iterator *iter)
{
        int cpu = iter->cpu;
        struct ftrace_graph_ent *call = &field->graph_ent;
        struct ftrace_graph_ret_entry *leaf_ret;

        if (print_graph_prologue(iter, s, TRACE_GRAPH_ENT, call->func))
                return TRACE_TYPE_PARTIAL_LINE;

        leaf_ret = get_return_for_leaf(iter, field);
        if (leaf_ret)
                return print_graph_entry_leaf(iter, field, leaf_ret, s);
        else
                return print_graph_entry_nested(iter, field, s, cpu);
}

static enum print_line_t
print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
                   struct trace_entry *ent, struct trace_iterator *iter)
{
        unsigned long long duration = trace->rettime - trace->calltime;
        struct fgraph_data *data = iter->private;
        pid_t pid = ent->pid;
        int cpu = iter->cpu;
        int ret;
        int i;

        if (data) {
                int cpu = iter->cpu;
                int *depth = &(per_cpu_ptr(data, cpu)->depth);

                /*
                 * Comments are displayed at depth + 1. This is the
                 * return from a function, so now we want the comments
                 * to display at the same level as the closing bracket.
                 */
                *depth = trace->depth - 1;
        }

        if (print_graph_prologue(iter, s, 0, 0))
                return TRACE_TYPE_PARTIAL_LINE;

        /* Overhead */
        ret = print_graph_overhead(duration, s);
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        /* Duration */
        if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION) {
                ret = print_graph_duration(duration, s);
                if (ret == TRACE_TYPE_PARTIAL_LINE)
                        return TRACE_TYPE_PARTIAL_LINE;
        }

        /* Closing brace */
        for (i = 0; i < trace->depth * TRACE_GRAPH_INDENT; i++) {
                ret = trace_seq_printf(s, " ");
                if (!ret)
                        return TRACE_TYPE_PARTIAL_LINE;
        }

        ret = trace_seq_printf(s, "}\n");
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        /* Overrun */
        if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERRUN) {
                ret = trace_seq_printf(s, " (Overruns: %lu)\n",
                                        trace->overrun);
                if (!ret)
                        return TRACE_TYPE_PARTIAL_LINE;
        }

        ret = print_graph_irq(iter, trace->func, TRACE_GRAPH_RET, cpu, pid);
        if (ret == TRACE_TYPE_PARTIAL_LINE)
                return TRACE_TYPE_PARTIAL_LINE;

        return TRACE_TYPE_HANDLED;
}

static enum print_line_t
print_graph_comment(struct trace_seq *s, struct trace_entry *ent,
                    struct trace_iterator *iter)
{
        unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
        struct fgraph_data *data = iter->private;
        struct trace_event *event;
        int depth = 0;
        int ret;
        int i;

        if (data)
                depth = per_cpu_ptr(data, iter->cpu)->depth;

        if (print_graph_prologue(iter, s, 0, 0))
                return TRACE_TYPE_PARTIAL_LINE;

        /* No overhead */
        ret = print_graph_overhead(-1, s);
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        /* No time */
        if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION) {
                ret = trace_seq_printf(s, "            |  ");
                if (!ret)
                        return TRACE_TYPE_PARTIAL_LINE;
        }

        /* Indentation */
        if (depth > 0)
                for (i = 0; i < (depth + 1) * TRACE_GRAPH_INDENT; i++) {
                        ret = trace_seq_printf(s, " ");
                        if (!ret)
                                return TRACE_TYPE_PARTIAL_LINE;
                }

        /* The comment */
        ret = trace_seq_printf(s, "/* ");
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        switch (iter->ent->type) {
        case TRACE_BPRINT:
                ret = trace_print_bprintk_msg_only(iter);
                if (ret != TRACE_TYPE_HANDLED)
                        return ret;
                break;
        case TRACE_PRINT:
                ret = trace_print_printk_msg_only(iter);
                if (ret != TRACE_TYPE_HANDLED)
                        return ret;
                break;
        default:
                event = ftrace_find_event(ent->type);
                if (!event)
                        return TRACE_TYPE_UNHANDLED;

                ret = event->trace(iter, sym_flags);
                if (ret != TRACE_TYPE_HANDLED)
                        return ret;
        }

        /* Strip the trailing newline */
        if (s->buffer[s->len - 1] == '\n') {
                s->buffer[s->len - 1] = '\0';
                s->len--;
        }

        ret = trace_seq_printf(s, " */\n");
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        return TRACE_TYPE_HANDLED;
}


enum print_line_t
print_graph_function(struct trace_iterator *iter)
{
        struct trace_entry *entry = iter->ent;
        struct trace_seq *s = &iter->seq;

        switch (entry->type) {
        case TRACE_GRAPH_ENT: {
                /*
                 * print_graph_entry() may consume the current event
                 * (via get_return_for_leaf()), thus @field may become
                 * invalid, so we need to save it. sizeof(struct
                 * ftrace_graph_ent_entry) is very small, so it can be
                 * safely saved on the stack.
                 */
                struct ftrace_graph_ent_entry *field, saved;
                trace_assign_type(field, entry);
                saved = *field;
                return print_graph_entry(&saved, s, iter);
        }
        case TRACE_GRAPH_RET: {
                struct ftrace_graph_ret_entry *field;
                trace_assign_type(field, entry);
                return print_graph_return(&field->ret, s, entry, iter);
        }
        default:
                return print_graph_comment(s, entry, iter);
        }

        return TRACE_TYPE_HANDLED;
}

static void print_lat_header(struct seq_file *s)
{
        static const char spaces[] = "                " /* 16 spaces */
                "    "                                  /* 4 spaces */
                "                 ";                    /* 17 spaces */
        int size = 0;

        if (tracer_flags.val & TRACE_GRAPH_PRINT_ABS_TIME)
                size += 16;
        if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU)
                size += 4;
        if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC)
                size += 17;

        seq_printf(s, "#%.*s  _-----=> irqs-off        \n", size, spaces);
        seq_printf(s, "#%.*s / _----=> need-resched    \n", size, spaces);
        seq_printf(s, "#%.*s| / _---=> hardirq/softirq \n", size, spaces);
        seq_printf(s, "#%.*s|| / _--=> preempt-depth   \n", size, spaces);
        seq_printf(s, "#%.*s||| / _-=> lock-depth      \n", size, spaces);
        seq_printf(s, "#%.*s|||| /                     \n", size, spaces);
}

static void print_graph_headers(struct seq_file *s)
{
        int lat = trace_flags & TRACE_ITER_LATENCY_FMT;

        if (lat)
                print_lat_header(s);

        /* 1st line */
        seq_printf(s, "#");
        if (tracer_flags.val & TRACE_GRAPH_PRINT_ABS_TIME)
                seq_printf(s, "     TIME       ");
        if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU)
                seq_printf(s, " CPU");
        if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC)
                seq_printf(s, "  TASK/PID       ");
        if (lat)
                seq_printf(s, "|||||");
        if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION)
                seq_printf(s, "  DURATION   ");
        seq_printf(s, "               FUNCTION CALLS\n");

        /* 2nd line */
        seq_printf(s, "#");
        if (tracer_flags.val & TRACE_GRAPH_PRINT_ABS_TIME)
                seq_printf(s, "      |         ");
        if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU)
                seq_printf(s, " |  ");
        if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC)
                seq_printf(s, "   |    |        ");
        if (lat)
                seq_printf(s, "|||||");
        if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION)
                seq_printf(s, "   |   |      ");
        seq_printf(s, "               |   |   |   |\n");
}

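/*
 * With the default flags this produces:
 *
 * # CPU  DURATION                  FUNCTION CALLS
 * # |     |   |                     |   |   |   |
 */
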
static void graph_trace_open(struct trace_iterator *iter)
{
        /* pid and depth on the last trace processed */
        struct fgraph_data *data = alloc_percpu(struct fgraph_data);
        int cpu;

        if (!data)
                pr_warning("function graph tracer: not enough memory\n");
        else
                for_each_possible_cpu(cpu) {
                        pid_t *pid = &(per_cpu_ptr(data, cpu)->last_pid);
                        int *depth = &(per_cpu_ptr(data, cpu)->depth);
                        *pid = -1;
                        *depth = 0;
                }

        iter->private = data;
}

static void graph_trace_close(struct trace_iterator *iter)
{
        free_percpu(iter->private);
}

static struct tracer graph_trace __read_mostly = {
        .name           = "function_graph",
        .open           = graph_trace_open,
        .close          = graph_trace_close,
        .wait_pipe      = poll_wait_pipe,
        .init           = graph_trace_init,
        .reset          = graph_trace_reset,
        .print_line     = print_graph_function,
        .print_header   = print_graph_headers,
        .flags          = &tracer_flags,
#ifdef CONFIG_FTRACE_SELFTEST
        .selftest       = trace_selftest_startup_function_graph,
#endif
};

static __init int init_graph_trace(void)
{
        /* Width needed to print the largest possible cpu number */
        max_bytes_for_cpu = snprintf(NULL, 0, "%d", nr_cpu_ids - 1);

        return register_tracer(&graph_trace);
}

device_initcall(init_graph_trace);