linux/kernel/trace/trace_functions_graph.c
/*
 * Function graph tracer.
 * Copyright (c) 2008-2009 Frederic Weisbecker <fweisbec@gmail.com>
 * Mostly borrowed from function tracer which
 * is Copyright (c) Steven Rostedt <srostedt@redhat.com>
 *
 */
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/slab.h>
#include <linux/fs.h>

#include "trace.h"
#include "trace_output.h"

/* When set, irq functions will be ignored */
static int ftrace_graph_skip_irqs;

struct fgraph_cpu_data {
	pid_t		last_pid;
	int		depth;
	int		depth_irq;
	int		ignore;
	unsigned long	enter_funcs[FTRACE_RETFUNC_DEPTH];
};

struct fgraph_data {
	struct fgraph_cpu_data __percpu *cpu_data;

	/* Place to preserve last processed entry. */
	struct ftrace_graph_ent_entry	ent;
	struct ftrace_graph_ret_entry	ret;
	int				failed;
	int				cpu;
};

#define TRACE_GRAPH_INDENT	2

/* Flag options */
#define TRACE_GRAPH_PRINT_OVERRUN	0x1
#define TRACE_GRAPH_PRINT_CPU		0x2
#define TRACE_GRAPH_PRINT_OVERHEAD	0x4
#define TRACE_GRAPH_PRINT_PROC		0x8
#define TRACE_GRAPH_PRINT_DURATION	0x10
#define TRACE_GRAPH_PRINT_ABS_TIME	0x20
#define TRACE_GRAPH_PRINT_IRQS		0x40

static struct tracer_opt trace_opts[] = {
	/* Display overruns? (for self-debug purposes) */
	{ TRACER_OPT(funcgraph-overrun, TRACE_GRAPH_PRINT_OVERRUN) },
	/* Display CPU? */
	{ TRACER_OPT(funcgraph-cpu, TRACE_GRAPH_PRINT_CPU) },
	/* Display overhead? */
	{ TRACER_OPT(funcgraph-overhead, TRACE_GRAPH_PRINT_OVERHEAD) },
	/* Display proc name/pid */
	{ TRACER_OPT(funcgraph-proc, TRACE_GRAPH_PRINT_PROC) },
	/* Display duration of execution */
	{ TRACER_OPT(funcgraph-duration, TRACE_GRAPH_PRINT_DURATION) },
	/* Display absolute time of an entry */
	{ TRACER_OPT(funcgraph-abstime, TRACE_GRAPH_PRINT_ABS_TIME) },
	/* Display interrupts */
	{ TRACER_OPT(funcgraph-irqs, TRACE_GRAPH_PRINT_IRQS) },
	{ } /* Empty entry */
};

static struct tracer_flags tracer_flags = {
	/* Don't display overruns and proc by default */
	.val = TRACE_GRAPH_PRINT_CPU | TRACE_GRAPH_PRINT_OVERHEAD |
	       TRACE_GRAPH_PRINT_DURATION | TRACE_GRAPH_PRINT_IRQS,
	.opts = trace_opts
};

static struct trace_array *graph_array;

/*
 * The DURATION column is also used to display IRQ signs; the following
 * values are used by print_graph_irq() and others to fill space in the
 * DURATION column.
 */
enum {
	DURATION_FILL_FULL  = -1,
	DURATION_FILL_START = -2,
	DURATION_FILL_END   = -3,
};

static enum print_line_t
print_graph_duration(unsigned long long duration, struct trace_seq *s,
		     u32 flags);

/* Add a function return address to the trace stack on thread info. */
int
ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth,
			 unsigned long frame_pointer)
{
	unsigned long long calltime;
	int index;

	if (!current->ret_stack)
		return -EBUSY;

	/*
	 * We must make sure the ret_stack is tested before we read
	 * anything else.
	 */
	smp_rmb();

	/* The return trace stack is full */
	if (current->curr_ret_stack == FTRACE_RETFUNC_DEPTH - 1) {
		atomic_inc(&current->trace_overrun);
		return -EBUSY;
	}

	calltime = trace_clock_local();

	index = ++current->curr_ret_stack;
	barrier();
	current->ret_stack[index].ret = ret;
	current->ret_stack[index].func = func;
	current->ret_stack[index].calltime = calltime;
	current->ret_stack[index].subtime = 0;
	current->ret_stack[index].fp = frame_pointer;
	*depth = index;

	return 0;
}
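
/*
 * Illustrative sketch (not part of this file): an architecture's entry
 * hook pairs with ftrace_push_return_trace() roughly the way x86's
 * prepare_ftrace_return() does -- push the real return address, then
 * point the caller's return slot at the arch trampoline, which later
 * calls ftrace_return_to_handler().  Error handling and the
 * ftrace_graph_entry() callback are elided here:
 *
 *	void prepare_ftrace_return(unsigned long *parent,
 *				   unsigned long self_addr,
 *				   unsigned long frame_pointer)
 *	{
 *		unsigned long old = *parent;
 *		int depth;
 *
 *		if (ftrace_push_return_trace(old, self_addr, &depth,
 *					     frame_pointer) == -EBUSY)
 *			return;
 *
 *		*parent = (unsigned long)&return_to_handler;
 *	}
 */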

/* Retrieve a function return address from the trace stack on thread info. */
static void
ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret,
			unsigned long frame_pointer)
{
	int index;

	index = current->curr_ret_stack;

	if (unlikely(index < 0)) {
		ftrace_graph_stop();
		WARN_ON(1);
		/* Might as well panic, otherwise we have nowhere to go */
		*ret = (unsigned long)panic;
		return;
	}

#ifdef CONFIG_HAVE_FUNCTION_GRAPH_FP_TEST
	/*
	 * The arch may choose to record the frame pointer used
	 * and check it here to make sure that it is what we expect it
	 * to be. If gcc does not set the place holder of the return
	 * address in the frame pointer, and does a copy instead, then
	 * the function graph trace will fail. This test detects this
	 * case.
	 *
	 * Currently, x86_32 compiled with -Os (optimize for size)
	 * makes the latest gcc do the above.
	 */
	if (unlikely(current->ret_stack[index].fp != frame_pointer)) {
		ftrace_graph_stop();
		WARN(1, "Bad frame pointer: expected %lx, received %lx\n"
		     "  from func %ps return to %lx\n",
		     current->ret_stack[index].fp,
		     frame_pointer,
		     (void *)current->ret_stack[index].func,
		     current->ret_stack[index].ret);
		*ret = (unsigned long)panic;
		return;
	}
#endif

	*ret = current->ret_stack[index].ret;
	trace->func = current->ret_stack[index].func;
	trace->calltime = current->ret_stack[index].calltime;
	trace->overrun = atomic_read(&current->trace_overrun);
	trace->depth = index;
}

/*
 * Send the trace to the ring-buffer.
 * @return the original return address.
 */
unsigned long ftrace_return_to_handler(unsigned long frame_pointer)
{
	struct ftrace_graph_ret trace;
	unsigned long ret;

	ftrace_pop_return_trace(&trace, &ret, frame_pointer);
	trace.rettime = trace_clock_local();
	ftrace_graph_return(&trace);
	barrier();
	current->curr_ret_stack--;

	if (unlikely(!ret)) {
		ftrace_graph_stop();
		WARN_ON(1);
		/* Might as well panic. What else to do? */
		ret = (unsigned long)panic;
	}

	return ret;
}
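
/*
 * On the return side the arch trampoline is the consumer of the address
 * returned above.  Schematically (it is assembly in practice):
 *
 *	return_to_handler:
 *		save the scratch/return-value registers
 *		call ftrace_return_to_handler(frame_pointer)
 *		restore the registers
 *		jump to the address it returned
 */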

int __trace_graph_entry(struct trace_array *tr,
				struct ftrace_graph_ent *trace,
				unsigned long flags,
				int pc)
{
	struct ftrace_event_call *call = &event_funcgraph_entry;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer = tr->buffer;
	struct ftrace_graph_ent_entry *entry;

	if (unlikely(__this_cpu_read(ftrace_cpu_disabled)))
		return 0;

	event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_ENT,
					  sizeof(*entry), flags, pc);
	if (!event)
		return 0;
	entry	= ring_buffer_event_data(event);
	entry->graph_ent			= *trace;
	if (!filter_current_check_discard(buffer, call, entry, event))
		ring_buffer_unlock_commit(buffer, event);

	return 1;
}

static inline int ftrace_graph_ignore_irqs(void)
{
	if (!ftrace_graph_skip_irqs || trace_recursion_test(TRACE_IRQ_BIT))
		return 0;

	return in_irq();
}

int trace_graph_entry(struct ftrace_graph_ent *trace)
{
	struct trace_array *tr = graph_array;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int ret;
	int cpu;
	int pc;

	if (!ftrace_trace_task(current))
		return 0;

	/*
	 * Trace the function if it is nested inside an already-traced
	 * function, or is itself an enabled function.
	 */
	if (!(trace->depth || ftrace_graph_addr(trace->func)) ||
	      ftrace_graph_ignore_irqs())
		return 0;

	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = tr->data[cpu];
	disabled = atomic_inc_return(&data->disabled);
	if (likely(disabled == 1)) {
		pc = preempt_count();
		ret = __trace_graph_entry(tr, trace, flags, pc);
	} else {
		ret = 0;
	}

	atomic_dec(&data->disabled);
	local_irq_restore(flags);

	return ret;
}

int trace_graph_thresh_entry(struct ftrace_graph_ent *trace)
{
	if (tracing_thresh)
		return 1;
	else
		return trace_graph_entry(trace);
}
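
/*
 * When tracing_thresh is non-zero the entry handler above records
 * nothing; only the matching trace_graph_thresh_return() below emits an
 * event, and only for functions that ran longer than the threshold.
 * A usage sketch via debugfs (the tracing_thresh file takes
 * microseconds):
 *
 *	# echo 100 > /sys/kernel/debug/tracing/tracing_thresh
 *	# echo function_graph > /sys/kernel/debug/tracing/current_tracer
 */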

static void
__trace_graph_function(struct trace_array *tr,
		unsigned long ip, unsigned long flags, int pc)
{
	u64 time = trace_clock_local();
	struct ftrace_graph_ent ent = {
		.func  = ip,
		.depth = 0,
	};
	struct ftrace_graph_ret ret = {
		.func     = ip,
		.depth    = 0,
		.calltime = time,
		.rettime  = time,
	};

	__trace_graph_entry(tr, &ent, flags, pc);
	__trace_graph_return(tr, &ret, flags, pc);
}

void
trace_graph_function(struct trace_array *tr,
		unsigned long ip, unsigned long parent_ip,
		unsigned long flags, int pc)
{
	__trace_graph_function(tr, ip, flags, pc);
}

void __trace_graph_return(struct trace_array *tr,
				struct ftrace_graph_ret *trace,
				unsigned long flags,
				int pc)
{
	struct ftrace_event_call *call = &event_funcgraph_exit;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer = tr->buffer;
	struct ftrace_graph_ret_entry *entry;

	if (unlikely(__this_cpu_read(ftrace_cpu_disabled)))
		return;

	event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_RET,
					  sizeof(*entry), flags, pc);
	if (!event)
		return;
	entry	= ring_buffer_event_data(event);
	entry->ret				= *trace;
	if (!filter_current_check_discard(buffer, call, entry, event))
		ring_buffer_unlock_commit(buffer, event);
}

void trace_graph_return(struct ftrace_graph_ret *trace)
{
	struct trace_array *tr = graph_array;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;
	int pc;

	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = tr->data[cpu];
	disabled = atomic_inc_return(&data->disabled);
	if (likely(disabled == 1)) {
		pc = preempt_count();
		__trace_graph_return(tr, trace, flags, pc);
	}
	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}

void set_graph_array(struct trace_array *tr)
{
	graph_array = tr;

	/* Make graph_array visible before we start tracing */
	smp_mb();
}

void trace_graph_thresh_return(struct ftrace_graph_ret *trace)
{
	if (tracing_thresh &&
	    (trace->rettime - trace->calltime < tracing_thresh))
		return;
	else
		trace_graph_return(trace);
}

static int graph_trace_init(struct trace_array *tr)
{
	int ret;

	set_graph_array(tr);
	if (tracing_thresh)
		ret = register_ftrace_graph(&trace_graph_thresh_return,
					    &trace_graph_thresh_entry);
	else
		ret = register_ftrace_graph(&trace_graph_return,
					    &trace_graph_entry);
	if (ret)
		return ret;
	tracing_start_cmdline_record();

	return 0;
}
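
/*
 * Minimal end-to-end usage once this tracer is registered (paths assume
 * debugfs is mounted at /sys/kernel/debug):
 *
 *	# echo function_graph > /sys/kernel/debug/tracing/current_tracer
 *	# cat /sys/kernel/debug/tracing/trace
 *	# echo nop > /sys/kernel/debug/tracing/current_tracer
 */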

static void graph_trace_reset(struct trace_array *tr)
{
	tracing_stop_cmdline_record();
	unregister_ftrace_graph();
}

static int max_bytes_for_cpu;

static enum print_line_t
print_graph_cpu(struct trace_seq *s, int cpu)
{
	int ret;

	/*
	 * Start with a space character - to make it stand out
	 * to the right a bit when trace output is pasted into
	 * email:
	 */
	ret = trace_seq_printf(s, " %*d) ", max_bytes_for_cpu, cpu);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

#define TRACE_GRAPH_PROCINFO_LENGTH	14

static enum print_line_t
print_graph_proc(struct trace_seq *s, pid_t pid)
{
	char comm[TASK_COMM_LEN];
	/* sign + log10(MAX_INT) + '\0' */
	char pid_str[11];
	int spaces = 0;
	int ret;
	int len;
	int i;

	trace_find_cmdline(pid, comm);
	comm[7] = '\0';
	sprintf(pid_str, "%d", pid);

	/* 1 stands for the "-" character */
	len = strlen(comm) + strlen(pid_str) + 1;

	if (len < TRACE_GRAPH_PROCINFO_LENGTH)
		spaces = TRACE_GRAPH_PROCINFO_LENGTH - len;

	/* Leading spaces to center the field */
	for (i = 0; i < spaces / 2; i++) {
		ret = trace_seq_printf(s, " ");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	ret = trace_seq_printf(s, "%s-%s", comm, pid_str);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Trailing spaces to center the field */
	for (i = 0; i < spaces - (spaces / 2); i++) {
		ret = trace_seq_printf(s, " ");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}
	return TRACE_TYPE_HANDLED;
}
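
/*
 * Worked example: pid 1755 with comm "sshd" prints as "sshd-1755"
 * (9 characters), leaving 5 spaces to distribute over the 14-column
 * field: 2 in front, 3 behind, i.e. "  sshd-1755   ".
 */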

static enum print_line_t
print_graph_lat_fmt(struct trace_seq *s, struct trace_entry *entry)
{
	if (!trace_seq_putc(s, ' '))
		return 0;

	return trace_print_lat_fmt(s, entry);
}

/* If the pid changed since the last trace, output this event */
static enum print_line_t
verif_pid(struct trace_seq *s, pid_t pid, int cpu, struct fgraph_data *data)
{
	pid_t prev_pid;
	pid_t *last_pid;
	int ret;

	if (!data)
		return TRACE_TYPE_HANDLED;

	last_pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid);

	if (*last_pid == pid)
		return TRACE_TYPE_HANDLED;

	prev_pid = *last_pid;
	*last_pid = pid;

	if (prev_pid == -1)
		return TRACE_TYPE_HANDLED;
	/*
	 * Context-switch trace line:
	 *
	 *  ------------------------------------------
	 *  | 1)  migration/0--1  =>  sshd-1755
	 *  ------------------------------------------
	 */
	ret = trace_seq_printf(s,
		" ------------------------------------------\n");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	ret = print_graph_cpu(s, cpu);
	if (ret == TRACE_TYPE_PARTIAL_LINE)
		return TRACE_TYPE_PARTIAL_LINE;

	ret = print_graph_proc(s, prev_pid);
	if (ret == TRACE_TYPE_PARTIAL_LINE)
		return TRACE_TYPE_PARTIAL_LINE;

	ret = trace_seq_printf(s, " => ");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	ret = print_graph_proc(s, pid);
	if (ret == TRACE_TYPE_PARTIAL_LINE)
		return TRACE_TYPE_PARTIAL_LINE;

	ret = trace_seq_printf(s,
		"\n ------------------------------------------\n\n");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

static struct ftrace_graph_ret_entry *
get_return_for_leaf(struct trace_iterator *iter,
		struct ftrace_graph_ent_entry *curr)
{
	struct fgraph_data *data = iter->private;
	struct ring_buffer_iter *ring_iter = NULL;
	struct ring_buffer_event *event;
	struct ftrace_graph_ret_entry *next;

	/*
	 * If the previous output failed to write to the seq buffer,
	 * then we just reuse the data from before.
	 */
	if (data && data->failed) {
		curr = &data->ent;
		next = &data->ret;
	} else {

		ring_iter = iter->buffer_iter[iter->cpu];

		/* First peek to compare current entry and the next one */
		if (ring_iter)
			event = ring_buffer_iter_peek(ring_iter, NULL);
		else {
			/*
			 * We need to consume the current entry to see
			 * the next one.
			 */
			ring_buffer_consume(iter->tr->buffer, iter->cpu,
					    NULL, NULL);
			event = ring_buffer_peek(iter->tr->buffer, iter->cpu,
						 NULL, NULL);
		}

		if (!event)
			return NULL;

		next = ring_buffer_event_data(event);

		if (data) {
			/*
			 * Save current and next entries for later reference
			 * if the output fails.
			 */
			data->ent = *curr;
			/*
			 * If the next event is a return type, we can safely
			 * copy the entire event. Otherwise we only care
			 * about what type it is.
			 */
			if (next->ent.type == TRACE_GRAPH_RET)
				data->ret = *next;
			else
				data->ret.ent.type = next->ent.type;
		}
	}

	if (next->ent.type != TRACE_GRAPH_RET)
		return NULL;

	if (curr->ent.pid != next->ent.pid ||
			curr->graph_ent.func != next->ret.func)
		return NULL;

	/* This is a leaf, now advance the iterator */
	if (ring_iter)
		ring_buffer_read(ring_iter, NULL);

	return next;
}
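
/*
 * The effect on the output: when the very next event is the matching
 * return, entry and return collapse into a single line; otherwise a
 * bracketed block is opened (schematic):
 *
 *	 1)   0.535 us    |        leaf_func();
 *	 1)               |        parent_func() {
 *	 ...
 *	 1)   2.103 us    |        }
 */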

static int print_graph_abs_time(u64 t, struct trace_seq *s)
{
	unsigned long usecs_rem;

	usecs_rem = do_div(t, NSEC_PER_SEC);
	usecs_rem /= 1000;

	return trace_seq_printf(s, "%5lu.%06lu |  ",
			(unsigned long)t, usecs_rem);
}

static enum print_line_t
print_graph_irq(struct trace_iterator *iter, unsigned long addr,
		enum trace_type type, int cpu, pid_t pid, u32 flags)
{
	int ret;
	struct trace_seq *s = &iter->seq;

	if (addr < (unsigned long)__irqentry_text_start ||
		addr >= (unsigned long)__irqentry_text_end)
		return TRACE_TYPE_UNHANDLED;

	if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
		/* Absolute time */
		if (flags & TRACE_GRAPH_PRINT_ABS_TIME) {
			ret = print_graph_abs_time(iter->ts, s);
			if (!ret)
				return TRACE_TYPE_PARTIAL_LINE;
		}

		/* Cpu */
		if (flags & TRACE_GRAPH_PRINT_CPU) {
			ret = print_graph_cpu(s, cpu);
			if (ret == TRACE_TYPE_PARTIAL_LINE)
				return TRACE_TYPE_PARTIAL_LINE;
		}

		/* Proc */
		if (flags & TRACE_GRAPH_PRINT_PROC) {
			ret = print_graph_proc(s, pid);
			if (ret == TRACE_TYPE_PARTIAL_LINE)
				return TRACE_TYPE_PARTIAL_LINE;
			ret = trace_seq_printf(s, " | ");
			if (!ret)
				return TRACE_TYPE_PARTIAL_LINE;
		}
	}

	/* No overhead */
	ret = print_graph_duration(DURATION_FILL_START, s, flags);
	if (ret != TRACE_TYPE_HANDLED)
		return ret;

	if (type == TRACE_GRAPH_ENT)
		ret = trace_seq_printf(s, "==========>");
	else
		ret = trace_seq_printf(s, "<==========");

	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	ret = print_graph_duration(DURATION_FILL_END, s, flags);
	if (ret != TRACE_TYPE_HANDLED)
		return ret;

	ret = trace_seq_printf(s, "\n");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;
	return TRACE_TYPE_HANDLED;
}
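
/*
 * In the trace this draws arrows in the DURATION column around the
 * interrupt (schematic; exact spacing depends on the enabled flags):
 *
 *	 1)   ==========> |
 *	 1)               |  smp_apic_timer_interrupt() {
 *	 ...
 *	 1)   <========== |
 */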

enum print_line_t
trace_print_graph_duration(unsigned long long duration, struct trace_seq *s)
{
	unsigned long nsecs_rem = do_div(duration, 1000);
	/* log10(ULONG_MAX) + '\0' */
	char msecs_str[21];
	char nsecs_str[5];
	int ret, len;
	int i;

	sprintf(msecs_str, "%lu", (unsigned long) duration);

	/* Print usecs */
	ret = trace_seq_printf(s, "%s", msecs_str);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	len = strlen(msecs_str);

	/* Print nsecs (we don't want to exceed 7 digits) */
	if (len < 7) {
		size_t slen = min_t(size_t, sizeof(nsecs_str), 8UL - len);

		snprintf(nsecs_str, slen, "%03lu", nsecs_rem);
		ret = trace_seq_printf(s, ".%s", nsecs_str);
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
		len += strlen(nsecs_str);
	}

	ret = trace_seq_printf(s, " us ");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Print remaining spaces to fit the row's width */
	for (i = len; i < 7; i++) {
		ret = trace_seq_printf(s, " ");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}
	return TRACE_TYPE_HANDLED;
}
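
/*
 * Worked example: duration = 12345 ns.  do_div() leaves duration = 12
 * (usecs) and nsecs_rem = 345, so the column reads "12.345 us" plus
 * padding.  Once the usecs part reaches 7 digits the fractional part
 * is dropped to keep the column width.
 */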

static enum print_line_t
print_graph_duration(unsigned long long duration, struct trace_seq *s,
		     u32 flags)
{
	int ret = -1;

	if (!(flags & TRACE_GRAPH_PRINT_DURATION) ||
	    !(trace_flags & TRACE_ITER_CONTEXT_INFO))
		return TRACE_TYPE_HANDLED;

	/* No real data, just filling the column with spaces */
	switch (duration) {
	case DURATION_FILL_FULL:
		ret = trace_seq_printf(s, "              |  ");
		return ret ? TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE;
	case DURATION_FILL_START:
		ret = trace_seq_printf(s, "  ");
		return ret ? TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE;
	case DURATION_FILL_END:
		ret = trace_seq_printf(s, " |");
		return ret ? TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE;
	}

	/* Signal the execution-time overhead to the output */
	if (flags & TRACE_GRAPH_PRINT_OVERHEAD) {
		/* Duration exceeded 100 usecs */
		if (duration > 100000ULL)
			ret = trace_seq_printf(s, "! ");
		/* Duration exceeded 10 usecs */
		else if (duration > 10000ULL)
			ret = trace_seq_printf(s, "+ ");
	}

	/*
	 * The -1 means we either did not exceed the duration thresholds
	 * or we don't want to print out the overhead. Either way we need
	 * to fill out the space.
	 */
	if (ret == -1)
		ret = trace_seq_printf(s, "  ");

	/* Catch here any failure that happened above */
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	ret = trace_print_graph_duration(duration, s);
	if (ret != TRACE_TYPE_HANDLED)
		return ret;

	ret = trace_seq_printf(s, "|  ");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}
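
/*
 * Example with funcgraph-overhead set: a 150 usec function is shown as
 * "! 150.000 us", a 15 usec one as "+ 15.000 us", and anything at or
 * below 10 usecs gets two plain spaces instead of a marker.
 */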

/* Case of a leaf function on its call entry */
static enum print_line_t
print_graph_entry_leaf(struct trace_iterator *iter,
		struct ftrace_graph_ent_entry *entry,
		struct ftrace_graph_ret_entry *ret_entry,
		struct trace_seq *s, u32 flags)
{
	struct fgraph_data *data = iter->private;
	struct ftrace_graph_ret *graph_ret;
	struct ftrace_graph_ent *call;
	unsigned long long duration;
	int ret;
	int i;

	graph_ret = &ret_entry->ret;
	call = &entry->graph_ent;
	duration = graph_ret->rettime - graph_ret->calltime;

	if (data) {
		struct fgraph_cpu_data *cpu_data;
		int cpu = iter->cpu;

		cpu_data = per_cpu_ptr(data->cpu_data, cpu);

		/*
		 * Comments display at depth + 1. Since this
		 * is a leaf function, keep the comments
		 * equal to this depth.
		 */
		cpu_data->depth = call->depth - 1;

		/* No need to keep this function around for this depth */
		if (call->depth < FTRACE_RETFUNC_DEPTH)
			cpu_data->enter_funcs[call->depth] = 0;
	}

	/* Overhead and duration */
	ret = print_graph_duration(duration, s, flags);
	if (ret == TRACE_TYPE_PARTIAL_LINE)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Function */
	for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) {
		ret = trace_seq_printf(s, " ");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	ret = trace_seq_printf(s, "%ps();\n", (void *)call->func);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

static enum print_line_t
print_graph_entry_nested(struct trace_iterator *iter,
			 struct ftrace_graph_ent_entry *entry,
			 struct trace_seq *s, int cpu, u32 flags)
{
	struct ftrace_graph_ent *call = &entry->graph_ent;
	struct fgraph_data *data = iter->private;
	int ret;
	int i;

	if (data) {
		struct fgraph_cpu_data *cpu_data;
		int cpu = iter->cpu;

		cpu_data = per_cpu_ptr(data->cpu_data, cpu);
		cpu_data->depth = call->depth;

		/* Save this function pointer to see if the exit matches */
		if (call->depth < FTRACE_RETFUNC_DEPTH)
			cpu_data->enter_funcs[call->depth] = call->func;
	}

	/* No time */
	ret = print_graph_duration(DURATION_FILL_FULL, s, flags);
	if (ret != TRACE_TYPE_HANDLED)
		return ret;

	/* Function */
	for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) {
		ret = trace_seq_printf(s, " ");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	ret = trace_seq_printf(s, "%ps() {\n", (void *)call->func);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/*
	 * We already consumed the current entry to check the next one
	 * and see if this is a leaf.
	 */
	return TRACE_TYPE_NO_CONSUME;
}

static enum print_line_t
print_graph_prologue(struct trace_iterator *iter, struct trace_seq *s,
		     int type, unsigned long addr, u32 flags)
{
	struct fgraph_data *data = iter->private;
	struct trace_entry *ent = iter->ent;
	int cpu = iter->cpu;
	int ret;

	/* Pid */
	if (verif_pid(s, ent->pid, cpu, data) == TRACE_TYPE_PARTIAL_LINE)
		return TRACE_TYPE_PARTIAL_LINE;

	if (type) {
		/* Interrupt */
		ret = print_graph_irq(iter, addr, type, cpu, ent->pid, flags);
		if (ret == TRACE_TYPE_PARTIAL_LINE)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
		return 0;

	/* Absolute time */
	if (flags & TRACE_GRAPH_PRINT_ABS_TIME) {
		ret = print_graph_abs_time(iter->ts, s);
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* Cpu */
	if (flags & TRACE_GRAPH_PRINT_CPU) {
		ret = print_graph_cpu(s, cpu);
		if (ret == TRACE_TYPE_PARTIAL_LINE)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* Proc */
	if (flags & TRACE_GRAPH_PRINT_PROC) {
		ret = print_graph_proc(s, ent->pid);
		if (ret == TRACE_TYPE_PARTIAL_LINE)
			return TRACE_TYPE_PARTIAL_LINE;

		ret = trace_seq_printf(s, " | ");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* Latency format */
	if (trace_flags & TRACE_ITER_LATENCY_FMT) {
		ret = print_graph_lat_fmt(s, ent);
		if (ret == TRACE_TYPE_PARTIAL_LINE)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	return 0;
}

/*
 * Entry check for irq code
 *
 * returns 1 if
 *  - we are inside irq code
 *  - we just entered irq code
 *
 * returns 0 if
 *  - funcgraph-irqs option is set
 *  - we are not inside irq code
 */
static int
check_irq_entry(struct trace_iterator *iter, u32 flags,
		unsigned long addr, int depth)
{
	int cpu = iter->cpu;
	int *depth_irq;
	struct fgraph_data *data = iter->private;

	/*
	 * If we are either displaying irqs, or we got called as
	 * a graph event and private data does not exist,
	 * then we bypass the irq check.
	 */
	if ((flags & TRACE_GRAPH_PRINT_IRQS) ||
	    (!data))
		return 0;

	depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);

	/*
	 * We are inside the irq code
	 */
	if (*depth_irq >= 0)
		return 1;

	if ((addr < (unsigned long)__irqentry_text_start) ||
	    (addr >= (unsigned long)__irqentry_text_end))
		return 0;

	/*
	 * We are entering irq code.
	 */
	*depth_irq = depth;
	return 1;
}

/*
 * Return check for irq code
 *
 * returns 1 if
 *  - we are inside irq code
 *  - we just left irq code
 *
 * returns 0 if
 *  - funcgraph-irqs option is set
 *  - we are not inside irq code
 */
static int
check_irq_return(struct trace_iterator *iter, u32 flags, int depth)
{
	int cpu = iter->cpu;
	int *depth_irq;
	struct fgraph_data *data = iter->private;

	/*
	 * If we are either displaying irqs, or we got called as
	 * a graph event and private data does not exist,
	 * then we bypass the irq check.
	 */
	if ((flags & TRACE_GRAPH_PRINT_IRQS) ||
	    (!data))
		return 0;

	depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);

	/*
	 * We are not inside the irq code.
	 */
	if (*depth_irq == -1)
		return 0;

	/*
	 * We are inside the irq code, and this is the return entry.
	 * Let's not trace it and clear the entry depth, since
	 * we are out of irq code.
	 *
	 * This condition ensures that we 'leave the irq code' once
	 * we are out of the entry depth. Thus protecting us from
	 * the RETURN entry loss.
	 */
	if (*depth_irq >= depth) {
		*depth_irq = -1;
		return 1;
	}

	/*
	 * We are inside the irq code, and this is not the entry.
	 */
	return 1;
}

static enum print_line_t
print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s,
			struct trace_iterator *iter, u32 flags)
{
	struct fgraph_data *data = iter->private;
	struct ftrace_graph_ent *call = &field->graph_ent;
	struct ftrace_graph_ret_entry *leaf_ret;
	enum print_line_t ret;
	int cpu = iter->cpu;

	if (check_irq_entry(iter, flags, call->func, call->depth))
		return TRACE_TYPE_HANDLED;

	if (print_graph_prologue(iter, s, TRACE_GRAPH_ENT, call->func, flags))
		return TRACE_TYPE_PARTIAL_LINE;

	leaf_ret = get_return_for_leaf(iter, field);
	if (leaf_ret)
		ret = print_graph_entry_leaf(iter, field, leaf_ret, s, flags);
	else
		ret = print_graph_entry_nested(iter, field, s, cpu, flags);

	if (data) {
		/*
		 * If we failed to write our output, then we need to make
		 * note of it, because we already consumed our entry.
		 */
		if (s->full) {
			data->failed = 1;
			data->cpu = cpu;
		} else
			data->failed = 0;
	}

	return ret;
}

static enum print_line_t
print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
		   struct trace_entry *ent, struct trace_iterator *iter,
		   u32 flags)
{
	unsigned long long duration = trace->rettime - trace->calltime;
	struct fgraph_data *data = iter->private;
	pid_t pid = ent->pid;
	int cpu = iter->cpu;
	int func_match = 1;
	int ret;
	int i;

	if (check_irq_return(iter, flags, trace->depth))
		return TRACE_TYPE_HANDLED;

	if (data) {
		struct fgraph_cpu_data *cpu_data;
		int cpu = iter->cpu;

		cpu_data = per_cpu_ptr(data->cpu_data, cpu);

		/*
		 * Comments display at depth + 1. This is the
		 * return from a function, so we now want the comments
		 * to display at the same level as the bracket.
		 */
		cpu_data->depth = trace->depth - 1;

		if (trace->depth < FTRACE_RETFUNC_DEPTH) {
			if (cpu_data->enter_funcs[trace->depth] != trace->func)
				func_match = 0;
			cpu_data->enter_funcs[trace->depth] = 0;
		}
	}

	if (print_graph_prologue(iter, s, 0, 0, flags))
		return TRACE_TYPE_PARTIAL_LINE;

	/* Overhead and duration */
	ret = print_graph_duration(duration, s, flags);
	if (ret == TRACE_TYPE_PARTIAL_LINE)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Closing brace */
	for (i = 0; i < trace->depth * TRACE_GRAPH_INDENT; i++) {
		ret = trace_seq_printf(s, " ");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/*
	 * If the return function does not have a matching entry,
	 * then the entry was lost. Instead of just printing
	 * the '}' and letting the user guess what function this
	 * belongs to, write out the function name.
	 */
	if (func_match) {
		ret = trace_seq_printf(s, "}\n");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	} else {
		ret = trace_seq_printf(s, "} /* %ps */\n", (void *)trace->func);
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* Overrun */
	if (flags & TRACE_GRAPH_PRINT_OVERRUN) {
		ret = trace_seq_printf(s, " (Overruns: %lu)\n",
					trace->overrun);
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	ret = print_graph_irq(iter, trace->func, TRACE_GRAPH_RET,
			      cpu, pid, flags);
	if (ret == TRACE_TYPE_PARTIAL_LINE)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

static enum print_line_t
print_graph_comment(struct trace_seq *s, struct trace_entry *ent,
		    struct trace_iterator *iter, u32 flags)
{
	unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
	struct fgraph_data *data = iter->private;
	struct trace_event *event;
	int depth = 0;
	int ret;
	int i;

	if (data)
		depth = per_cpu_ptr(data->cpu_data, iter->cpu)->depth;

	if (print_graph_prologue(iter, s, 0, 0, flags))
		return TRACE_TYPE_PARTIAL_LINE;

	/* No time */
	ret = print_graph_duration(DURATION_FILL_FULL, s, flags);
	if (ret != TRACE_TYPE_HANDLED)
		return ret;

	/* Indentation */
	if (depth > 0)
		for (i = 0; i < (depth + 1) * TRACE_GRAPH_INDENT; i++) {
			ret = trace_seq_printf(s, " ");
			if (!ret)
				return TRACE_TYPE_PARTIAL_LINE;
		}

	/* The comment */
	ret = trace_seq_printf(s, "/* ");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	switch (iter->ent->type) {
	case TRACE_BPRINT:
		ret = trace_print_bprintk_msg_only(iter);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
		break;
	case TRACE_PRINT:
		ret = trace_print_printk_msg_only(iter);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
		break;
	default:
		event = ftrace_find_event(ent->type);
		if (!event)
			return TRACE_TYPE_UNHANDLED;

		ret = event->funcs->trace(iter, sym_flags, event);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
	}

	/* Strip the trailing newline */
	if (s->buffer[s->len - 1] == '\n') {
		s->buffer[s->len - 1] = '\0';
		s->len--;
	}

	ret = trace_seq_printf(s, " */\n");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

enum print_line_t
print_graph_function_flags(struct trace_iterator *iter, u32 flags)
{
	struct ftrace_graph_ent_entry *field;
	struct fgraph_data *data = iter->private;
	struct trace_entry *entry = iter->ent;
	struct trace_seq *s = &iter->seq;
	int cpu = iter->cpu;
	int ret;

	if (data && per_cpu_ptr(data->cpu_data, cpu)->ignore) {
		per_cpu_ptr(data->cpu_data, cpu)->ignore = 0;
		return TRACE_TYPE_HANDLED;
	}

	/*
	 * If the last output failed, there's a possibility we need
	 * to print out the missing entry which would never go out.
	 */
	if (data && data->failed) {
		field = &data->ent;
		iter->cpu = data->cpu;
		ret = print_graph_entry(field, s, iter, flags);
		if (ret == TRACE_TYPE_HANDLED && iter->cpu != cpu) {
			per_cpu_ptr(data->cpu_data, iter->cpu)->ignore = 1;
			ret = TRACE_TYPE_NO_CONSUME;
		}
		iter->cpu = cpu;
		return ret;
	}

	switch (entry->type) {
	case TRACE_GRAPH_ENT: {
		/*
		 * print_graph_entry() may consume the current event,
		 * thus @field may become invalid, so we need to save it.
		 * sizeof(struct ftrace_graph_ent_entry) is very small,
		 * so it can safely be saved on the stack.
		 */
		struct ftrace_graph_ent_entry saved;
		trace_assign_type(field, entry);
		saved = *field;
		return print_graph_entry(&saved, s, iter, flags);
	}
	case TRACE_GRAPH_RET: {
		struct ftrace_graph_ret_entry *field;
		trace_assign_type(field, entry);
		return print_graph_return(&field->ret, s, entry, iter, flags);
	}
	case TRACE_STACK:
	case TRACE_FN:
		/* don't trace stack and functions as comments */
		return TRACE_TYPE_UNHANDLED;

	default:
		return print_graph_comment(s, entry, iter, flags);
	}

	return TRACE_TYPE_HANDLED;
}

static enum print_line_t
print_graph_function(struct trace_iterator *iter)
{
	return print_graph_function_flags(iter, tracer_flags.val);
}

static enum print_line_t
print_graph_function_event(struct trace_iterator *iter, int flags,
			   struct trace_event *event)
{
	return print_graph_function(iter);
}

static void print_lat_header(struct seq_file *s, u32 flags)
{
	static const char spaces[] = "                "	/* 16 spaces */
		"    "					/* 4 spaces */
		"                 ";			/* 17 spaces */
	int size = 0;

	if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
		size += 16;
	if (flags & TRACE_GRAPH_PRINT_CPU)
		size += 4;
	if (flags & TRACE_GRAPH_PRINT_PROC)
		size += 17;

	seq_printf(s, "#%.*s  _-----=> irqs-off        \n", size, spaces);
	seq_printf(s, "#%.*s / _----=> need-resched    \n", size, spaces);
	seq_printf(s, "#%.*s| / _---=> hardirq/softirq \n", size, spaces);
	seq_printf(s, "#%.*s|| / _--=> preempt-depth   \n", size, spaces);
	seq_printf(s, "#%.*s||| /                      \n", size, spaces);
}

static void __print_graph_headers_flags(struct seq_file *s, u32 flags)
{
	int lat = trace_flags & TRACE_ITER_LATENCY_FMT;

	if (lat)
		print_lat_header(s, flags);

	/* 1st line */
	seq_printf(s, "#");
	if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
		seq_printf(s, "     TIME       ");
	if (flags & TRACE_GRAPH_PRINT_CPU)
		seq_printf(s, " CPU");
	if (flags & TRACE_GRAPH_PRINT_PROC)
		seq_printf(s, "  TASK/PID       ");
	if (lat)
		seq_printf(s, "||||");
	if (flags & TRACE_GRAPH_PRINT_DURATION)
		seq_printf(s, "  DURATION   ");
	seq_printf(s, "               FUNCTION CALLS\n");

	/* 2nd line */
	seq_printf(s, "#");
	if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
		seq_printf(s, "      |         ");
	if (flags & TRACE_GRAPH_PRINT_CPU)
		seq_printf(s, " |  ");
	if (flags & TRACE_GRAPH_PRINT_PROC)
		seq_printf(s, "   |    |        ");
	if (lat)
		seq_printf(s, "||||");
	if (flags & TRACE_GRAPH_PRINT_DURATION)
		seq_printf(s, "   |   |      ");
	seq_printf(s, "               |   |   |   |\n");
}
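
/*
 * With the default flags (CPU, overhead, duration, irqs) the two lines
 * above come out as (spacing approximate):
 *
 *	# CPU  DURATION                  FUNCTION CALLS
 *	# |     |   |                     |   |   |   |
 */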

void print_graph_headers(struct seq_file *s)
{
	print_graph_headers_flags(s, tracer_flags.val);
}

void print_graph_headers_flags(struct seq_file *s, u32 flags)
{
	struct trace_iterator *iter = s->private;

	if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
		return;

	if (trace_flags & TRACE_ITER_LATENCY_FMT) {
		/* print nothing if the buffers are empty */
		if (trace_empty(iter))
			return;

		print_trace_header(s, iter);
	}

	__print_graph_headers_flags(s, flags);
}

void graph_trace_open(struct trace_iterator *iter)
{
	/* pid and depth on the last trace processed */
	struct fgraph_data *data;
	int cpu;

	iter->private = NULL;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		goto out_err;

	data->cpu_data = alloc_percpu(struct fgraph_cpu_data);
	if (!data->cpu_data)
		goto out_err_free;

	for_each_possible_cpu(cpu) {
		pid_t *pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid);
		int *depth = &(per_cpu_ptr(data->cpu_data, cpu)->depth);
		int *ignore = &(per_cpu_ptr(data->cpu_data, cpu)->ignore);
		int *depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);

		*pid = -1;
		*depth = 0;
		*ignore = 0;
		*depth_irq = -1;
	}

	iter->private = data;

	return;

 out_err_free:
	kfree(data);
 out_err:
	pr_warning("function graph tracer: not enough memory\n");
}

void graph_trace_close(struct trace_iterator *iter)
{
	struct fgraph_data *data = iter->private;

	if (data) {
		free_percpu(data->cpu_data);
		kfree(data);
	}
}

static int func_graph_set_flag(u32 old_flags, u32 bit, int set)
{
	if (bit == TRACE_GRAPH_PRINT_IRQS)
		ftrace_graph_skip_irqs = !set;

	return 0;
}
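
/*
 * These flags are toggled at runtime through the trace_options file,
 * e.g.:
 *
 *	# echo nofuncgraph-irqs > /sys/kernel/debug/tracing/trace_options
 *
 * which reaches here with set == 0 and makes trace_graph_entry() skip
 * functions called from hard interrupt context.
 */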

static struct trace_event_functions graph_functions = {
	.trace		= print_graph_function_event,
};

static struct trace_event graph_trace_entry_event = {
	.type		= TRACE_GRAPH_ENT,
	.funcs		= &graph_functions,
};

static struct trace_event graph_trace_ret_event = {
	.type		= TRACE_GRAPH_RET,
	.funcs		= &graph_functions
};

static struct tracer graph_trace __read_mostly = {
	.name		= "function_graph",
	.open		= graph_trace_open,
	.pipe_open	= graph_trace_open,
	.close		= graph_trace_close,
	.pipe_close	= graph_trace_close,
	.wait_pipe	= poll_wait_pipe,
	.init		= graph_trace_init,
	.reset		= graph_trace_reset,
	.print_line	= print_graph_function,
	.print_header	= print_graph_headers,
	.flags		= &tracer_flags,
	.set_flag	= func_graph_set_flag,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_function_graph,
#endif
};

static __init int init_graph_trace(void)
{
	max_bytes_for_cpu = snprintf(NULL, 0, "%d", nr_cpu_ids - 1);

	if (!register_ftrace_event(&graph_trace_entry_event)) {
		pr_warning("Warning: could not register graph trace entry event\n");
		return 1;
	}

	if (!register_ftrace_event(&graph_trace_ret_event)) {
		pr_warning("Warning: could not register graph trace return event\n");
		return 1;
	}

	return register_tracer(&graph_trace);
}

device_initcall(init_graph_trace);