linux/kernel/trace/trace_functions_graph.c
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 *
   4 * Function graph tracer.
   5 * Copyright (c) 2008-2009 Frederic Weisbecker <fweisbec@gmail.com>
   6 * Mostly borrowed from function tracer which
   7 * is Copyright (c) Steven Rostedt <srostedt@redhat.com>
   8 *
   9 */
  10#include <linux/uaccess.h>
  11#include <linux/ftrace.h>
  12#include <linux/interrupt.h>
  13#include <linux/slab.h>
  14#include <linux/fs.h>
  15
  16#include "trace.h"
  17#include "trace_output.h"
  18
  19static bool kill_ftrace_graph;
  20
  21/**
  22 * ftrace_graph_is_dead - returns true if ftrace_graph_stop() was called
  23 *
  24 * ftrace_graph_stop() is called when a severe error is detected in
  25 * the function graph tracing. This function is called by the critical
  26 * paths of function graph to keep those paths from doing any more harm.
  27 */
  28bool ftrace_graph_is_dead(void)
  29{
  30        return kill_ftrace_graph;
  31}
  32
  33/**
  34 * ftrace_graph_stop - set to permanently disable function graph tracing
  35 *
  36 * In case of an error in function graph tracing, this is called
  37 * to try to keep function graph tracing from causing any more harm.
  38 * Usually this is pretty severe and this is called to try to at least
  39 * get a warning out to the user.
  40 */
  41void ftrace_graph_stop(void)
  42{
  43        kill_ftrace_graph = true;
  44}
  45
  46/* When set, irq functions will be ignored */
  47static int ftrace_graph_skip_irqs;
  48
  49struct fgraph_cpu_data {
  50        pid_t           last_pid;
  51        int             depth;
  52        int             depth_irq;
  53        int             ignore;
  54        unsigned long   enter_funcs[FTRACE_RETFUNC_DEPTH];
  55};
  56
  57struct fgraph_data {
  58        struct fgraph_cpu_data __percpu *cpu_data;
  59
  60        /* Place to preserve last processed entry. */
  61        struct ftrace_graph_ent_entry   ent;
  62        struct ftrace_graph_ret_entry   ret;
  63        int                             failed;
  64        int                             cpu;
  65};
  66
  67#define TRACE_GRAPH_INDENT      2
  68
  69unsigned int fgraph_max_depth;
  70
  71static struct tracer_opt trace_opts[] = {
  72        /* Display overruns? (for self-debug purposes) */
  73        { TRACER_OPT(funcgraph-overrun, TRACE_GRAPH_PRINT_OVERRUN) },
  74        /* Display CPU ? */
  75        { TRACER_OPT(funcgraph-cpu, TRACE_GRAPH_PRINT_CPU) },
  76        /* Display Overhead ? */
  77        { TRACER_OPT(funcgraph-overhead, TRACE_GRAPH_PRINT_OVERHEAD) },
  78        /* Display proc name/pid */
  79        { TRACER_OPT(funcgraph-proc, TRACE_GRAPH_PRINT_PROC) },
  80        /* Display duration of execution */
  81        { TRACER_OPT(funcgraph-duration, TRACE_GRAPH_PRINT_DURATION) },
  82        /* Display absolute time of an entry */
  83        { TRACER_OPT(funcgraph-abstime, TRACE_GRAPH_PRINT_ABS_TIME) },
  84        /* Display interrupts */
  85        { TRACER_OPT(funcgraph-irqs, TRACE_GRAPH_PRINT_IRQS) },
  86        /* Display function name after trailing } */
  87        { TRACER_OPT(funcgraph-tail, TRACE_GRAPH_PRINT_TAIL) },
  88        /* Include sleep time (scheduled out) between entry and return */
  89        { TRACER_OPT(sleep-time, TRACE_GRAPH_SLEEP_TIME) },
  90        /* Include time within nested functions */
  91        { TRACER_OPT(graph-time, TRACE_GRAPH_GRAPH_TIME) },
  92        { } /* Empty entry */
  93};
  94
  95static struct tracer_flags tracer_flags = {
  96        /* Don't display overruns, proc, or tail by default */
  97        .val = TRACE_GRAPH_PRINT_CPU | TRACE_GRAPH_PRINT_OVERHEAD |
  98               TRACE_GRAPH_PRINT_DURATION | TRACE_GRAPH_PRINT_IRQS |
  99               TRACE_GRAPH_SLEEP_TIME | TRACE_GRAPH_GRAPH_TIME,
 100        .opts = trace_opts
 101};
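
/*
 * Example (user-space view, not part of this file): with the
 * function_graph tracer active, the options above appear in tracefs
 * trace_options and can be toggled like:
 *
 *	echo funcgraph-proc  > /sys/kernel/debug/tracing/trace_options
 *	echo nofuncgraph-cpu > /sys/kernel/debug/tracing/trace_options
 */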
 102
 103static struct trace_array *graph_array;
 104
 105/*
 106 * The DURATION column is also used to display IRQ signs; the
 107 * following values are used by print_graph_irq() and others
 108 * to fill in space in the DURATION column.
 109 */
 110enum {
 111        FLAGS_FILL_FULL  = 1 << TRACE_GRAPH_PRINT_FILL_SHIFT,
 112        FLAGS_FILL_START = 2 << TRACE_GRAPH_PRINT_FILL_SHIFT,
 113        FLAGS_FILL_END   = 3 << TRACE_GRAPH_PRINT_FILL_SHIFT,
 114};
 115
 116static void
 117print_graph_duration(struct trace_array *tr, unsigned long long duration,
 118                     struct trace_seq *s, u32 flags);
 119
 120/* Add a function return address to the trace stack on thread info. */
 121int
 122ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth,
 123                         unsigned long frame_pointer, unsigned long *retp)
 124{
 125        unsigned long long calltime;
 126        int index;
 127
 128        if (unlikely(ftrace_graph_is_dead()))
 129                return -EBUSY;
 130
 131        if (!current->ret_stack)
 132                return -EBUSY;
 133
 134        /*
 135         * We must make sure the ret_stack is tested before we read
 136         * anything else.
 137         */
 138        smp_rmb();
 139
 140        /* The return trace stack is full */
 141        if (current->curr_ret_stack == FTRACE_RETFUNC_DEPTH - 1) {
 142                atomic_inc(&current->trace_overrun);
 143                return -EBUSY;
 144        }
 145
 146        /*
 147         * The curr_ret_stack is an index into the ftrace return stack of
 148         * the current task.  Its value should be in [0, FTRACE_RETFUNC_DEPTH)
 149         * when the function graph tracer is used.  To support filtering out
 150         * specific functions, it makes the index negative by subtracting a
 151         * huge value (FTRACE_NOTRACE_DEPTH) so that when ftrace sees a
 152         * negative index it will ignore the record.  The index is recovered
 153         * when returning from the filtered function by adding
 154         * FTRACE_NOTRACE_DEPTH back, after which functions are recorded
 155         * normally again.
 156         *
 157         * The curr_ret_stack is initialized to -1 and gets incremented
 158         * in this function.  So it can be less than -1 only if it was
 159         * filtered out via ftrace_graph_notrace_addr(), which can be
 160         * set from the set_graph_notrace file in tracefs by the user.
 161         */
 162        if (current->curr_ret_stack < -1)
 163                return -EBUSY;
 164
 165        calltime = trace_clock_local();
 166
 167        index = ++current->curr_ret_stack;
 168        if (ftrace_graph_notrace_addr(func))
 169                current->curr_ret_stack -= FTRACE_NOTRACE_DEPTH;
 170        barrier();
 171        current->ret_stack[index].ret = ret;
 172        current->ret_stack[index].func = func;
 173        current->ret_stack[index].calltime = calltime;
 174#ifdef HAVE_FUNCTION_GRAPH_FP_TEST
 175        current->ret_stack[index].fp = frame_pointer;
 176#endif
 177#ifdef HAVE_FUNCTION_GRAPH_RET_ADDR_PTR
 178        current->ret_stack[index].retp = retp;
 179#endif
 180        *depth = current->curr_ret_stack;
 181
 182        return 0;
 183}
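
/*
 * Illustrative sketch (not any single arch verbatim) of how an arch's
 * entry code is expected to use ftrace_push_return_trace(): hook the
 * return address, push the real one, and undo the hook on failure.
 *
 *	old = *parent;				// real return address
 *	*parent = (unsigned long)&return_to_handler;
 *	trace.func = self_addr;
 *	trace.depth = current->curr_ret_stack + 1;
 *	if (!ftrace_graph_entry(&trace) ||
 *	    ftrace_push_return_trace(old, self_addr, &trace.depth,
 *				     frame_pointer, parent) == -EBUSY)
 *		*parent = old;			// tracing refused, undo
 */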
 184
 185/* Retrieve a function return address from the trace stack on thread info. */
 186static void
 187ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret,
 188                        unsigned long frame_pointer)
 189{
 190        int index;
 191
 192        index = current->curr_ret_stack;
 193
 194        /*
 195         * A negative index here means that it's just returned from a
 196         * notrace'd function.  Recover index to get an original
 197         * return address.  See ftrace_push_return_trace().
 198         *
 199         * TODO: Need to check whether the stack gets corrupted.
 200         */
 201        if (index < 0)
 202                index += FTRACE_NOTRACE_DEPTH;
 203
 204        if (unlikely(index < 0 || index >= FTRACE_RETFUNC_DEPTH)) {
 205                ftrace_graph_stop();
 206                WARN_ON(1);
 207                /* Might as well panic, otherwise we have nowhere to go */
 208                *ret = (unsigned long)panic;
 209                return;
 210        }
 211
 212#ifdef HAVE_FUNCTION_GRAPH_FP_TEST
 213        /*
 214         * The arch may choose to record the frame pointer used
 215         * and check it here to make sure that it is what we expect it
 216         * to be. If gcc does not set the placeholder of the return
 217         * address in the frame pointer, and does a copy instead, then
 218         * the function graph trace will fail. This test detects this
 219         * case.
 220         *
 221         * Currently, x86_32 with optimize for size (-Os) makes the latest
 222         * gcc do the above.
 223         *
 224         * Note, -mfentry does not use frame pointers, and this test
 225         * is not needed if CC_USING_FENTRY is set.
 226         */
 227        if (unlikely(current->ret_stack[index].fp != frame_pointer)) {
 228                ftrace_graph_stop();
 229                WARN(1, "Bad frame pointer: expected %lx, received %lx\n"
 230                     "  from func %ps return to %lx\n",
 231                     current->ret_stack[index].fp,
 232                     frame_pointer,
 233                     (void *)current->ret_stack[index].func,
 234                     current->ret_stack[index].ret);
 235                *ret = (unsigned long)panic;
 236                return;
 237        }
 238#endif
 239
 240        *ret = current->ret_stack[index].ret;
 241        trace->func = current->ret_stack[index].func;
 242        trace->calltime = current->ret_stack[index].calltime;
 243        trace->overrun = atomic_read(&current->trace_overrun);
 244        trace->depth = index;
 245}
 246
 247/*
 248 * Send the trace to the ring-buffer.
 249 * @return the original return address.
 250 */
 251unsigned long ftrace_return_to_handler(unsigned long frame_pointer)
 252{
 253        struct ftrace_graph_ret trace;
 254        unsigned long ret;
 255
 256        ftrace_pop_return_trace(&trace, &ret, frame_pointer);
 257        trace.rettime = trace_clock_local();
 258        barrier();
 259        current->curr_ret_stack--;
 260        /*
 261         * The curr_ret_stack can be less than -1 only if it was
 262         * filtered out and it's about to return from the function.
 263         * Recover the index and continue to trace normal functions.
 264         */
 265        if (current->curr_ret_stack < -1) {
 266                current->curr_ret_stack += FTRACE_NOTRACE_DEPTH;
 267                return ret;
 268        }
 269
 270        /*
 271         * The trace should run after decrementing the ret counter
 272         * in case an interrupt were to come in. We don't want to
 273         * lose the interrupt if max_depth is set.
 274         */
 275        ftrace_graph_return(&trace);
 276
 277        if (unlikely(!ret)) {
 278                ftrace_graph_stop();
 279                WARN_ON(1);
 280                /* Might as well panic. What else to do? */
 281                ret = (unsigned long)panic;
 282        }
 283
 284        return ret;
 285}
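
/*
 * Note: ftrace_return_to_handler() is invoked from the arch's
 * return_to_handler trampoline, which was substituted for the original
 * return address at function entry.  The trampoline jumps to whatever
 * address is returned here, which is why a corrupted stack falls back
 * to panic() above rather than returning garbage.
 */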
 286
 287/**
 288 * ftrace_graph_ret_addr - convert a potentially modified stack return address
 289 *                         to its original value
 290 *
 291 * This function can be called by stack unwinding code to convert a found stack
 292 * return address ('ret') to its original value, in case the function graph
 293 * tracer has modified it to be 'return_to_handler'.  If the address hasn't
 294 * been modified, the unchanged value of 'ret' is returned.
 295 *
 296 * 'idx' is a state variable which should be initialized by the caller to zero
 297 * before the first call.
 298 *
 299 * 'retp' is a pointer to the return address on the stack.  It's ignored if
 300 * the arch doesn't have HAVE_FUNCTION_GRAPH_RET_ADDR_PTR defined.
 301 */
 302#ifdef HAVE_FUNCTION_GRAPH_RET_ADDR_PTR
 303unsigned long ftrace_graph_ret_addr(struct task_struct *task, int *idx,
 304                                    unsigned long ret, unsigned long *retp)
 305{
 306        int index = task->curr_ret_stack;
 307        int i;
 308
 309        if (ret != (unsigned long)return_to_handler)
 310                return ret;
 311
 312        if (index < -1)
 313                index += FTRACE_NOTRACE_DEPTH;
 314
 315        if (index < 0)
 316                return ret;
 317
 318        for (i = 0; i <= index; i++)
 319                if (task->ret_stack[i].retp == retp)
 320                        return task->ret_stack[i].ret;
 321
 322        return ret;
 323}
 324#else /* !HAVE_FUNCTION_GRAPH_RET_ADDR_PTR */
 325unsigned long ftrace_graph_ret_addr(struct task_struct *task, int *idx,
 326                                    unsigned long ret, unsigned long *retp)
 327{
 328        int task_idx;
 329
 330        if (ret != (unsigned long)return_to_handler)
 331                return ret;
 332
 333        task_idx = task->curr_ret_stack;
 334
 335        if (!task->ret_stack || task_idx < *idx)
 336                return ret;
 337
 338        task_idx -= *idx;
 339        (*idx)++;
 340
 341        return task->ret_stack[task_idx].ret;
 342}
 343#endif /* HAVE_FUNCTION_GRAPH_RET_ADDR_PTR */
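
/*
 * Note: without HAVE_FUNCTION_GRAPH_RET_ADDR_PTR there is no way to
 * match a particular stack slot to a ret_stack entry, so the fallback
 * above simply hands out the most recent saved return addresses in
 * order, one per call; the unwind can be wrong if stale
 * 'return_to_handler' values are still on the stack.
 */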
 344
 345int __trace_graph_entry(struct trace_array *tr,
 346                                struct ftrace_graph_ent *trace,
 347                                unsigned long flags,
 348                                int pc)
 349{
 350        struct trace_event_call *call = &event_funcgraph_entry;
 351        struct ring_buffer_event *event;
 352        struct ring_buffer *buffer = tr->trace_buffer.buffer;
 353        struct ftrace_graph_ent_entry *entry;
 354
 355        event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_ENT,
 356                                          sizeof(*entry), flags, pc);
 357        if (!event)
 358                return 0;
 359        entry   = ring_buffer_event_data(event);
 360        entry->graph_ent                        = *trace;
 361        if (!call_filter_check_discard(call, entry, buffer, event))
 362                trace_buffer_unlock_commit_nostack(buffer, event);
 363
 364        return 1;
 365}
 366
 367static inline int ftrace_graph_ignore_irqs(void)
 368{
 369        if (!ftrace_graph_skip_irqs || trace_recursion_test(TRACE_IRQ_BIT))
 370                return 0;
 371
 372        return in_irq();
 373}
 374
 375int trace_graph_entry(struct ftrace_graph_ent *trace)
 376{
 377        struct trace_array *tr = graph_array;
 378        struct trace_array_cpu *data;
 379        unsigned long flags;
 380        long disabled;
 381        int ret;
 382        int cpu;
 383        int pc;
 384
 385        if (!ftrace_trace_task(tr))
 386                return 0;
 387
 388        if (ftrace_graph_ignore_func(trace))
 389                return 0;
 390
 391        if (ftrace_graph_ignore_irqs())
 392                return 0;
 393
 394        /*
 395         * Do not trace a function if it's filtered by set_graph_notrace.
 396         * Make the index of the ret stack negative to indicate that it should
 397         * ignore further functions.  But it needs its own ret stack entry
 398         * to recover the original index in order to continue tracing after
 399         * returning from the function.
 400         */
 401        if (ftrace_graph_notrace_addr(trace->func))
 402                return 1;
 403
 404        /*
 405         * Stop here if tracing_thresh is set. We only write function return
 406         * events to the ring buffer.
 407         */
 408        if (tracing_thresh)
 409                return 1;
 410
 411        local_irq_save(flags);
 412        cpu = raw_smp_processor_id();
 413        data = per_cpu_ptr(tr->trace_buffer.data, cpu);
 414        disabled = atomic_inc_return(&data->disabled);
 415        if (likely(disabled == 1)) {
 416                pc = preempt_count();
 417                ret = __trace_graph_entry(tr, trace, flags, pc);
 418        } else {
 419                ret = 0;
 420        }
 421
 422        atomic_dec(&data->disabled);
 423        local_irq_restore(flags);
 424
 425        return ret;
 426}
 427
 428static void
 429__trace_graph_function(struct trace_array *tr,
 430                unsigned long ip, unsigned long flags, int pc)
 431{
 432        u64 time = trace_clock_local();
 433        struct ftrace_graph_ent ent = {
 434                .func  = ip,
 435                .depth = 0,
 436        };
 437        struct ftrace_graph_ret ret = {
 438                .func     = ip,
 439                .depth    = 0,
 440                .calltime = time,
 441                .rettime  = time,
 442        };
 443
 444        __trace_graph_entry(tr, &ent, flags, pc);
 445        __trace_graph_return(tr, &ret, flags, pc);
 446}
 447
 448void
 449trace_graph_function(struct trace_array *tr,
 450                unsigned long ip, unsigned long parent_ip,
 451                unsigned long flags, int pc)
 452{
 453        __trace_graph_function(tr, ip, flags, pc);
 454}
 455
 456void __trace_graph_return(struct trace_array *tr,
 457                                struct ftrace_graph_ret *trace,
 458                                unsigned long flags,
 459                                int pc)
 460{
 461        struct trace_event_call *call = &event_funcgraph_exit;
 462        struct ring_buffer_event *event;
 463        struct ring_buffer *buffer = tr->trace_buffer.buffer;
 464        struct ftrace_graph_ret_entry *entry;
 465
 466        event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_RET,
 467                                          sizeof(*entry), flags, pc);
 468        if (!event)
 469                return;
 470        entry   = ring_buffer_event_data(event);
 471        entry->ret                              = *trace;
 472        if (!call_filter_check_discard(call, entry, buffer, event))
 473                trace_buffer_unlock_commit_nostack(buffer, event);
 474}
 475
 476void trace_graph_return(struct ftrace_graph_ret *trace)
 477{
 478        struct trace_array *tr = graph_array;
 479        struct trace_array_cpu *data;
 480        unsigned long flags;
 481        long disabled;
 482        int cpu;
 483        int pc;
 484
 485        local_irq_save(flags);
 486        cpu = raw_smp_processor_id();
 487        data = per_cpu_ptr(tr->trace_buffer.data, cpu);
 488        disabled = atomic_inc_return(&data->disabled);
 489        if (likely(disabled == 1)) {
 490                pc = preempt_count();
 491                __trace_graph_return(tr, trace, flags, pc);
 492        }
 493        atomic_dec(&data->disabled);
 494        local_irq_restore(flags);
 495}
 496
 497void set_graph_array(struct trace_array *tr)
 498{
 499        graph_array = tr;
 500
 501        /* Make graph_array visible before we start tracing */
 502
 503        smp_mb();
 504}
 505
 506static void trace_graph_thresh_return(struct ftrace_graph_ret *trace)
 507{
 508        if (tracing_thresh &&
 509            (trace->rettime - trace->calltime < tracing_thresh))
 510                return;
 511        else
 512                trace_graph_return(trace);
 513}
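
/*
 * Example (user-space view): tracing_thresh is set in microseconds, so
 * to log only functions that ran longer than 100 us:
 *
 *	echo 100 > /sys/kernel/debug/tracing/tracing_thresh
 *	echo function_graph > /sys/kernel/debug/tracing/current_tracer
 */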
 514
 515static int graph_trace_init(struct trace_array *tr)
 516{
 517        int ret;
 518
 519        set_graph_array(tr);
 520        if (tracing_thresh)
 521                ret = register_ftrace_graph(&trace_graph_thresh_return,
 522                                            &trace_graph_entry);
 523        else
 524                ret = register_ftrace_graph(&trace_graph_return,
 525                                            &trace_graph_entry);
 526        if (ret)
 527                return ret;
 528        tracing_start_cmdline_record();
 529
 530        return 0;
 531}
 532
 533static void graph_trace_reset(struct trace_array *tr)
 534{
 535        tracing_stop_cmdline_record();
 536        unregister_ftrace_graph();
 537}
 538
 539static int graph_trace_update_thresh(struct trace_array *tr)
 540{
 541        graph_trace_reset(tr);
 542        return graph_trace_init(tr);
 543}
 544
 545static int max_bytes_for_cpu;
 546
 547static void print_graph_cpu(struct trace_seq *s, int cpu)
 548{
 549        /*
 550         * Start with a space character - to make it stand out
 551         * to the right a bit when trace output is pasted into
 552         * email:
 553         */
 554        trace_seq_printf(s, " %*d) ", max_bytes_for_cpu, cpu);
 555}
 556
 557#define TRACE_GRAPH_PROCINFO_LENGTH     14
 558
 559static void print_graph_proc(struct trace_seq *s, pid_t pid)
 560{
 561        char comm[TASK_COMM_LEN];
 562        /* sign + log10(MAX_INT) + '\0' */
 563        char pid_str[11];
 564        int spaces = 0;
 565        int len;
 566        int i;
 567
 568        trace_find_cmdline(pid, comm);
 569        comm[7] = '\0';  /* truncate comm so the proc field stays compact */
 570        sprintf(pid_str, "%d", pid);
 571
 572        /* 1 stands for the "-" character */
 573        len = strlen(comm) + strlen(pid_str) + 1;
 574
 575        if (len < TRACE_GRAPH_PROCINFO_LENGTH)
 576                spaces = TRACE_GRAPH_PROCINFO_LENGTH - len;
 577
 578        /* Leading spaces to center the text */
 579        for (i = 0; i < spaces / 2; i++)
 580                trace_seq_putc(s, ' ');
 581
 582        trace_seq_printf(s, "%s-%s", comm, pid_str);
 583
 584        /* Trailing spaces to center the text */
 585        for (i = 0; i < spaces - (spaces / 2); i++)
 586                trace_seq_putc(s, ' ');
 587}
 588
 589
 590static void print_graph_lat_fmt(struct trace_seq *s, struct trace_entry *entry)
 591{
 592        trace_seq_putc(s, ' ');
 593        trace_print_lat_fmt(s, entry);
 594}
 595
 596/* If the pid changed since the last trace, output this event */
 597static void
 598verif_pid(struct trace_seq *s, pid_t pid, int cpu, struct fgraph_data *data)
 599{
 600        pid_t prev_pid;
 601        pid_t *last_pid;
 602
 603        if (!data)
 604                return;
 605
 606        last_pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid);
 607
 608        if (*last_pid == pid)
 609                return;
 610
 611        prev_pid = *last_pid;
 612        *last_pid = pid;
 613
 614        if (prev_pid == -1)
 615                return;
 616/*
 617 * Context-switch trace line:
 618 *
 619 *  ------------------------------------------
 620 *  | 1)  migration/0--1  =>  sshd-1755
 621 *  ------------------------------------------
 622 *
 623 */
 624        trace_seq_puts(s, " ------------------------------------------\n");
 625        print_graph_cpu(s, cpu);
 626        print_graph_proc(s, prev_pid);
 627        trace_seq_puts(s, " => ");
 628        print_graph_proc(s, pid);
 629        trace_seq_puts(s, "\n ------------------------------------------\n\n");
 630}
 631
 632static struct ftrace_graph_ret_entry *
 633get_return_for_leaf(struct trace_iterator *iter,
 634                struct ftrace_graph_ent_entry *curr)
 635{
 636        struct fgraph_data *data = iter->private;
 637        struct ring_buffer_iter *ring_iter = NULL;
 638        struct ring_buffer_event *event;
 639        struct ftrace_graph_ret_entry *next;
 640
 641        /*
 642         * If the previous output failed to write to the seq buffer,
 643         * then we just reuse the data from before.
 644         */
 645        if (data && data->failed) {
 646                curr = &data->ent;
 647                next = &data->ret;
 648        } else {
 649
 650                ring_iter = trace_buffer_iter(iter, iter->cpu);
 651
 652                /* First peek to compare current entry and the next one */
 653                if (ring_iter)
 654                        event = ring_buffer_iter_peek(ring_iter, NULL);
 655                else {
 656                        /*
 657                         * We need to consume the current entry to see
 658                         * the next one.
 659                         */
 660                        ring_buffer_consume(iter->trace_buffer->buffer, iter->cpu,
 661                                            NULL, NULL);
 662                        event = ring_buffer_peek(iter->trace_buffer->buffer, iter->cpu,
 663                                                 NULL, NULL);
 664                }
 665
 666                if (!event)
 667                        return NULL;
 668
 669                next = ring_buffer_event_data(event);
 670
 671                if (data) {
 672                        /*
 673                         * Save current and next entries for later reference
 674                         * if the output fails.
 675                         */
 676                        data->ent = *curr;
 677                        /*
 678                         * If the next event is not a return type, we
 679                         * only need to remember its type. Otherwise we can
 680                         * safely copy the entire event.
 681                         */
 682                        if (next->ent.type == TRACE_GRAPH_RET)
 683                                data->ret = *next;
 684                        else
 685                                data->ret.ent.type = next->ent.type;
 686                }
 687        }
 688
 689        if (next->ent.type != TRACE_GRAPH_RET)
 690                return NULL;
 691
 692        if (curr->ent.pid != next->ent.pid ||
 693                        curr->graph_ent.func != next->ret.func)
 694                return NULL;
 695
 696        /* this is a leaf, now advance the iterator */
 697        if (ring_iter)
 698                ring_buffer_read(ring_iter, NULL);
 699
 700        return next;
 701}
 702
 703static void print_graph_abs_time(u64 t, struct trace_seq *s)
 704{
 705        unsigned long usecs_rem;
 706
 707        usecs_rem = do_div(t, NSEC_PER_SEC);
 708        usecs_rem /= 1000;
 709
 710        trace_seq_printf(s, "%5lu.%06lu |  ",
 711                         (unsigned long)t, usecs_rem);
 712}
 713
 714static void
 715print_graph_irq(struct trace_iterator *iter, unsigned long addr,
 716                enum trace_type type, int cpu, pid_t pid, u32 flags)
 717{
 718        struct trace_array *tr = iter->tr;
 719        struct trace_seq *s = &iter->seq;
 720        struct trace_entry *ent = iter->ent;
 721
 722        if (addr < (unsigned long)__irqentry_text_start ||
 723                addr >= (unsigned long)__irqentry_text_end)
 724                return;
 725
 726        if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
 727                /* Absolute time */
 728                if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
 729                        print_graph_abs_time(iter->ts, s);
 730
 731                /* Cpu */
 732                if (flags & TRACE_GRAPH_PRINT_CPU)
 733                        print_graph_cpu(s, cpu);
 734
 735                /* Proc */
 736                if (flags & TRACE_GRAPH_PRINT_PROC) {
 737                        print_graph_proc(s, pid);
 738                        trace_seq_puts(s, " | ");
 739                }
 740
 741                /* Latency format */
 742                if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
 743                        print_graph_lat_fmt(s, ent);
 744        }
 745
 746        /* No overhead */
 747        print_graph_duration(tr, 0, s, flags | FLAGS_FILL_START);
 748
 749        if (type == TRACE_GRAPH_ENT)
 750                trace_seq_puts(s, "==========>");
 751        else
 752                trace_seq_puts(s, "<==========");
 753
 754        print_graph_duration(tr, 0, s, flags | FLAGS_FILL_END);
 755        trace_seq_putc(s, '\n');
 756}
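
/*
 * Roughly, an irqentry function then shows up bracketed by arrows in
 * the DURATION column, e.g.:
 *
 *  1)   ==========> |
 *  1)               |  smp_apic_timer_interrupt() {
 *  1)   <========== |
 */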
 757
 758void
 759trace_print_graph_duration(unsigned long long duration, struct trace_seq *s)
 760{
 761        unsigned long nsecs_rem = do_div(duration, 1000);
 762        /* log10(ULONG_MAX) + '\0' */
 763        char usecs_str[21];
 764        char nsecs_str[5];
 765        int len;
 766        int i;
 767
 768        sprintf(usecs_str, "%lu", (unsigned long) duration);
 769
 770        /* Print usecs */
 771        trace_seq_printf(s, "%s", usecs_str);
 772
 773        len = strlen(usecs_str);
 774
 775        /* Print nsecs (we don't want to exceed 7 digits) */
 776        if (len < 7) {
 777                size_t slen = min_t(size_t, sizeof(nsecs_str), 8UL - len);
 778
 779                snprintf(nsecs_str, slen, "%03lu", nsecs_rem);
 780                trace_seq_printf(s, ".%s", nsecs_str);
 781                len += strlen(nsecs_str) + 1;
 782        }
 783
 784        trace_seq_puts(s, " us ");
 785
 786        /* Print remaining spaces to fit the row's width */
 787        for (i = len; i < 8; i++)
 788                trace_seq_putc(s, ' ');
 789}
 790
 791static void
 792print_graph_duration(struct trace_array *tr, unsigned long long duration,
 793                     struct trace_seq *s, u32 flags)
 794{
 795        if (!(flags & TRACE_GRAPH_PRINT_DURATION) ||
 796            !(tr->trace_flags & TRACE_ITER_CONTEXT_INFO))
 797                return;
 798
 799        /* No real data, just fill the column with spaces */
 800        switch (flags & TRACE_GRAPH_PRINT_FILL_MASK) {
 801        case FLAGS_FILL_FULL:
 802                trace_seq_puts(s, "              |  ");
 803                return;
 804        case FLAGS_FILL_START:
 805                trace_seq_puts(s, "  ");
 806                return;
 807        case FLAGS_FILL_END:
 808                trace_seq_puts(s, " |");
 809                return;
 810        }
 811
 812        /* Signal the execution-time overhead to the output */
 813        if (flags & TRACE_GRAPH_PRINT_OVERHEAD)
 814                trace_seq_printf(s, "%c ", trace_find_mark(duration));
 815        else
 816                trace_seq_puts(s, "  ");
 817
 818        trace_print_graph_duration(duration, s);
 819        trace_seq_puts(s, "|  ");
 820}
 821
 822/* Case of a leaf function on its call entry */
 823static enum print_line_t
 824print_graph_entry_leaf(struct trace_iterator *iter,
 825                struct ftrace_graph_ent_entry *entry,
 826                struct ftrace_graph_ret_entry *ret_entry,
 827                struct trace_seq *s, u32 flags)
 828{
 829        struct fgraph_data *data = iter->private;
 830        struct trace_array *tr = iter->tr;
 831        struct ftrace_graph_ret *graph_ret;
 832        struct ftrace_graph_ent *call;
 833        unsigned long long duration;
 834        int i;
 835
 836        graph_ret = &ret_entry->ret;
 837        call = &entry->graph_ent;
 838        duration = graph_ret->rettime - graph_ret->calltime;
 839
 840        if (data) {
 841                struct fgraph_cpu_data *cpu_data;
 842                int cpu = iter->cpu;
 843
 844                cpu_data = per_cpu_ptr(data->cpu_data, cpu);
 845
 846                /* If a graph tracer ignored set_graph_notrace */
 847                if (call->depth < -1)
 848                        call->depth += FTRACE_NOTRACE_DEPTH;
 849
 850                /*
 851                 * Comments display at depth + 1. Since
 852                 * this is a leaf function, keep the comments
 853                 * equal to this depth.
 854                 */
 855                cpu_data->depth = call->depth - 1;
 856
 857                /* No need to keep this function around for this depth */
 858                if (call->depth < FTRACE_RETFUNC_DEPTH &&
 859                    !WARN_ON_ONCE(call->depth < 0))
 860                        cpu_data->enter_funcs[call->depth] = 0;
 861        }
 862
 863        /* Overhead and duration */
 864        print_graph_duration(tr, duration, s, flags);
 865
 866        /* Function */
 867        for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++)
 868                trace_seq_putc(s, ' ');
 869
 870        trace_seq_printf(s, "%ps();\n", (void *)call->func);
 871
 872        return trace_handle_return(s);
 873}
 874
 875static enum print_line_t
 876print_graph_entry_nested(struct trace_iterator *iter,
 877                         struct ftrace_graph_ent_entry *entry,
 878                         struct trace_seq *s, int cpu, u32 flags)
 879{
 880        struct ftrace_graph_ent *call = &entry->graph_ent;
 881        struct fgraph_data *data = iter->private;
 882        struct trace_array *tr = iter->tr;
 883        int i;
 884
 885        if (data) {
 886                struct fgraph_cpu_data *cpu_data;
 887                int cpu = iter->cpu;
 888
 889                /* If a graph tracer ignored set_graph_notrace */
 890                if (call->depth < -1)
 891                        call->depth += FTRACE_NOTRACE_DEPTH;
 892
 893                cpu_data = per_cpu_ptr(data->cpu_data, cpu);
 894                cpu_data->depth = call->depth;
 895
 896                /* Save this function pointer to see if the exit matches */
 897                if (call->depth < FTRACE_RETFUNC_DEPTH &&
 898                    !WARN_ON_ONCE(call->depth < 0))
 899                        cpu_data->enter_funcs[call->depth] = call->func;
 900        }
 901
 902        /* No time */
 903        print_graph_duration(tr, 0, s, flags | FLAGS_FILL_FULL);
 904
 905        /* Function */
 906        for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++)
 907                trace_seq_putc(s, ' ');
 908
 909        trace_seq_printf(s, "%ps() {\n", (void *)call->func);
 910
 911        if (trace_seq_has_overflowed(s))
 912                return TRACE_TYPE_PARTIAL_LINE;
 913
 914        /*
 915         * we already consumed the current entry to check the next one
 916         * and see if this is a leaf.
 917         */
 918        return TRACE_TYPE_NO_CONSUME;
 919}
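
/*
 * Example output (illustrative): a leaf - an entry immediately followed
 * by its own return - is merged into one "func();" line by
 * print_graph_entry_leaf(), while a nested entry opens a bracket:
 *
 *  1)   0.535 us    |        rcu_irq_exit();
 *  1)               |        do_IRQ() {
 */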
 920
 921static void
 922print_graph_prologue(struct trace_iterator *iter, struct trace_seq *s,
 923                     int type, unsigned long addr, u32 flags)
 924{
 925        struct fgraph_data *data = iter->private;
 926        struct trace_entry *ent = iter->ent;
 927        struct trace_array *tr = iter->tr;
 928        int cpu = iter->cpu;
 929
 930        /* Pid */
 931        verif_pid(s, ent->pid, cpu, data);
 932
 933        if (type)
 934                /* Interrupt */
 935                print_graph_irq(iter, addr, type, cpu, ent->pid, flags);
 936
 937        if (!(tr->trace_flags & TRACE_ITER_CONTEXT_INFO))
 938                return;
 939
 940        /* Absolute time */
 941        if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
 942                print_graph_abs_time(iter->ts, s);
 943
 944        /* Cpu */
 945        if (flags & TRACE_GRAPH_PRINT_CPU)
 946                print_graph_cpu(s, cpu);
 947
 948        /* Proc */
 949        if (flags & TRACE_GRAPH_PRINT_PROC) {
 950                print_graph_proc(s, ent->pid);
 951                trace_seq_puts(s, " | ");
 952        }
 953
 954        /* Latency format */
 955        if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
 956                print_graph_lat_fmt(s, ent);
 957
 958        return;
 959}
 960
 961/*
 962 * Entry check for irq code
 963 *
 964 * returns 1 if
 965 *  - we are inside irq code
 966 *  - we just entered irq code
 967 *
 968 * returns 0 if
 969 *  - funcgraph-irqs option is set
 970 *  - we are not inside irq code
 971 */
 972static int
 973check_irq_entry(struct trace_iterator *iter, u32 flags,
 974                unsigned long addr, int depth)
 975{
 976        int cpu = iter->cpu;
 977        int *depth_irq;
 978        struct fgraph_data *data = iter->private;
 979
 980        /*
 981         * If we are either displaying irqs, or we got called as
 982         * a graph event and private data does not exist,
 983         * then we bypass the irq check.
 984         */
 985        if ((flags & TRACE_GRAPH_PRINT_IRQS) ||
 986            (!data))
 987                return 0;
 988
 989        depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);
 990
 991        /*
 992         * We are inside the irq code
 993         */
 994        if (*depth_irq >= 0)
 995                return 1;
 996
 997        if ((addr < (unsigned long)__irqentry_text_start) ||
 998            (addr >= (unsigned long)__irqentry_text_end))
 999                return 0;
1000
1001        /*
1002         * We are entering irq code.
1003         */
1004        *depth_irq = depth;
1005        return 1;
1006}
1007
1008/*
1009 * Return check for irq code
1010 *
1011 * returns 1 if
1012 *  - we are inside irq code
1013 *  - we just left irq code
1014 *
1015 * returns 0 if
1016 *  - funcgraph-irqs option is set
1017 *  - we are not inside irq code
1018 */
1019static int
1020check_irq_return(struct trace_iterator *iter, u32 flags, int depth)
1021{
1022        int cpu = iter->cpu;
1023        int *depth_irq;
1024        struct fgraph_data *data = iter->private;
1025
1026        /*
1027         * If we are either displaying irqs, or we got called as
1028         * a graph event and private data does not exist,
1029         * then we bypass the irq check.
1030         */
1031        if ((flags & TRACE_GRAPH_PRINT_IRQS) ||
1032            (!data))
1033                return 0;
1034
1035        depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);
1036
1037        /*
1038         * We are not inside the irq code.
1039         */
1040        if (*depth_irq == -1)
1041                return 0;
1042
1043        /*
1044         * We are inside the irq code, and this is the return entry.
1045         * Let's not trace it and clear the entry depth, since
1046         * we are now out of the irq code.
1047         *
1048         * This condition ensures that we 'leave the irq code' once
1049         * we are out of the entry depth, thus protecting us from
1050         * losing the RETURN entry.
1051         */
1052        if (*depth_irq >= depth) {
1053                *depth_irq = -1;
1054                return 1;
1055        }
1056
1057        /*
1058         * We are inside the irq code, and this is not the entry.
1059         */
1060        return 1;
1061}
1062
1063static enum print_line_t
1064print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s,
1065                        struct trace_iterator *iter, u32 flags)
1066{
1067        struct fgraph_data *data = iter->private;
1068        struct ftrace_graph_ent *call = &field->graph_ent;
1069        struct ftrace_graph_ret_entry *leaf_ret;
1070        enum print_line_t ret;
1071        int cpu = iter->cpu;
1072
1073        if (check_irq_entry(iter, flags, call->func, call->depth))
1074                return TRACE_TYPE_HANDLED;
1075
1076        print_graph_prologue(iter, s, TRACE_GRAPH_ENT, call->func, flags);
1077
1078        leaf_ret = get_return_for_leaf(iter, field);
1079        if (leaf_ret)
1080                ret = print_graph_entry_leaf(iter, field, leaf_ret, s, flags);
1081        else
1082                ret = print_graph_entry_nested(iter, field, s, cpu, flags);
1083
1084        if (data) {
1085                /*
1086                 * If we failed to write our output, then we need to make
1087                 * note of it, because we already consumed our entry.
1088                 */
1089                if (s->full) {
1090                        data->failed = 1;
1091                        data->cpu = cpu;
1092                } else
1093                        data->failed = 0;
1094        }
1095
1096        return ret;
1097}
1098
1099static enum print_line_t
1100print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
1101                   struct trace_entry *ent, struct trace_iterator *iter,
1102                   u32 flags)
1103{
1104        unsigned long long duration = trace->rettime - trace->calltime;
1105        struct fgraph_data *data = iter->private;
1106        struct trace_array *tr = iter->tr;
1107        pid_t pid = ent->pid;
1108        int cpu = iter->cpu;
1109        int func_match = 1;
1110        int i;
1111
1112        if (check_irq_return(iter, flags, trace->depth))
1113                return TRACE_TYPE_HANDLED;
1114
1115        if (data) {
1116                struct fgraph_cpu_data *cpu_data;
1117                int cpu = iter->cpu;
1118
1119                cpu_data = per_cpu_ptr(data->cpu_data, cpu);
1120
1121                /*
1122                 * Comments display at depth + 1. This is the
1123                 * return from a function, so we now want the comments
1124                 * to display at the same level as the bracket.
1125                 */
1126                cpu_data->depth = trace->depth - 1;
1127
1128                if (trace->depth < FTRACE_RETFUNC_DEPTH &&
1129                    !WARN_ON_ONCE(trace->depth < 0)) {
1130                        if (cpu_data->enter_funcs[trace->depth] != trace->func)
1131                                func_match = 0;
1132                        cpu_data->enter_funcs[trace->depth] = 0;
1133                }
1134        }
1135
1136        print_graph_prologue(iter, s, 0, 0, flags);
1137
1138        /* Overhead and duration */
1139        print_graph_duration(tr, duration, s, flags);
1140
1141        /* Closing brace */
1142        for (i = 0; i < trace->depth * TRACE_GRAPH_INDENT; i++)
1143                trace_seq_putc(s, ' ');
1144
1145        /*
1146         * If the return function does not have a matching entry,
1147         * then the entry was lost. Instead of just printing
1148         * the '}' and letting the user guess what function this
1149         * belongs to, write out the function name. Always do
1150         * that if the funcgraph-tail option is enabled.
1151         */
1152        if (func_match && !(flags & TRACE_GRAPH_PRINT_TAIL))
1153                trace_seq_puts(s, "}\n");
1154        else
1155                trace_seq_printf(s, "} /* %ps */\n", (void *)trace->func);
1156
1157        /* Overrun */
1158        if (flags & TRACE_GRAPH_PRINT_OVERRUN)
1159                trace_seq_printf(s, " (Overruns: %lu)\n",
1160                                 trace->overrun);
1161
1162        print_graph_irq(iter, trace->func, TRACE_GRAPH_RET,
1163                        cpu, pid, flags);
1164
1165        return trace_handle_return(s);
1166}
1167
1168static enum print_line_t
1169print_graph_comment(struct trace_seq *s, struct trace_entry *ent,
1170                    struct trace_iterator *iter, u32 flags)
1171{
1172        struct trace_array *tr = iter->tr;
1173        unsigned long sym_flags = (tr->trace_flags & TRACE_ITER_SYM_MASK);
1174        struct fgraph_data *data = iter->private;
1175        struct trace_event *event;
1176        int depth = 0;
1177        int ret;
1178        int i;
1179
1180        if (data)
1181                depth = per_cpu_ptr(data->cpu_data, iter->cpu)->depth;
1182
1183        print_graph_prologue(iter, s, 0, 0, flags);
1184
1185        /* No time */
1186        print_graph_duration(tr, 0, s, flags | FLAGS_FILL_FULL);
1187
1188        /* Indentation */
1189        if (depth > 0)
1190                for (i = 0; i < (depth + 1) * TRACE_GRAPH_INDENT; i++)
1191                        trace_seq_putc(s, ' ');
1192
1193        /* The comment */
1194        trace_seq_puts(s, "/* ");
1195
1196        switch (iter->ent->type) {
1197        case TRACE_BPUTS:
1198                ret = trace_print_bputs_msg_only(iter);
1199                if (ret != TRACE_TYPE_HANDLED)
1200                        return ret;
1201                break;
1202        case TRACE_BPRINT:
1203                ret = trace_print_bprintk_msg_only(iter);
1204                if (ret != TRACE_TYPE_HANDLED)
1205                        return ret;
1206                break;
1207        case TRACE_PRINT:
1208                ret = trace_print_printk_msg_only(iter);
1209                if (ret != TRACE_TYPE_HANDLED)
1210                        return ret;
1211                break;
1212        default:
1213                event = ftrace_find_event(ent->type);
1214                if (!event)
1215                        return TRACE_TYPE_UNHANDLED;
1216
1217                ret = event->funcs->trace(iter, sym_flags, event);
1218                if (ret != TRACE_TYPE_HANDLED)
1219                        return ret;
1220        }
1221
1222        if (trace_seq_has_overflowed(s))
1223                goto out;
1224
1225        /* Strip ending newline */
1226        if (s->buffer[s->seq.len - 1] == '\n') {
1227                s->buffer[s->seq.len - 1] = '\0';
1228                s->seq.len--;
1229        }
1230
1231        trace_seq_puts(s, " */\n");
1232 out:
1233        return trace_handle_return(s);
1234}
1235
1236
1237enum print_line_t
1238print_graph_function_flags(struct trace_iterator *iter, u32 flags)
1239{
1240        struct ftrace_graph_ent_entry *field;
1241        struct fgraph_data *data = iter->private;
1242        struct trace_entry *entry = iter->ent;
1243        struct trace_seq *s = &iter->seq;
1244        int cpu = iter->cpu;
1245        int ret;
1246
1247        if (data && per_cpu_ptr(data->cpu_data, cpu)->ignore) {
1248                per_cpu_ptr(data->cpu_data, cpu)->ignore = 0;
1249                return TRACE_TYPE_HANDLED;
1250        }
1251
1252        /*
1253         * If the last output failed, there's a possibility we need
1254         * to print out the missing entry, which would otherwise never go out.
1255         */
1256        if (data && data->failed) {
1257                field = &data->ent;
1258                iter->cpu = data->cpu;
1259                ret = print_graph_entry(field, s, iter, flags);
1260                if (ret == TRACE_TYPE_HANDLED && iter->cpu != cpu) {
1261                        per_cpu_ptr(data->cpu_data, iter->cpu)->ignore = 1;
1262                        ret = TRACE_TYPE_NO_CONSUME;
1263                }
1264                iter->cpu = cpu;
1265                return ret;
1266        }
1267
1268        switch (entry->type) {
1269        case TRACE_GRAPH_ENT: {
1270                /*
1271                 * print_graph_entry() may consume the current event,
1272                 * thus @field may become invalid, so we need to save it.
1273                 * sizeof(struct ftrace_graph_ent_entry) is very small,
1274                 * so it can be safely saved on the stack.
1275                 */
1276                struct ftrace_graph_ent_entry saved;
1277                trace_assign_type(field, entry);
1278                saved = *field;
1279                return print_graph_entry(&saved, s, iter, flags);
1280        }
1281        case TRACE_GRAPH_RET: {
1282                struct ftrace_graph_ret_entry *field;
1283                trace_assign_type(field, entry);
1284                return print_graph_return(&field->ret, s, entry, iter, flags);
1285        }
1286        case TRACE_STACK:
1287        case TRACE_FN:
1288                /* don't print stack and function events as comments */
1289                return TRACE_TYPE_UNHANDLED;
1290
1291        default:
1292                return print_graph_comment(s, entry, iter, flags);
1293        }
1294
1295        return TRACE_TYPE_HANDLED;
1296}
1297
1298static enum print_line_t
1299print_graph_function(struct trace_iterator *iter)
1300{
1301        return print_graph_function_flags(iter, tracer_flags.val);
1302}
1303
1304static enum print_line_t
1305print_graph_function_event(struct trace_iterator *iter, int flags,
1306                           struct trace_event *event)
1307{
1308        return print_graph_function(iter);
1309}
1310
1311static void print_lat_header(struct seq_file *s, u32 flags)
1312{
1313        static const char spaces[] = "                " /* 16 spaces */
1314                "    "                                  /* 4 spaces */
1315                "                 ";                    /* 17 spaces */
1316        int size = 0;
1317
1318        if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
1319                size += 16;
1320        if (flags & TRACE_GRAPH_PRINT_CPU)
1321                size += 4;
1322        if (flags & TRACE_GRAPH_PRINT_PROC)
1323                size += 17;
1324
1325        seq_printf(s, "#%.*s  _-----=> irqs-off        \n", size, spaces);
1326        seq_printf(s, "#%.*s / _----=> need-resched    \n", size, spaces);
1327        seq_printf(s, "#%.*s| / _---=> hardirq/softirq \n", size, spaces);
1328        seq_printf(s, "#%.*s|| / _--=> preempt-depth   \n", size, spaces);
1329        seq_printf(s, "#%.*s||| /                      \n", size, spaces);
1330}
1331
1332static void __print_graph_headers_flags(struct trace_array *tr,
1333                                        struct seq_file *s, u32 flags)
1334{
1335        int lat = tr->trace_flags & TRACE_ITER_LATENCY_FMT;
1336
1337        if (lat)
1338                print_lat_header(s, flags);
1339
1340        /* 1st line */
1341        seq_putc(s, '#');
1342        if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
1343                seq_puts(s, "     TIME       ");
1344        if (flags & TRACE_GRAPH_PRINT_CPU)
1345                seq_puts(s, " CPU");
1346        if (flags & TRACE_GRAPH_PRINT_PROC)
1347                seq_puts(s, "  TASK/PID       ");
1348        if (lat)
1349                seq_puts(s, "||||");
1350        if (flags & TRACE_GRAPH_PRINT_DURATION)
1351                seq_puts(s, "  DURATION   ");
1352        seq_puts(s, "               FUNCTION CALLS\n");
1353
1354        /* 2nd line */
1355        seq_putc(s, '#');
1356        if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
1357                seq_puts(s, "      |         ");
1358        if (flags & TRACE_GRAPH_PRINT_CPU)
1359                seq_puts(s, " |  ");
1360        if (flags & TRACE_GRAPH_PRINT_PROC)
1361                seq_puts(s, "   |    |        ");
1362        if (lat)
1363                seq_puts(s, "||||");
1364        if (flags & TRACE_GRAPH_PRINT_DURATION)
1365                seq_puts(s, "   |   |      ");
1366        seq_puts(s, "               |   |   |   |\n");
1367}
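
/*
 * With the default flags (cpu, overhead, duration) the two header
 * lines come out roughly as:
 *
 * # CPU  DURATION                  FUNCTION CALLS
 * # |     |   |                     |   |   |   |
 */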
1368
1369static void print_graph_headers(struct seq_file *s)
1370{
1371        print_graph_headers_flags(s, tracer_flags.val);
1372}
1373
1374void print_graph_headers_flags(struct seq_file *s, u32 flags)
1375{
1376        struct trace_iterator *iter = s->private;
1377        struct trace_array *tr = iter->tr;
1378
1379        if (!(tr->trace_flags & TRACE_ITER_CONTEXT_INFO))
1380                return;
1381
1382        if (tr->trace_flags & TRACE_ITER_LATENCY_FMT) {
1383                /* print nothing if the buffers are empty */
1384                if (trace_empty(iter))
1385                        return;
1386
1387                print_trace_header(s, iter);
1388        }
1389
1390        __print_graph_headers_flags(tr, s, flags);
1391}
1392
1393void graph_trace_open(struct trace_iterator *iter)
1394{
1395        /* pid and depth on the last trace processed */
1396        struct fgraph_data *data;
1397        gfp_t gfpflags;
1398        int cpu;
1399
1400        iter->private = NULL;
1401
1402        /* We can be called in atomic context via ftrace_dump() */
1403        gfpflags = (in_atomic() || irqs_disabled()) ? GFP_ATOMIC : GFP_KERNEL;
1404
1405        data = kzalloc(sizeof(*data), gfpflags);
1406        if (!data)
1407                goto out_err;
1408
1409        data->cpu_data = alloc_percpu_gfp(struct fgraph_cpu_data, gfpflags);
1410        if (!data->cpu_data)
1411                goto out_err_free;
1412
1413        for_each_possible_cpu(cpu) {
1414                pid_t *pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid);
1415                int *depth = &(per_cpu_ptr(data->cpu_data, cpu)->depth);
1416                int *ignore = &(per_cpu_ptr(data->cpu_data, cpu)->ignore);
1417                int *depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);
1418
1419                *pid = -1;
1420                *depth = 0;
1421                *ignore = 0;
1422                *depth_irq = -1;
1423        }
1424
1425        iter->private = data;
1426
1427        return;
1428
1429 out_err_free:
1430        kfree(data);
1431 out_err:
1432        pr_warn("function graph tracer: not enough memory\n");
1433}
1434
1435void graph_trace_close(struct trace_iterator *iter)
1436{
1437        struct fgraph_data *data = iter->private;
1438
1439        if (data) {
1440                free_percpu(data->cpu_data);
1441                kfree(data);
1442        }
1443}
1444
1445static int
1446func_graph_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
1447{
1448        if (bit == TRACE_GRAPH_PRINT_IRQS)
1449                ftrace_graph_skip_irqs = !set;
1450
1451        if (bit == TRACE_GRAPH_SLEEP_TIME)
1452                ftrace_graph_sleep_time_control(set);
1453
1454        if (bit == TRACE_GRAPH_GRAPH_TIME)
1455                ftrace_graph_graph_time_control(set);
1456
1457        return 0;
1458}
1459
1460static struct trace_event_functions graph_functions = {
1461        .trace          = print_graph_function_event,
1462};
1463
1464static struct trace_event graph_trace_entry_event = {
1465        .type           = TRACE_GRAPH_ENT,
1466        .funcs          = &graph_functions,
1467};
1468
1469static struct trace_event graph_trace_ret_event = {
1470        .type           = TRACE_GRAPH_RET,
1471        .funcs          = &graph_functions
1472};
1473
1474static struct tracer graph_trace __tracer_data = {
1475        .name           = "function_graph",
1476        .update_thresh  = graph_trace_update_thresh,
1477        .open           = graph_trace_open,
1478        .pipe_open      = graph_trace_open,
1479        .close          = graph_trace_close,
1480        .pipe_close     = graph_trace_close,
1481        .init           = graph_trace_init,
1482        .reset          = graph_trace_reset,
1483        .print_line     = print_graph_function,
1484        .print_header   = print_graph_headers,
1485        .flags          = &tracer_flags,
1486        .set_flag       = func_graph_set_flag,
1487#ifdef CONFIG_FTRACE_SELFTEST
1488        .selftest       = trace_selftest_startup_function_graph,
1489#endif
1490};
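
/*
 * Example (user-space view): enabling the tracer, assuming tracefs is
 * mounted at /sys/kernel/debug/tracing:
 *
 *	cd /sys/kernel/debug/tracing
 *	echo function_graph > current_tracer
 *	cat trace
 */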
1491
1492
1493static ssize_t
1494graph_depth_write(struct file *filp, const char __user *ubuf, size_t cnt,
1495                  loff_t *ppos)
1496{
1497        unsigned long val;
1498        int ret;
1499
1500        ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
1501        if (ret)
1502                return ret;
1503
1504        fgraph_max_depth = val;
1505
1506        *ppos += cnt;
1507
1508        return cnt;
1509}
1510
1511static ssize_t
1512graph_depth_read(struct file *filp, char __user *ubuf, size_t cnt,
1513                 loff_t *ppos)
1514{
1515        char buf[15]; /* More than enough to hold UINT_MAX + "\n" */
1516        int n;
1517
1518        n = sprintf(buf, "%d\n", fgraph_max_depth);
1519
1520        return simple_read_from_buffer(ubuf, cnt, ppos, buf, n);
1521}
1522
1523static const struct file_operations graph_depth_fops = {
1524        .open           = tracing_open_generic,
1525        .write          = graph_depth_write,
1526        .read           = graph_depth_read,
1527        .llseek         = generic_file_llseek,
1528};
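
/*
 * Example (user-space view): limit output to two call levels, then
 * lift the limit again (0 means no limit):
 *
 *	echo 2 > /sys/kernel/debug/tracing/max_graph_depth
 *	echo 0 > /sys/kernel/debug/tracing/max_graph_depth
 */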
1529
1530static __init int init_graph_tracefs(void)
1531{
1532        struct dentry *d_tracer;
1533
1534        d_tracer = tracing_init_dentry();
1535        if (IS_ERR(d_tracer))
1536                return 0;
1537
1538        trace_create_file("max_graph_depth", 0644, d_tracer,
1539                          NULL, &graph_depth_fops);
1540
1541        return 0;
1542}
1543fs_initcall(init_graph_tracefs);
1544
1545static __init int init_graph_trace(void)
1546{
1547        max_bytes_for_cpu = snprintf(NULL, 0, "%u", nr_cpu_ids - 1);
1548
1549        if (!register_trace_event(&graph_trace_entry_event)) {
1550                pr_warn("Warning: could not register graph trace events\n");
1551                return 1;
1552        }
1553
1554        if (!register_trace_event(&graph_trace_ret_event)) {
1555                pr_warn("Warning: could not register graph trace events\n");
1556                return 1;
1557        }
1558
1559        return register_tracer(&graph_trace);
1560}
1561
1562core_initcall(init_graph_trace);
1563