linux/kernel/trace/trace.c
/*
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2012 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally taken from the RT patch by:
 *    Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 Nadia Yvette Chambers
 */
#include <linux/ring_buffer.h>
#include <generated/utsrelease.h>
#include <linux/stacktrace.h>
#include <linux/writeback.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/notifier.h>
#include <linux/irqflags.h>
#include <linux/debugfs.h>
#include <linux/tracefs.h>
#include <linux/pagemap.h>
#include <linux/hardirq.h>
#include <linux/linkage.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/splice.h>
#include <linux/kdebug.h>
#include <linux/string.h>
#include <linux/mount.h>
#include <linux/rwsem.h>
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/nmi.h>
#include <linux/fs.h>
#include <linux/trace.h>
#include <linux/sched/rt.h>

#include "trace.h"
#include "trace_output.h"

/*
 * On boot up, the ring buffer is set to the minimum size, so that
 * we do not waste memory on systems that are not using tracing.
 */
bool ring_buffer_expanded;

/*
 * We need to change this state when a selftest is running.
 * A selftest will look into the ring-buffer to count the
 * entries inserted during the selftest, although some concurrent
 * insertions into the ring-buffer, such as trace_printk, could occur
 * at the same time, giving false positive or negative results.
 */
static bool __read_mostly tracing_selftest_running;

/*
 * If a tracer is running, we do not want to run SELFTEST.
 */
bool __read_mostly tracing_selftest_disabled;

/* Pipe tracepoints to printk */
struct trace_iterator *tracepoint_print_iter;
int tracepoint_printk;
static DEFINE_STATIC_KEY_FALSE(tracepoint_printk_key);

/* For tracers that don't implement custom flags */
static struct tracer_opt dummy_tracer_opt[] = {
        { }
};

static int
dummy_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
        return 0;
}

/*
 * To prevent the comm cache from being overwritten when no
 * tracing is active, only save the comm when a trace event
 * occurred.
 */
static DEFINE_PER_CPU(bool, trace_taskinfo_save);

/*
 * Kill all tracing for good (never come back).
 * It is initialized to 1 but will turn to zero if the initialization
 * of the tracer is successful. But that is the only place that sets
 * this back to zero.
 */
static int tracing_disabled = 1;

cpumask_var_t __read_mostly     tracing_buffer_mask;

/*
 * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
 *
 * If there is an oops (or kernel panic) and ftrace_dump_on_oops
 * is set, then ftrace_dump is called. This will output the contents
 * of the ftrace buffers to the console.  This is very useful for
 * capturing traces that lead to crashes and outputting them to a
 * serial console.
 *
 * It is off by default, but you can enable it either by specifying
 * "ftrace_dump_on_oops" on the kernel command line, or by setting
 * /proc/sys/kernel/ftrace_dump_on_oops.
 * Set it to 1 if you want to dump the buffers of all CPUs.
 * Set it to 2 if you want to dump only the buffer of the CPU that
 * triggered the oops.
 */

enum ftrace_dump_mode ftrace_dump_on_oops;
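
/*
 * Example (illustrative): to get a full ftrace dump on a panic, boot
 * with "ftrace_dump_on_oops" on the kernel command line, or at run
 * time:
 *
 *        echo 1 > /proc/sys/kernel/ftrace_dump_on_oops   (dump all CPUs)
 *        echo 2 > /proc/sys/kernel/ftrace_dump_on_oops   (dump only the oops CPU)
 */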
 119
 120/* When set, tracing will stop when a WARN*() is hit */
 121int __disable_trace_on_warning;
 122
 123#ifdef CONFIG_TRACE_EVAL_MAP_FILE
 124/* Map of enums to their values, for "eval_map" file */
 125struct trace_eval_map_head {
 126        struct module                   *mod;
 127        unsigned long                   length;
 128};
 129
 130union trace_eval_map_item;
 131
 132struct trace_eval_map_tail {
 133        /*
 134         * "end" is first and points to NULL as it must be different
 135         * than "mod" or "eval_string"
 136         */
 137        union trace_eval_map_item       *next;
 138        const char                      *end;   /* points to NULL */
 139};
 140
 141static DEFINE_MUTEX(trace_eval_mutex);
 142
 143/*
 144 * The trace_eval_maps are saved in an array with two extra elements,
 145 * one at the beginning, and one at the end. The beginning item contains
 146 * the count of the saved maps (head.length), and the module they
 147 * belong to if not built in (head.mod). The ending item contains a
 148 * pointer to the next array of saved eval_map items.
 149 */
 150union trace_eval_map_item {
 151        struct trace_eval_map           map;
 152        struct trace_eval_map_head      head;
 153        struct trace_eval_map_tail      tail;
 154};
 155
 156static union trace_eval_map_item *trace_eval_maps;
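
/*
 * An illustrative sketch of one saved array holding N maps:
 *
 *        trace_eval_maps
 *              |
 *              v
 *        [ head (length = N, mod) | map 0 | ... | map N-1 | tail ]
 *                                                             |
 *                                                  tail.next -+-> next saved
 *                                                              array (or NULL)
 */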
#endif /* CONFIG_TRACE_EVAL_MAP_FILE */

static int tracing_set_tracer(struct trace_array *tr, const char *buf);

#define MAX_TRACER_SIZE         100
static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
static char *default_bootup_tracer;

static bool allocate_snapshot;

static int __init set_cmdline_ftrace(char *str)
{
        strlcpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
        default_bootup_tracer = bootup_tracer_buf;
        /* We are using ftrace early, expand it */
        ring_buffer_expanded = true;
        return 1;
}
__setup("ftrace=", set_cmdline_ftrace);

static int __init set_ftrace_dump_on_oops(char *str)
{
        if (*str++ != '=' || !*str) {
                ftrace_dump_on_oops = DUMP_ALL;
                return 1;
        }

        if (!strcmp("orig_cpu", str)) {
                ftrace_dump_on_oops = DUMP_ORIG;
                return 1;
        }

        return 0;
}
__setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);

static int __init stop_trace_on_warning(char *str)
{
        if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
                __disable_trace_on_warning = 1;
        return 1;
}
__setup("traceoff_on_warning", stop_trace_on_warning);

static int __init boot_alloc_snapshot(char *str)
{
        allocate_snapshot = true;
        /* We also need the main ring buffer expanded */
        ring_buffer_expanded = true;
        return 1;
}
__setup("alloc_snapshot", boot_alloc_snapshot);


static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata;

static int __init set_trace_boot_options(char *str)
{
        strlcpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
        return 0;
}
__setup("trace_options=", set_trace_boot_options);

static char trace_boot_clock_buf[MAX_TRACER_SIZE] __initdata;
static char *trace_boot_clock __initdata;

static int __init set_trace_boot_clock(char *str)
{
        strlcpy(trace_boot_clock_buf, str, MAX_TRACER_SIZE);
        trace_boot_clock = trace_boot_clock_buf;
        return 0;
}
__setup("trace_clock=", set_trace_boot_clock);

static int __init set_tracepoint_printk(char *str)
{
        if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
                tracepoint_printk = 1;
        return 1;
}
__setup("tp_printk", set_tracepoint_printk);

unsigned long long ns2usecs(u64 nsec)
{
        nsec += 500;
        do_div(nsec, 1000);
        return nsec;
}

/* trace_flags holds trace_options default values */
#define TRACE_DEFAULT_FLAGS                                             \
        (FUNCTION_DEFAULT_FLAGS |                                       \
         TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |                  \
         TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO |                \
         TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE |                 \
         TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS)

/* trace_options that are only supported by global_trace */
#define TOP_LEVEL_TRACE_FLAGS (TRACE_ITER_PRINTK |                      \
               TRACE_ITER_PRINTK_MSGONLY | TRACE_ITER_RECORD_CMD)

/* trace_flags that are default zero for instances */
#define ZEROED_TRACE_FLAGS \
        (TRACE_ITER_EVENT_FORK | TRACE_ITER_FUNC_FORK)

/*
 * The global_trace is the descriptor that holds the top-level tracing
 * buffers for the live tracing.
 */
static struct trace_array global_trace = {
        .trace_flags = TRACE_DEFAULT_FLAGS,
};

LIST_HEAD(ftrace_trace_arrays);

int trace_array_get(struct trace_array *this_tr)
{
        struct trace_array *tr;
        int ret = -ENODEV;

        mutex_lock(&trace_types_lock);
        list_for_each_entry(tr, &ftrace_trace_arrays, list) {
                if (tr == this_tr) {
                        tr->ref++;
                        ret = 0;
                        break;
                }
        }
        mutex_unlock(&trace_types_lock);

        return ret;
}

static void __trace_array_put(struct trace_array *this_tr)
{
        WARN_ON(!this_tr->ref);
        this_tr->ref--;
}

void trace_array_put(struct trace_array *this_tr)
{
        mutex_lock(&trace_types_lock);
        __trace_array_put(this_tr);
        mutex_unlock(&trace_types_lock);
}
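
/*
 * A minimal sketch of the expected get/put pairing (the trace_array
 * pointer is assumed to come from the caller's context):
 *
 *        if (trace_array_get(tr) < 0)
 *                return -ENODEV;      // tr is not (or no longer) registered
 *        // ... use tr ...
 *        trace_array_put(tr);
 */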

int call_filter_check_discard(struct trace_event_call *call, void *rec,
                              struct ring_buffer *buffer,
                              struct ring_buffer_event *event)
{
        if (unlikely(call->flags & TRACE_EVENT_FL_FILTERED) &&
            !filter_match_preds(call->filter, rec)) {
                __trace_event_discard_commit(buffer, event);
                return 1;
        }

        return 0;
}

void trace_free_pid_list(struct trace_pid_list *pid_list)
{
        vfree(pid_list->pids);
        kfree(pid_list);
}

/**
 * trace_find_filtered_pid - check if a pid exists in a filtered_pid list
 * @filtered_pids: The list of pids to check
 * @search_pid: The PID to find in @filtered_pids
 *
 * Returns true if @search_pid is found in @filtered_pids, and false otherwise.
 */
bool
trace_find_filtered_pid(struct trace_pid_list *filtered_pids, pid_t search_pid)
{
        /*
         * If pid_max changed after filtered_pids was created, we
         * by default ignore all pids greater than the previous pid_max.
         */
        if (search_pid >= filtered_pids->pid_max)
                return false;

        return test_bit(search_pid, filtered_pids->pids);
}

/**
 * trace_ignore_this_task - should a task be ignored for tracing
 * @filtered_pids: The list of pids to check
 * @task: The task that should be ignored if not filtered
 *
 * Checks if @task should be traced or not from @filtered_pids.
 * Returns true if @task should *NOT* be traced.
 * Returns false if @task should be traced.
 */
bool
trace_ignore_this_task(struct trace_pid_list *filtered_pids, struct task_struct *task)
{
        /*
         * Return false, because if filtered_pids does not exist,
         * all pids are good to trace.
         */
        if (!filtered_pids)
                return false;

        return !trace_find_filtered_pid(filtered_pids, task->pid);
}

/**
 * trace_filter_add_remove_task - Add or remove a task from a pid_list
 * @pid_list: The list to modify
 * @self: The current task for fork or NULL for exit
 * @task: The task to add or remove
 *
 * If adding a task, if @self is defined, the task is only added if @self
 * is also included in @pid_list. This happens on fork and tasks should
 * only be added when the parent is listed. If @self is NULL, then the
 * @task pid will be removed from the list, which would happen on exit
 * of a task.
 */
void trace_filter_add_remove_task(struct trace_pid_list *pid_list,
                                  struct task_struct *self,
                                  struct task_struct *task)
{
        if (!pid_list)
                return;

        /* For forks, we only add if the forking task is listed */
        if (self) {
                if (!trace_find_filtered_pid(pid_list, self->pid))
                        return;
        }

        /* Sorry, but we don't support pid_max changing after setting */
        if (task->pid >= pid_list->pid_max)
                return;

        /* "self" is set for forks, and NULL for exits */
        if (self)
                set_bit(task->pid, pid_list->pids);
        else
                clear_bit(task->pid, pid_list->pids);
}
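
/*
 * A sketch of how fork/exit handlers might keep a pid list in sync
 * (the handler names and the pid_list variable here are hypothetical):
 *
 *        static void my_fork_handler(struct task_struct *self,
 *                                    struct task_struct *child)
 *        {
 *                // child is added only if self is already in the list
 *                trace_filter_add_remove_task(pid_list, self, child);
 *        }
 *
 *        static void my_exit_handler(struct task_struct *task)
 *        {
 *                // NULL self means: remove task on exit
 *                trace_filter_add_remove_task(pid_list, NULL, task);
 *        }
 */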

/**
 * trace_pid_next - Used for seq_file to get to the next pid of a pid_list
 * @pid_list: The pid list to show
 * @v: The last pid that was shown (+1 the actual pid to let zero be displayed)
 * @pos: The position of the file
 *
 * This is used by the seq_file "next" operation to iterate the pids
 * listed in a trace_pid_list structure.
 *
 * Returns the pid+1 as we want to display pid of zero, but NULL would
 * stop the iteration.
 */
void *trace_pid_next(struct trace_pid_list *pid_list, void *v, loff_t *pos)
{
        unsigned long pid = (unsigned long)v;

        (*pos)++;

        /* pid already is +1 of the actual previous bit */
        pid = find_next_bit(pid_list->pids, pid_list->pid_max, pid);

        /* Return pid + 1 to allow zero to be represented */
        if (pid < pid_list->pid_max)
                return (void *)(pid + 1);

        return NULL;
}

/**
 * trace_pid_start - Used for seq_file to start reading pid lists
 * @pid_list: The pid list to show
 * @pos: The position of the file
 *
 * This is used by seq_file "start" operation to start the iteration
 * of listing pids.
 *
 * Returns the pid+1 as we want to display pid of zero, but NULL would
 * stop the iteration.
 */
void *trace_pid_start(struct trace_pid_list *pid_list, loff_t *pos)
{
        unsigned long pid;
        loff_t l = 0;

        pid = find_first_bit(pid_list->pids, pid_list->pid_max);
        if (pid >= pid_list->pid_max)
                return NULL;

        /* Return pid + 1 so that zero can be the exit value */
        for (pid++; pid && l < *pos;
             pid = (unsigned long)trace_pid_next(pid_list, (void *)pid, &l))
                ;
        return (void *)pid;
}

/**
 * trace_pid_show - show the current pid in seq_file processing
 * @m: The seq_file structure to write into
 * @v: A void pointer of the pid (+1) value to display
 *
 * Can be directly used by seq_file operations to display the current
 * pid value.
 */
int trace_pid_show(struct seq_file *m, void *v)
{
        unsigned long pid = (unsigned long)v - 1;

        seq_printf(m, "%lu\n", pid);
        return 0;
}
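
/*
 * A sketch of wiring these helpers into seq_operations. The start/next
 * wrappers below are hypothetical; they only fetch the pid list from the
 * seq_file private data before delegating:
 *
 *        static void *p_start(struct seq_file *m, loff_t *pos)
 *        {
 *                struct trace_pid_list *pid_list = m->private;
 *
 *                return trace_pid_start(pid_list, pos);
 *        }
 *
 *        static void *p_next(struct seq_file *m, void *v, loff_t *pos)
 *        {
 *                return trace_pid_next(m->private, v, pos);
 *        }
 *
 *        static const struct seq_operations pid_seq_ops = {
 *                .start = p_start,
 *                .next  = p_next,
 *                .stop  = p_stop,        // hypothetical; releases p_start's locks
 *                .show  = trace_pid_show,
 *        };
 */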

/* 128 (PID_BUF_SIZE + 1) should be much more than enough */
#define PID_BUF_SIZE            127

int trace_pid_write(struct trace_pid_list *filtered_pids,
                    struct trace_pid_list **new_pid_list,
                    const char __user *ubuf, size_t cnt)
{
        struct trace_pid_list *pid_list;
        struct trace_parser parser;
        unsigned long val;
        int nr_pids = 0;
        ssize_t read = 0;
        ssize_t ret = 0;
        loff_t pos;
        pid_t pid;

        if (trace_parser_get_init(&parser, PID_BUF_SIZE + 1))
                return -ENOMEM;

        /*
         * The write is an all-or-nothing operation: always create a new
         * array when the user adds new pids. If the operation fails, the
         * current list is left unmodified.
         */
        pid_list = kmalloc(sizeof(*pid_list), GFP_KERNEL);
        if (!pid_list)
                return -ENOMEM;

        pid_list->pid_max = READ_ONCE(pid_max);

        /* Only truncating will shrink pid_max */
        if (filtered_pids && filtered_pids->pid_max > pid_list->pid_max)
                pid_list->pid_max = filtered_pids->pid_max;

        pid_list->pids = vzalloc((pid_list->pid_max + 7) >> 3);
        if (!pid_list->pids) {
                kfree(pid_list);
                return -ENOMEM;
        }

        if (filtered_pids) {
                /* copy the current bits to the new max */
                for_each_set_bit(pid, filtered_pids->pids,
                                 filtered_pids->pid_max) {
                        set_bit(pid, pid_list->pids);
                        nr_pids++;
                }
        }

        while (cnt > 0) {

                pos = 0;

                ret = trace_get_user(&parser, ubuf, cnt, &pos);
                if (ret < 0 || !trace_parser_loaded(&parser))
                        break;

                read += ret;
                ubuf += ret;
                cnt -= ret;

                parser.buffer[parser.idx] = 0;

                ret = -EINVAL;
                if (kstrtoul(parser.buffer, 0, &val))
                        break;
                if (val >= pid_list->pid_max)
                        break;

                pid = (pid_t)val;

                set_bit(pid, pid_list->pids);
                nr_pids++;

                trace_parser_clear(&parser);
                ret = 0;
        }
        trace_parser_put(&parser);

        if (ret < 0) {
                trace_free_pid_list(pid_list);
                return ret;
        }

        if (!nr_pids) {
                /* Cleared the list of pids */
                trace_free_pid_list(pid_list);
                read = ret;
                pid_list = NULL;
        }

        *new_pid_list = pid_list;

        return read;
}
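
/*
 * Illustrative use from user space, assuming a tracefs file whose write
 * handler feeds trace_pid_write() (e.g. set_event_pid):
 *
 *        echo 123 456 > set_event_pid   # replace the list with pids 123, 456
 *        echo > set_event_pid           # no pids parsed: the list is cleared
 */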

static u64 buffer_ftrace_now(struct trace_buffer *buf, int cpu)
{
        u64 ts;

        /* Early boot up does not have a buffer yet */
        if (!buf->buffer)
                return trace_clock_local();

        ts = ring_buffer_time_stamp(buf->buffer, cpu);
        ring_buffer_normalize_time_stamp(buf->buffer, cpu, &ts);

        return ts;
}

u64 ftrace_now(int cpu)
{
        return buffer_ftrace_now(&global_trace.trace_buffer, cpu);
}

/**
 * tracing_is_enabled - Show if global_trace has been disabled
 *
 * Shows if the global trace has been enabled or not. It uses the
 * mirror flag "buffer_disabled" to be used in fast paths such as for
 * the irqsoff tracer. But it may be inaccurate due to races. If you
 * need to know the accurate state, use tracing_is_on() which is a little
 * slower, but accurate.
 */
int tracing_is_enabled(void)
{
        /*
         * For quick access (irqsoff uses this in fast path), just
         * return the mirror variable of the state of the ring buffer.
         * It's a little racy, but we don't really care.
         */
        smp_rmb();
        return !global_trace.buffer_disabled;
}

/*
 * trace_buf_size is the size in bytes that is allocated
 * for a buffer. Note, the number of bytes is always rounded
 * to page size.
 *
 * This number is purposely set to a low number of 16384.
 * If the dump on oops happens, it will be much appreciated
 * to not have to wait for all that output. In any case, this
 * is configurable at both boot time and run time.
 */
#define TRACE_BUF_SIZE_DEFAULT  1441792UL /* 16384 * 88 (sizeof(entry)) */

static unsigned long            trace_buf_size = TRACE_BUF_SIZE_DEFAULT;

/* trace_types holds a linked list of available tracers. */
static struct tracer            *trace_types __read_mostly;

/*
 * trace_types_lock is used to protect the trace_types list.
 */
DEFINE_MUTEX(trace_types_lock);

/*
 * serialize the access of the ring buffer
 *
 * The ring buffer serializes readers, but that is only low-level
 * protection. The validity of the events (returned by
 * ring_buffer_peek() etc.) is not protected by the ring buffer.
 *
 * The content of events may become garbage if we allow another process
 * to consume these events concurrently:
 *   A) the page of the consumed events may become a normal page
 *      (not a reader page) in the ring buffer, and this page will be
 *      rewritten by the event producer.
 *   B) The page of the consumed events may become a page for
 *      splice_read, and this page will be returned to the system.
 *
 * These primitives allow multiple processes to access different CPU
 * ring buffers concurrently.
 *
 * These primitives don't distinguish read-only from read-consume access.
 * Multiple read-only accesses are also serialized.
 */

#ifdef CONFIG_SMP
static DECLARE_RWSEM(all_cpu_access_lock);
static DEFINE_PER_CPU(struct mutex, cpu_access_lock);

static inline void trace_access_lock(int cpu)
{
        if (cpu == RING_BUFFER_ALL_CPUS) {
                /* gain it for accessing the whole ring buffer. */
                down_write(&all_cpu_access_lock);
        } else {
                /* gain it for accessing a cpu ring buffer. */

                /* Firstly block other trace_access_lock(RING_BUFFER_ALL_CPUS). */
                down_read(&all_cpu_access_lock);

                /* Secondly block other access to this @cpu ring buffer. */
                mutex_lock(&per_cpu(cpu_access_lock, cpu));
        }
}

static inline void trace_access_unlock(int cpu)
{
        if (cpu == RING_BUFFER_ALL_CPUS) {
                up_write(&all_cpu_access_lock);
        } else {
                mutex_unlock(&per_cpu(cpu_access_lock, cpu));
                up_read(&all_cpu_access_lock);
        }
}

static inline void trace_access_lock_init(void)
{
        int cpu;

        for_each_possible_cpu(cpu)
                mutex_init(&per_cpu(cpu_access_lock, cpu));
}

#else

static DEFINE_MUTEX(access_lock);

static inline void trace_access_lock(int cpu)
{
        (void)cpu;
        mutex_lock(&access_lock);
}

static inline void trace_access_unlock(int cpu)
{
        (void)cpu;
        mutex_unlock(&access_lock);
}

static inline void trace_access_lock_init(void)
{
}

#endif
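
/*
 * A minimal usage sketch: a reader of a single CPU buffer takes the
 * shared rwsem read side plus that cpu's mutex, while a reader of all
 * buffers takes the rwsem write side and excludes everyone:
 *
 *        trace_access_lock(cpu);
 *        // consume events from that cpu's buffer, e.g. with
 *        // ring_buffer_consume()
 *        trace_access_unlock(cpu);
 */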

#ifdef CONFIG_STACKTRACE
static void __ftrace_trace_stack(struct ring_buffer *buffer,
                                 unsigned long flags,
                                 int skip, int pc, struct pt_regs *regs);
static inline void ftrace_trace_stack(struct trace_array *tr,
                                      struct ring_buffer *buffer,
                                      unsigned long flags,
                                      int skip, int pc, struct pt_regs *regs);

#else
static inline void __ftrace_trace_stack(struct ring_buffer *buffer,
                                        unsigned long flags,
                                        int skip, int pc, struct pt_regs *regs)
{
}
static inline void ftrace_trace_stack(struct trace_array *tr,
                                      struct ring_buffer *buffer,
                                      unsigned long flags,
                                      int skip, int pc, struct pt_regs *regs)
{
}

#endif

static __always_inline void
trace_event_setup(struct ring_buffer_event *event,
                  int type, unsigned long flags, int pc)
{
        struct trace_entry *ent = ring_buffer_event_data(event);

        tracing_generic_entry_update(ent, flags, pc);
        ent->type = type;
}

static __always_inline struct ring_buffer_event *
__trace_buffer_lock_reserve(struct ring_buffer *buffer,
                          int type,
                          unsigned long len,
                          unsigned long flags, int pc)
{
        struct ring_buffer_event *event;

        event = ring_buffer_lock_reserve(buffer, len);
        if (event != NULL)
                trace_event_setup(event, type, flags, pc);

        return event;
}

void tracer_tracing_on(struct trace_array *tr)
{
        if (tr->trace_buffer.buffer)
                ring_buffer_record_on(tr->trace_buffer.buffer);
        /*
         * This flag is looked at when buffers haven't been allocated
         * yet, or by some tracers (like irqsoff), that just want to
         * know if the ring buffer has been disabled, but it can handle
         * races of where it gets disabled but we still do a record.
         * As the check is in the fast path of the tracers, it is more
         * important to be fast than accurate.
         */
        tr->buffer_disabled = 0;
        /* Make the flag seen by readers */
        smp_wmb();
}

/**
 * tracing_on - enable tracing buffers
 *
 * This function enables tracing buffers that may have been
 * disabled with tracing_off.
 */
void tracing_on(void)
{
        tracer_tracing_on(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_on);


static __always_inline void
__buffer_unlock_commit(struct ring_buffer *buffer, struct ring_buffer_event *event)
{
        __this_cpu_write(trace_taskinfo_save, true);

        /* If this is the temp buffer, we need to commit fully */
        if (this_cpu_read(trace_buffered_event) == event) {
                /* Length is in event->array[0] */
                ring_buffer_write(buffer, event->array[0], &event->array[1]);
                /* Release the temp buffer */
                this_cpu_dec(trace_buffered_event_cnt);
        } else
                ring_buffer_unlock_commit(buffer, event);
}

/**
 * __trace_puts - write a constant string into the trace buffer.
 * @ip:    The address of the caller
 * @str:   The constant string to write
 * @size:  The size of the string.
 */
int __trace_puts(unsigned long ip, const char *str, int size)
{
        struct ring_buffer_event *event;
        struct ring_buffer *buffer;
        struct print_entry *entry;
        unsigned long irq_flags;
        int alloc;
        int pc;

        if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
                return 0;

        pc = preempt_count();

        if (unlikely(tracing_selftest_running || tracing_disabled))
                return 0;

        alloc = sizeof(*entry) + size + 2; /* possible \n added */

        local_save_flags(irq_flags);
        buffer = global_trace.trace_buffer.buffer;
        event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
                                            irq_flags, pc);
        if (!event)
                return 0;

        entry = ring_buffer_event_data(event);
        entry->ip = ip;

        memcpy(&entry->buf, str, size);

        /* Add a newline if necessary */
        if (entry->buf[size - 1] != '\n') {
                entry->buf[size] = '\n';
                entry->buf[size + 1] = '\0';
        } else
                entry->buf[size] = '\0';

        __buffer_unlock_commit(buffer, event);
        ftrace_trace_stack(&global_trace, buffer, irq_flags, 4, pc, NULL);

        return size;
}
EXPORT_SYMBOL_GPL(__trace_puts);

/**
 * __trace_bputs - write the pointer to a constant string into trace buffer
 * @ip:    The address of the caller
 * @str:   The constant string to write to the buffer
 */
int __trace_bputs(unsigned long ip, const char *str)
{
        struct ring_buffer_event *event;
        struct ring_buffer *buffer;
        struct bputs_entry *entry;
        unsigned long irq_flags;
        int size = sizeof(struct bputs_entry);
        int pc;

        if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
                return 0;

        pc = preempt_count();

        if (unlikely(tracing_selftest_running || tracing_disabled))
                return 0;

        local_save_flags(irq_flags);
        buffer = global_trace.trace_buffer.buffer;
        event = __trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
                                            irq_flags, pc);
        if (!event)
                return 0;

        entry = ring_buffer_event_data(event);
        entry->ip                       = ip;
        entry->str                      = str;

        __buffer_unlock_commit(buffer, event);
        ftrace_trace_stack(&global_trace, buffer, irq_flags, 4, pc, NULL);

        return 1;
}
EXPORT_SYMBOL_GPL(__trace_bputs);
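
/*
 * Callers normally reach these two through the trace_puts() macro,
 * which picks __trace_bputs() for string literals (only the pointer is
 * recorded) and __trace_puts() otherwise (the string is copied), e.g.:
 *
 *        trace_puts("reached the slow path\n");
 */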

#ifdef CONFIG_TRACER_SNAPSHOT
static void tracing_snapshot_instance(struct trace_array *tr)
{
        struct tracer *tracer = tr->current_trace;
        unsigned long flags;

        if (in_nmi()) {
                internal_trace_puts("*** SNAPSHOT CALLED FROM NMI CONTEXT ***\n");
                internal_trace_puts("*** snapshot is being ignored        ***\n");
                return;
        }

        if (!tr->allocated_snapshot) {
                internal_trace_puts("*** SNAPSHOT NOT ALLOCATED ***\n");
                internal_trace_puts("*** stopping trace here!   ***\n");
                tracing_off();
                return;
        }

        /* Note, snapshot can not be used when the tracer uses it */
        if (tracer->use_max_tr) {
                internal_trace_puts("*** LATENCY TRACER ACTIVE ***\n");
                internal_trace_puts("*** Can not use snapshot (sorry) ***\n");
                return;
        }

        local_irq_save(flags);
        update_max_tr(tr, current, smp_processor_id());
        local_irq_restore(flags);
}

/**
 * tracing_snapshot - take a snapshot of the current buffer.
 *
 * This causes a swap between the snapshot buffer and the current live
 * tracing buffer. You can use this to take snapshots of the live
 * trace when some condition is triggered, but continue to trace.
 *
 * Note, make sure to allocate the snapshot with either
 * tracing_snapshot_alloc(), or by doing it manually
 * with: echo 1 > /sys/kernel/debug/tracing/snapshot
 *
 * If the snapshot buffer is not allocated, it will stop tracing,
 * basically making a permanent snapshot.
 */
void tracing_snapshot(void)
{
        struct trace_array *tr = &global_trace;

        tracing_snapshot_instance(tr);
}
EXPORT_SYMBOL_GPL(tracing_snapshot);

static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
                                        struct trace_buffer *size_buf, int cpu_id);
static void set_buffer_entries(struct trace_buffer *buf, unsigned long val);

static int alloc_snapshot(struct trace_array *tr)
{
        int ret;

        if (!tr->allocated_snapshot) {

                /* allocate spare buffer */
                ret = resize_buffer_duplicate_size(&tr->max_buffer,
                                   &tr->trace_buffer, RING_BUFFER_ALL_CPUS);
                if (ret < 0)
                        return ret;

                tr->allocated_snapshot = true;
        }

        return 0;
}

static void free_snapshot(struct trace_array *tr)
{
        /*
         * We don't free the ring buffer; instead, we resize it, because
         * the max_tr ring buffer has some state (e.g. ring->clock) that
         * we want to preserve.
         */
        ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS);
        set_buffer_entries(&tr->max_buffer, 1);
        tracing_reset_online_cpus(&tr->max_buffer);
        tr->allocated_snapshot = false;
}

/**
 * tracing_alloc_snapshot - allocate snapshot buffer.
 *
 * This only allocates the snapshot buffer if it isn't already
 * allocated - it doesn't also take a snapshot.
 *
 * This is meant to be used in cases where the snapshot buffer needs
 * to be set up for events that can't sleep but need to be able to
 * trigger a snapshot.
 */
int tracing_alloc_snapshot(void)
{
        struct trace_array *tr = &global_trace;
        int ret;

        ret = alloc_snapshot(tr);
        WARN_ON(ret < 0);

        return ret;
}
EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);

/**
 * tracing_snapshot_alloc - allocate and take a snapshot of the current buffer.
 *
 * This is similar to tracing_snapshot(), but it will allocate the
 * snapshot buffer if it isn't already allocated. Use this only
 * where it is safe to sleep, as the allocation may sleep.
 *
 * This causes a swap between the snapshot buffer and the current live
 * tracing buffer. You can use this to take snapshots of the live
 * trace when some condition is triggered, but continue to trace.
 */
void tracing_snapshot_alloc(void)
{
        int ret;

        ret = tracing_alloc_snapshot();
        if (ret < 0)
                return;

        tracing_snapshot();
}
EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
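
/*
 * A minimal sketch of the intended pattern (assuming sleepable setup
 * code and a non-sleepable trigger site):
 *
 *        // at init time, where sleeping is allowed:
 *        tracing_alloc_snapshot();
 *
 *        // later, in atomic context, when the interesting condition hits:
 *        if (looks_suspicious)        // hypothetical condition
 *                tracing_snapshot();
 */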
#else
void tracing_snapshot(void)
{
        WARN_ONCE(1, "Snapshot feature not enabled, but internal snapshot used");
}
EXPORT_SYMBOL_GPL(tracing_snapshot);
int tracing_alloc_snapshot(void)
{
        WARN_ONCE(1, "Snapshot feature not enabled, but snapshot allocation used");
        return -ENODEV;
}
EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
void tracing_snapshot_alloc(void)
{
        /* Give warning */
        tracing_snapshot();
}
EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
#endif /* CONFIG_TRACER_SNAPSHOT */

void tracer_tracing_off(struct trace_array *tr)
{
        if (tr->trace_buffer.buffer)
                ring_buffer_record_off(tr->trace_buffer.buffer);
        /*
         * This flag is looked at when buffers haven't been allocated
         * yet, or by some tracers (like irqsoff), that just want to
         * know if the ring buffer has been disabled, but it can handle
         * races of where it gets disabled but we still do a record.
         * As the check is in the fast path of the tracers, it is more
         * important to be fast than accurate.
         */
        tr->buffer_disabled = 1;
        /* Make the flag seen by readers */
        smp_wmb();
}

/**
 * tracing_off - turn off tracing buffers
 *
 * This function stops the tracing buffers from recording data.
 * It does not disable any overhead the tracers themselves may
 * be causing. This function simply causes all recording to
 * the ring buffers to fail.
 */
void tracing_off(void)
{
        tracer_tracing_off(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_off);

void disable_trace_on_warning(void)
{
        if (__disable_trace_on_warning)
                tracing_off();
}

/**
 * tracer_tracing_is_on - show real state of ring buffer enabled
 * @tr: the trace array to know if ring buffer is enabled
 *
 * Shows the real state of the ring buffer: whether it is enabled or not.
 */
int tracer_tracing_is_on(struct trace_array *tr)
{
        if (tr->trace_buffer.buffer)
                return ring_buffer_record_is_on(tr->trace_buffer.buffer);
        return !tr->buffer_disabled;
}

/**
 * tracing_is_on - show state of ring buffers enabled
 */
int tracing_is_on(void)
{
        return tracer_tracing_is_on(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_is_on);

static int __init set_buf_size(char *str)
{
        unsigned long buf_size;

        if (!str)
                return 0;
        buf_size = memparse(str, &str);
        /* nr_entries cannot be zero */
        if (buf_size == 0)
                return 0;
        trace_buf_size = buf_size;
        return 1;
}
__setup("trace_buf_size=", set_buf_size);
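
/*
 * Example (illustrative): memparse() accepts the usual size suffixes,
 * so a boot command line can request a larger per-cpu buffer with e.g.:
 *
 *        trace_buf_size=1441792
 *        trace_buf_size=4M
 */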

static int __init set_tracing_thresh(char *str)
{
        unsigned long threshold;
        int ret;

        if (!str)
                return 0;
        ret = kstrtoul(str, 0, &threshold);
        if (ret < 0)
                return 0;
        tracing_thresh = threshold * 1000;
        return 1;
}
__setup("tracing_thresh=", set_tracing_thresh);

unsigned long nsecs_to_usecs(unsigned long nsecs)
{
        return nsecs / 1000;
}

/*
 * TRACE_FLAGS is defined as a tuple matching bit masks with strings.
 * It uses C(a, b) where 'a' is the eval (enum) name and 'b' is the string that
 * matches it. By defining "C(a, b) b", TRACE_FLAGS becomes a list
 * of strings in the order that the evals (enum) were defined.
 */
#undef C
#define C(a, b) b

/* These must match the bit positions in trace_iterator_flags */
static const char *trace_options[] = {
        TRACE_FLAGS
        NULL
};

static struct {
        u64 (*func)(void);
        const char *name;
        int in_ns;              /* is this clock in nanoseconds? */
} trace_clocks[] = {
        { trace_clock_local,            "local",        1 },
        { trace_clock_global,           "global",       1 },
        { trace_clock_counter,          "counter",      0 },
        { trace_clock_jiffies,          "uptime",       0 },
        { trace_clock,                  "perf",         1 },
        { ktime_get_mono_fast_ns,       "mono",         1 },
        { ktime_get_raw_fast_ns,        "mono_raw",     1 },
        { ktime_get_boot_fast_ns,       "boot",         1 },
        ARCH_TRACE_CLOCKS
};

/*
 * trace_parser_get_init - gets the buffer for trace parser
 */
int trace_parser_get_init(struct trace_parser *parser, int size)
{
        memset(parser, 0, sizeof(*parser));

        parser->buffer = kmalloc(size, GFP_KERNEL);
        if (!parser->buffer)
                return 1;

        parser->size = size;
        return 0;
}

/*
 * trace_parser_put - frees the buffer for trace parser
 */
void trace_parser_put(struct trace_parser *parser)
{
        kfree(parser->buffer);
        parser->buffer = NULL;
}

/*
 * trace_get_user - reads the user input string separated by space
 * (matched by isspace(ch))
 *
 * For each string found the 'struct trace_parser' is updated,
 * and the function returns.
 *
 * Returns number of bytes read.
 *
 * See kernel/trace/trace.h for 'struct trace_parser' details.
 */
int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
        size_t cnt, loff_t *ppos)
{
        char ch;
        size_t read = 0;
        ssize_t ret;

        if (!*ppos)
                trace_parser_clear(parser);

        ret = get_user(ch, ubuf++);
        if (ret)
                goto out;

        read++;
        cnt--;

        /*
         * If the parser is not finished with the last write,
         * continue reading the user input without skipping spaces.
         */
        if (!parser->cont) {
                /* skip white space */
                while (cnt && isspace(ch)) {
                        ret = get_user(ch, ubuf++);
                        if (ret)
                                goto out;
                        read++;
                        cnt--;
                }

                /* only spaces were written */
                if (isspace(ch)) {
                        *ppos += read;
                        ret = read;
                        goto out;
                }

                parser->idx = 0;
        }

        /* read the non-space input */
        while (cnt && !isspace(ch)) {
                if (parser->idx < parser->size - 1)
                        parser->buffer[parser->idx++] = ch;
                else {
                        ret = -EINVAL;
                        goto out;
                }
                ret = get_user(ch, ubuf++);
                if (ret)
                        goto out;
                read++;
                cnt--;
        }

        /* We either got finished input or we have to wait for another call. */
        if (isspace(ch)) {
                parser->buffer[parser->idx] = 0;
                parser->cont = false;
        } else if (parser->idx < parser->size - 1) {
                parser->cont = true;
                parser->buffer[parser->idx++] = ch;
        } else {
                ret = -EINVAL;
                goto out;
        }

        *ppos += read;
        ret = read;

out:
        return ret;
}
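
/*
 * A sketch of the usual parser loop, mirroring trace_pid_write() above:
 * one whitespace-separated token is extracted per call until the user
 * buffer is drained:
 *
 *        struct trace_parser parser;
 *        loff_t pos = 0;
 *
 *        if (trace_parser_get_init(&parser, PID_BUF_SIZE + 1))
 *                return -ENOMEM;
 *        while (cnt > 0) {
 *                ret = trace_get_user(&parser, ubuf, cnt, &pos);
 *                if (ret < 0 || !trace_parser_loaded(&parser))
 *                        break;
 *                ubuf += ret;
 *                cnt -= ret;
 *                // parser.buffer now holds one token
 *        }
 *        trace_parser_put(&parser);
 */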

/* TODO add a seq_buf_to_buffer() */
static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
{
        int len;

        if (trace_seq_used(s) <= s->seq.readpos)
                return -EBUSY;

        len = trace_seq_used(s) - s->seq.readpos;
        if (cnt > len)
                cnt = len;
        memcpy(buf, s->buffer + s->seq.readpos, cnt);

        s->seq.readpos += cnt;
        return cnt;
}

unsigned long __read_mostly     tracing_thresh;

#ifdef CONFIG_TRACER_MAX_TRACE
/*
 * Copy the new maximum trace into the separate maximum-trace
 * structure. (this way the maximum trace is permanently saved,
 * for later retrieval via /sys/kernel/debug/tracing/latency_trace)
 */
static void
__update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
        struct trace_buffer *trace_buf = &tr->trace_buffer;
        struct trace_buffer *max_buf = &tr->max_buffer;
        struct trace_array_cpu *data = per_cpu_ptr(trace_buf->data, cpu);
        struct trace_array_cpu *max_data = per_cpu_ptr(max_buf->data, cpu);

        max_buf->cpu = cpu;
        max_buf->time_start = data->preempt_timestamp;

        max_data->saved_latency = tr->max_latency;
        max_data->critical_start = data->critical_start;
        max_data->critical_end = data->critical_end;

        memcpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
        max_data->pid = tsk->pid;
        /*
         * If tsk == current, then use current_uid(), as that does not use
         * RCU. The irq tracer can be called out of RCU scope.
         */
        if (tsk == current)
                max_data->uid = current_uid();
        else
                max_data->uid = task_uid(tsk);

        max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
        max_data->policy = tsk->policy;
        max_data->rt_priority = tsk->rt_priority;

        /* record this task's comm */
        tracing_record_cmdline(tsk);
}

/**
 * update_max_tr - snapshot all trace buffers from global_trace to max_tr
 * @tr: tracer
 * @tsk: the task with the latency
 * @cpu: The cpu that initiated the trace.
 *
 * Flip the buffers between the @tr and the max_tr and record information
 * about which task was the cause of this latency.
 */
void
update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
        struct ring_buffer *buf;

        if (tr->stop_count)
                return;

        WARN_ON_ONCE(!irqs_disabled());

        if (!tr->allocated_snapshot) {
                /* Only the nop tracer should hit this when disabling */
                WARN_ON_ONCE(tr->current_trace != &nop_trace);
                return;
        }

        arch_spin_lock(&tr->max_lock);

        buf = tr->trace_buffer.buffer;
        tr->trace_buffer.buffer = tr->max_buffer.buffer;
        tr->max_buffer.buffer = buf;

        __update_max_tr(tr, tsk, cpu);
        arch_spin_unlock(&tr->max_lock);
}
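
/*
 * A sketch of the expected calling convention, mirroring
 * tracing_snapshot_instance() above: callers swap the buffers with
 * interrupts disabled on the local CPU:
 *
 *        local_irq_save(flags);
 *        update_max_tr(tr, current, smp_processor_id());
 *        local_irq_restore(flags);
 */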

/**
 * update_max_tr_single - only copy one trace over, and reset the rest
 * @tr: tracer
 * @tsk: task with the latency
 * @cpu: the cpu of the buffer to copy.
 *
 * Flip the trace of a single CPU buffer between the @tr and the max_tr.
 */
void
update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
        int ret;

        if (tr->stop_count)
                return;

        WARN_ON_ONCE(!irqs_disabled());
        if (!tr->allocated_snapshot) {
                /* Only the nop tracer should hit this when disabling */
                WARN_ON_ONCE(tr->current_trace != &nop_trace);
                return;
        }

        arch_spin_lock(&tr->max_lock);

        ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->trace_buffer.buffer, cpu);

        if (ret == -EBUSY) {
                /*
                 * We failed to swap the buffer due to a commit taking
                 * place on this CPU. We fail to record, but we reset
                 * the max trace buffer (no one writes directly to it)
                 * and flag that it failed.
                 */
                trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_,
                        "Failed to swap buffers due to commit in progress\n");
        }

        WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);

        __update_max_tr(tr, tsk, cpu);
        arch_spin_unlock(&tr->max_lock);
}
#endif /* CONFIG_TRACER_MAX_TRACE */

static int wait_on_pipe(struct trace_iterator *iter, bool full)
{
        /* Iterators are static, they should be filled or empty */
        if (trace_buffer_iter(iter, iter->cpu_file))
                return 0;

        return ring_buffer_wait(iter->trace_buffer->buffer, iter->cpu_file,
                                full);
}

#ifdef CONFIG_FTRACE_STARTUP_TEST
static bool selftests_can_run;

struct trace_selftests {
        struct list_head                list;
        struct tracer                   *type;
};

static LIST_HEAD(postponed_selftests);

static int save_selftest(struct tracer *type)
{
        struct trace_selftests *selftest;

        selftest = kmalloc(sizeof(*selftest), GFP_KERNEL);
        if (!selftest)
                return -ENOMEM;

        selftest->type = type;
        list_add(&selftest->list, &postponed_selftests);
        return 0;
}

static int run_tracer_selftest(struct tracer *type)
{
        struct trace_array *tr = &global_trace;
        struct tracer *saved_tracer = tr->current_trace;
        int ret;

        if (!type->selftest || tracing_selftest_disabled)
                return 0;

        /*
         * If a tracer registers early in boot up (before scheduling is
         * initialized and such), then do not run its selftests yet.
         * Instead, run it a little later in the boot process.
         */
        if (!selftests_can_run)
                return save_selftest(type);

        /*
         * Run a selftest on this tracer.
         * Here we reset the trace buffer, and set the current
         * tracer to be this tracer. The tracer can then run some
         * internal tracing to verify that everything is in order.
         * If we fail, we do not register this tracer.
         */
        tracing_reset_online_cpus(&tr->trace_buffer);

        tr->current_trace = type;

#ifdef CONFIG_TRACER_MAX_TRACE
        if (type->use_max_tr) {
                /* If we expanded the buffers, make sure the max is expanded too */
                if (ring_buffer_expanded)
                        ring_buffer_resize(tr->max_buffer.buffer, trace_buf_size,
                                           RING_BUFFER_ALL_CPUS);
                tr->allocated_snapshot = true;
        }
#endif

        /* the test is responsible for initializing and enabling */
        pr_info("Testing tracer %s: ", type->name);
        ret = type->selftest(type, tr);
        /* the test is responsible for resetting too */
        tr->current_trace = saved_tracer;
        if (ret) {
                printk(KERN_CONT "FAILED!\n");
                /* Add the warning after printing 'FAILED' */
                WARN_ON(1);
                return -1;
        }
        /* Only reset on passing, to avoid touching corrupted buffers */
        tracing_reset_online_cpus(&tr->trace_buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
        if (type->use_max_tr) {
                tr->allocated_snapshot = false;

                /* Shrink the max buffer again */
                if (ring_buffer_expanded)
                        ring_buffer_resize(tr->max_buffer.buffer, 1,
                                           RING_BUFFER_ALL_CPUS);
        }
#endif

        printk(KERN_CONT "PASSED\n");
        return 0;
}

static __init int init_trace_selftests(void)
{
        struct trace_selftests *p, *n;
        struct tracer *t, **last;
        int ret;

        selftests_can_run = true;

        mutex_lock(&trace_types_lock);

        if (list_empty(&postponed_selftests))
                goto out;

        pr_info("Running postponed tracer tests:\n");

        list_for_each_entry_safe(p, n, &postponed_selftests, list) {
                ret = run_tracer_selftest(p->type);
                /* If the test fails, then warn and remove from available_tracers */
                if (ret < 0) {
                        WARN(1, "tracer: %s failed selftest, disabling\n",
                             p->type->name);
                        last = &trace_types;
                        for (t = trace_types; t; t = t->next) {
                                if (t == p->type) {
                                        *last = t->next;
                                        break;
                                }
                                last = &t->next;
                        }
                }
                list_del(&p->list);
                kfree(p);
        }

 out:
        mutex_unlock(&trace_types_lock);

        return 0;
}
core_initcall(init_trace_selftests);
#else
static inline int run_tracer_selftest(struct tracer *type)
{
        return 0;
}
#endif /* CONFIG_FTRACE_STARTUP_TEST */

static void add_tracer_options(struct trace_array *tr, struct tracer *t);

static void __init apply_trace_boot_options(void);

/**
 * register_tracer - register a tracer with the ftrace system.
 * @type: the plugin for the tracer
 *
 * Register a new plugin tracer.
 */
int __init register_tracer(struct tracer *type)
{
        struct tracer *t;
        int ret = 0;

        if (!type->name) {
                pr_info("Tracer must have a name\n");
                return -1;
        }

        if (strlen(type->name) >= MAX_TRACER_SIZE) {
                pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE);
                return -1;
        }

        mutex_lock(&trace_types_lock);

        tracing_selftest_running = true;

        for (t = trace_types; t; t = t->next) {
                if (strcmp(type->name, t->name) == 0) {
                        /* already found */
                        pr_info("Tracer %s already registered\n",
                                type->name);
                        ret = -1;
                        goto out;
                }
        }

        if (!type->set_flag)
                type->set_flag = &dummy_set_flag;
        if (!type->flags) {
                /* allocate a dummy tracer_flags */
                type->flags = kmalloc(sizeof(*type->flags), GFP_KERNEL);
                if (!type->flags) {
                        ret = -ENOMEM;
                        goto out;
                }
                type->flags->val = 0;
                type->flags->opts = dummy_tracer_opt;
        } else
                if (!type->flags->opts)
                        type->flags->opts = dummy_tracer_opt;

        /* store the tracer for __set_tracer_option */
        type->flags->trace = type;

        ret = run_tracer_selftest(type);
        if (ret < 0)
                goto out;

        type->next = trace_types;
        trace_types = type;
        add_tracer_options(&global_trace, type);

 out:
        tracing_selftest_running = false;
        mutex_unlock(&trace_types_lock);

        if (ret || !default_bootup_tracer)
                goto out_unlock;

        if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE))
                goto out_unlock;

        printk(KERN_INFO "Starting tracer '%s'\n", type->name);
        /* Do we want this tracer to start on bootup? */
        tracing_set_tracer(&global_trace, type->name);
        default_bootup_tracer = NULL;

        apply_trace_boot_options();

        /* disable other selftests, since this tracer will break them */
        tracing_selftest_disabled = true;
#ifdef CONFIG_FTRACE_STARTUP_TEST
        printk(KERN_INFO "Disabling FTRACE selftests due to running tracer '%s'\n",
               type->name);
#endif

 out_unlock:
        return ret;
}
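
/*
 * A minimal registration sketch (the tracer struct fields shown are
 * the common ones; "mytracer" and its callbacks are hypothetical):
 *
 *        static struct tracer mytracer __read_mostly = {
 *                .name   = "mytracer",
 *                .init   = mytracer_init,      // set up and start tracing
 *                .reset  = mytracer_reset,     // tear down
 *        };
 *
 *        static __init int init_mytracer(void)
 *        {
 *                return register_tracer(&mytracer);
 *        }
 *        core_initcall(init_mytracer);
 */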
1661
1662void tracing_reset(struct trace_buffer *buf, int cpu)
1663{
1664        struct ring_buffer *buffer = buf->buffer;
1665
1666        if (!buffer)
1667                return;
1668
1669        ring_buffer_record_disable(buffer);
1670
1671        /* Make sure all commits have finished */
1672        synchronize_sched();
1673        ring_buffer_reset_cpu(buffer, cpu);
1674
1675        ring_buffer_record_enable(buffer);
1676}
1677
1678void tracing_reset_online_cpus(struct trace_buffer *buf)
1679{
1680        struct ring_buffer *buffer = buf->buffer;
1681        int cpu;
1682
1683        if (!buffer)
1684                return;
1685
1686        ring_buffer_record_disable(buffer);
1687
1688        /* Make sure all commits have finished */
1689        synchronize_sched();
1690
1691        buf->time_start = buffer_ftrace_now(buf, buf->cpu);
1692
1693        for_each_online_cpu(cpu)
1694                ring_buffer_reset_cpu(buffer, cpu);
1695
1696        ring_buffer_record_enable(buffer);
1697}
1698
1699/* Must have trace_types_lock held */
1700void tracing_reset_all_online_cpus(void)
1701{
1702        struct trace_array *tr;
1703
1704        list_for_each_entry(tr, &ftrace_trace_arrays, list) {
1705                if (!tr->clear_trace)
1706                        continue;
1707                tr->clear_trace = false;
1708                tracing_reset_online_cpus(&tr->trace_buffer);
1709#ifdef CONFIG_TRACER_MAX_TRACE
1710                tracing_reset_online_cpus(&tr->max_buffer);
1711#endif
1712        }
1713}
1714
1715static int *tgid_map;
1716
1717#define SAVED_CMDLINES_DEFAULT 128
1718#define NO_CMDLINE_MAP UINT_MAX
1719static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;
1720struct saved_cmdlines_buffer {
1721        unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
1722        unsigned *map_cmdline_to_pid;
1723        unsigned cmdline_num;
1724        int cmdline_idx;
1725        char *saved_cmdlines;
1726};
1727static struct saved_cmdlines_buffer *savedcmd;
1728
1729/* temporarily disable recording */
1730static atomic_t trace_record_taskinfo_disabled __read_mostly;
1731
1732static inline char *get_saved_cmdlines(int idx)
1733{
1734        return &savedcmd->saved_cmdlines[idx * TASK_COMM_LEN];
1735}
1736
1737static inline void set_cmdline(int idx, const char *cmdline)
1738{
1739        memcpy(get_saved_cmdlines(idx), cmdline, TASK_COMM_LEN);
1740}
1741
1742static int allocate_cmdlines_buffer(unsigned int val,
1743                                    struct saved_cmdlines_buffer *s)
1744{
1745        s->map_cmdline_to_pid = kmalloc(val * sizeof(*s->map_cmdline_to_pid),
1746                                        GFP_KERNEL);
1747        if (!s->map_cmdline_to_pid)
1748                return -ENOMEM;
1749
1750        s->saved_cmdlines = kmalloc(val * TASK_COMM_LEN, GFP_KERNEL);
1751        if (!s->saved_cmdlines) {
1752                kfree(s->map_cmdline_to_pid);
1753                return -ENOMEM;
1754        }
1755
1756        s->cmdline_idx = 0;
1757        s->cmdline_num = val;
1758        memset(&s->map_pid_to_cmdline, NO_CMDLINE_MAP,
1759               sizeof(s->map_pid_to_cmdline));
1760        memset(s->map_cmdline_to_pid, NO_CMDLINE_MAP,
1761               val * sizeof(*s->map_cmdline_to_pid));
1762
1763        return 0;
1764}
1765
1766static int trace_create_savedcmd(void)
1767{
1768        int ret;
1769
1770        savedcmd = kmalloc(sizeof(*savedcmd), GFP_KERNEL);
1771        if (!savedcmd)
1772                return -ENOMEM;
1773
1774        ret = allocate_cmdlines_buffer(SAVED_CMDLINES_DEFAULT, savedcmd);
1775        if (ret < 0) {
1776                kfree(savedcmd);
1777                savedcmd = NULL;
1778                return -ENOMEM;
1779        }
1780
1781        return 0;
1782}
1783
1784int is_tracing_stopped(void)
1785{
1786        return global_trace.stop_count;
1787}
1788
1789/**
1790 * tracing_start - quick start of the tracer
1791 *
1792 * If tracing is enabled but was stopped by tracing_stop,
1793 * this will start the tracer back up.
1794 */
1795void tracing_start(void)
1796{
1797        struct ring_buffer *buffer;
1798        unsigned long flags;
1799
1800        if (tracing_disabled)
1801                return;
1802
1803        raw_spin_lock_irqsave(&global_trace.start_lock, flags);
1804        if (--global_trace.stop_count) {
1805                if (global_trace.stop_count < 0) {
1806                        /* Someone screwed up their debugging */
1807                        WARN_ON_ONCE(1);
1808                        global_trace.stop_count = 0;
1809                }
1810                goto out;
1811        }
1812
1813        /* Prevent the buffers from switching */
1814        arch_spin_lock(&global_trace.max_lock);
1815
1816        buffer = global_trace.trace_buffer.buffer;
1817        if (buffer)
1818                ring_buffer_record_enable(buffer);
1819
1820#ifdef CONFIG_TRACER_MAX_TRACE
1821        buffer = global_trace.max_buffer.buffer;
1822        if (buffer)
1823                ring_buffer_record_enable(buffer);
1824#endif
1825
1826        arch_spin_unlock(&global_trace.max_lock);
1827
1828 out:
1829        raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
1830}
1831
1832static void tracing_start_tr(struct trace_array *tr)
1833{
1834        struct ring_buffer *buffer;
1835        unsigned long flags;
1836
1837        if (tracing_disabled)
1838                return;
1839
1840        /* If global, we need to also start the max tracer */
1841        if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
1842                return tracing_start();
1843
1844        raw_spin_lock_irqsave(&tr->start_lock, flags);
1845
1846        if (--tr->stop_count) {
1847                if (tr->stop_count < 0) {
1848                        /* Someone screwed up their debugging */
1849                        WARN_ON_ONCE(1);
1850                        tr->stop_count = 0;
1851                }
1852                goto out;
1853        }
1854
1855        buffer = tr->trace_buffer.buffer;
1856        if (buffer)
1857                ring_buffer_record_enable(buffer);
1858
1859 out:
1860        raw_spin_unlock_irqrestore(&tr->start_lock, flags);
1861}
1862
1863/**
1864 * tracing_stop - quick stop of the tracer
1865 *
1866 * Lightweight way to stop tracing. Use in conjunction with
1867 * tracing_start.
1868 */
1869void tracing_stop(void)
1870{
1871        struct ring_buffer *buffer;
1872        unsigned long flags;
1873
1874        raw_spin_lock_irqsave(&global_trace.start_lock, flags);
1875        if (global_trace.stop_count++)
1876                goto out;
1877
1878        /* Prevent the buffers from switching */
1879        arch_spin_lock(&global_trace.max_lock);
1880
1881        buffer = global_trace.trace_buffer.buffer;
1882        if (buffer)
1883                ring_buffer_record_disable(buffer);
1884
1885#ifdef CONFIG_TRACER_MAX_TRACE
1886        buffer = global_trace.max_buffer.buffer;
1887        if (buffer)
1888                ring_buffer_record_disable(buffer);
1889#endif
1890
1891        arch_spin_unlock(&global_trace.max_lock);
1892
1893 out:
1894        raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
1895}
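
Since stop_count nests, callers bracket a quiet region with a matching pair; a usage sketch:

	tracing_stop();		/* each stop must be paired with a start */
	/* ... inspect or copy the buffers while recording is off ... */
	tracing_start();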
1896
1897static void tracing_stop_tr(struct trace_array *tr)
1898{
1899        struct ring_buffer *buffer;
1900        unsigned long flags;
1901
1902        /* If global, we need to also stop the max tracer */
1903        if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
1904                return tracing_stop();
1905
1906        raw_spin_lock_irqsave(&tr->start_lock, flags);
1907        if (tr->stop_count++)
1908                goto out;
1909
1910        buffer = tr->trace_buffer.buffer;
1911        if (buffer)
1912                ring_buffer_record_disable(buffer);
1913
1914 out:
1915        raw_spin_unlock_irqrestore(&tr->start_lock, flags);
1916}
1917
1918static int trace_save_cmdline(struct task_struct *tsk)
1919{
1920        unsigned pid, idx;
1921
1922        /* treat recording of idle task as a success */
1923        if (!tsk->pid)
1924                return 1;
1925
1926        if (unlikely(tsk->pid > PID_MAX_DEFAULT))
1927                return 0;
1928
1929        /*
1930         * It's not the end of the world if we don't get
1931         * the lock, but we also don't want to spin
1932         * nor do we want to disable interrupts,
1933         * so if we miss here, then better luck next time.
1934         */
1935        if (!arch_spin_trylock(&trace_cmdline_lock))
1936                return 0;
1937
1938        idx = savedcmd->map_pid_to_cmdline[tsk->pid];
1939        if (idx == NO_CMDLINE_MAP) {
1940                idx = (savedcmd->cmdline_idx + 1) % savedcmd->cmdline_num;
1941
1942                /*
1943                 * Check whether the cmdline buffer at idx has a pid
1944                 * mapped. We are going to overwrite that entry so we
1945                 * need to clear the map_pid_to_cmdline. Otherwise we
1946                 * would read the new comm for the old pid.
1947                 */
1948                pid = savedcmd->map_cmdline_to_pid[idx];
1949                if (pid != NO_CMDLINE_MAP)
1950                        savedcmd->map_pid_to_cmdline[pid] = NO_CMDLINE_MAP;
1951
1952                savedcmd->map_cmdline_to_pid[idx] = tsk->pid;
1953                savedcmd->map_pid_to_cmdline[tsk->pid] = idx;
1954
1955                savedcmd->cmdline_idx = idx;
1956        }
1957
1958        set_cmdline(idx, tsk->comm);
1959
1960        arch_spin_unlock(&trace_cmdline_lock);
1961
1962        return 1;
1963}
1964
1965static void __trace_find_cmdline(int pid, char comm[])
1966{
1967        unsigned map;
1968
1969        if (!pid) {
1970                strcpy(comm, "<idle>");
1971                return;
1972        }
1973
1974        if (WARN_ON_ONCE(pid < 0)) {
1975                strcpy(comm, "<XXX>");
1976                return;
1977        }
1978
1979        if (pid > PID_MAX_DEFAULT) {
1980                strcpy(comm, "<...>");
1981                return;
1982        }
1983
1984        map = savedcmd->map_pid_to_cmdline[pid];
1985        if (map != NO_CMDLINE_MAP)
1986                strlcpy(comm, get_saved_cmdlines(map), TASK_COMM_LEN);
1987        else
1988                strcpy(comm, "<...>");
1989}
1990
1991void trace_find_cmdline(int pid, char comm[])
1992{
1993        preempt_disable();
1994        arch_spin_lock(&trace_cmdline_lock);
1995
1996        __trace_find_cmdline(pid, comm);
1997
1998        arch_spin_unlock(&trace_cmdline_lock);
1999        preempt_enable();
2000}
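
A typical lookup from output code, sketched here with an assumed pid taken from a trace entry; the destination must hold at least TASK_COMM_LEN bytes, since the cache stores fixed-size records:

	char comm[TASK_COMM_LEN];

	trace_find_cmdline(pid, comm);	/* yields "<idle>", "<...>", or the saved comm */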
2001
2002int trace_find_tgid(int pid)
2003{
2004        if (unlikely(!tgid_map || !pid || pid > PID_MAX_DEFAULT))
2005                return 0;
2006
2007        return tgid_map[pid];
2008}
2009
2010static int trace_save_tgid(struct task_struct *tsk)
2011{
2012        /* treat recording of idle task as a success */
2013        if (!tsk->pid)
2014                return 1;
2015
2016        if (unlikely(!tgid_map || tsk->pid > PID_MAX_DEFAULT))
2017                return 0;
2018
2019        tgid_map[tsk->pid] = tsk->tgid;
2020        return 1;
2021}
2022
2023static bool tracing_record_taskinfo_skip(int flags)
2024{
2025        if (unlikely(!(flags & (TRACE_RECORD_CMDLINE | TRACE_RECORD_TGID))))
2026                return true;
2027        if (atomic_read(&trace_record_taskinfo_disabled) || !tracing_is_on())
2028                return true;
2029        if (!__this_cpu_read(trace_taskinfo_save))
2030                return true;
2031        return false;
2032}
2033
2034/**
2035 * tracing_record_taskinfo - record the task info of a task
2036 *
2037 * @task  - task to record
2038 * @flags - TRACE_RECORD_CMDLINE for recording comm
2039 *        - TRACE_RECORD_TGID for recording tgid
2040 */
2041void tracing_record_taskinfo(struct task_struct *task, int flags)
2042{
2043        bool done;
2044
2045        if (tracing_record_taskinfo_skip(flags))
2046                return;
2047
2048        /*
2049         * Record as much task information as possible. If some fail, continue
2050         * to try to record the others.
2051         */
2052        done = !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(task);
2053        done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(task);
2054
2055        /* If recording any information failed, retry again soon. */
2056        if (!done)
2057                return;
2058
2059        __this_cpu_write(trace_taskinfo_save, false);
2060}
2061
2062/**
2063 * tracing_record_taskinfo_sched_switch - record task info for sched_switch
2064 *
2065 * @prev - previous task during sched_switch
2066 * @next - next task during sched_switch
2067 * @flags - TRACE_RECORD_CMDLINE for recording comm
2068 *          TRACE_RECORD_TGID for recording tgid
2069 */
2070void tracing_record_taskinfo_sched_switch(struct task_struct *prev,
2071                                          struct task_struct *next, int flags)
2072{
2073        bool done;
2074
2075        if (tracing_record_taskinfo_skip(flags))
2076                return;
2077
2078        /*
2079         * Record as much task information as possible. If some fail, continue
2080         * to try to record the others.
2081         */
2082        done  = !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(prev);
2083        done &= !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(next);
2084        done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(prev);
2085        done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(next);
2086
2087        /* If recording any information failed, retry again soon. */
2088        if (!done)
2089                return;
2090
2091        __this_cpu_write(trace_taskinfo_save, false);
2092}
2093
2094/* Helpers to record a specific task information */
2095void tracing_record_cmdline(struct task_struct *task)
2096{
2097        tracing_record_taskinfo(task, TRACE_RECORD_CMDLINE);
2098}
2099
2100void tracing_record_tgid(struct task_struct *task)
2101{
2102        tracing_record_taskinfo(task, TRACE_RECORD_TGID);
2103}
2104
2105/*
2106 * Several functions return TRACE_TYPE_PARTIAL_LINE if the trace_seq
2107 * overflowed, and TRACE_TYPE_HANDLED otherwise. This helper function
2108 * simplifies those functions and keeps them in sync.
2109 */
2110enum print_line_t trace_handle_return(struct trace_seq *s)
2111{
2112        return trace_seq_has_overflowed(s) ?
2113                TRACE_TYPE_PARTIAL_LINE : TRACE_TYPE_HANDLED;
2114}
2115EXPORT_SYMBOL_GPL(trace_handle_return);
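
A sketch of how an event's output callback uses this helper; the event and the printed format are hypothetical, but the pattern matches the handlers in trace_output.c:

	static enum print_line_t
	trace_example_print(struct trace_iterator *iter, int flags,
			    struct trace_event *event)
	{
		struct trace_seq *s = &iter->seq;

		trace_seq_printf(s, "example: pid=%d\n", iter->ent->pid);

		/* PARTIAL_LINE if s overflowed, HANDLED otherwise */
		return trace_handle_return(s);
	}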
2116
2117void
2118tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
2119                             int pc)
2120{
2121        struct task_struct *tsk = current;
2122
2123        entry->preempt_count            = pc & 0xff;
2124        entry->pid                      = (tsk) ? tsk->pid : 0;
2125        entry->flags =
2126#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
2127                (irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
2128#else
2129                TRACE_FLAG_IRQS_NOSUPPORT |
2130#endif
2131                ((pc & NMI_MASK    ) ? TRACE_FLAG_NMI     : 0) |
2132                ((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
2133                ((pc & SOFTIRQ_OFFSET) ? TRACE_FLAG_SOFTIRQ : 0) |
2134                (tif_need_resched() ? TRACE_FLAG_NEED_RESCHED : 0) |
2135                (test_preempt_need_resched() ? TRACE_FLAG_PREEMPT_RESCHED : 0);
2136}
2137EXPORT_SYMBOL_GPL(tracing_generic_entry_update);
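
The packed flags are decoded again at output time. A minimal sketch of the decoding direction, using only the flag bits set above (the helper name is hypothetical; the real latency-format decoding lives in trace_output.c):

	static char irqs_off_char(unsigned char trace_flags)
	{
		if (trace_flags & TRACE_FLAG_IRQS_OFF)
			return 'd';	/* interrupts disabled */
		if (trace_flags & TRACE_FLAG_IRQS_NOSUPPORT)
			return 'X';	/* arch cannot report irq state */
		return '.';
	}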
2138
2139struct ring_buffer_event *
2140trace_buffer_lock_reserve(struct ring_buffer *buffer,
2141                          int type,
2142                          unsigned long len,
2143                          unsigned long flags, int pc)
2144{
2145        return __trace_buffer_lock_reserve(buffer, type, len, flags, pc);
2146}
2147
2148DEFINE_PER_CPU(struct ring_buffer_event *, trace_buffered_event);
2149DEFINE_PER_CPU(int, trace_buffered_event_cnt);
2150static int trace_buffered_event_ref;
2151
2152/**
2153 * trace_buffered_event_enable - enable buffering events
2154 *
2155 * When events are being filtered, it is quicker to use a temporary
2156 * buffer to write the event data into if there's a likely chance
2157 * that it will not be committed. The discard of the ring buffer
2158 * is not as fast as committing, and is much slower than copying
2159 * the data and then committing it.
2160 *
2161 * When an event is to be filtered, allocate per cpu buffers to
2162 * write the event data into, and if the event is filtered and discarded
2163 * it is simply dropped, otherwise the entire data is committed
2164 * in one shot.
2165 */
2166void trace_buffered_event_enable(void)
2167{
2168        struct ring_buffer_event *event;
2169        struct page *page;
2170        int cpu;
2171
2172        WARN_ON_ONCE(!mutex_is_locked(&event_mutex));
2173
2174        if (trace_buffered_event_ref++)
2175                return;
2176
2177        for_each_tracing_cpu(cpu) {
2178                page = alloc_pages_node(cpu_to_node(cpu),
2179                                        GFP_KERNEL | __GFP_NORETRY, 0);
2180                if (!page)
2181                        goto failed;
2182
2183                event = page_address(page);
2184                memset(event, 0, sizeof(*event));
2185
2186                per_cpu(trace_buffered_event, cpu) = event;
2187
2188                preempt_disable();
2189                if (cpu == smp_processor_id() &&
2190                    this_cpu_read(trace_buffered_event) !=
2191                    per_cpu(trace_buffered_event, cpu))
2192                        WARN_ON_ONCE(1);
2193                preempt_enable();
2194        }
2195
2196        return;
2197 failed:
2198        trace_buffered_event_disable();
2199}
2200
2201static void enable_trace_buffered_event(void *data)
2202{
2203        /* Probably not needed, but do it anyway */
2204        smp_rmb();
2205        this_cpu_dec(trace_buffered_event_cnt);
2206}
2207
2208static void disable_trace_buffered_event(void *data)
2209{
2210        this_cpu_inc(trace_buffered_event_cnt);
2211}
2212
2213/**
2214 * trace_buffered_event_disable - disable buffering events
2215 *
2216 * When a filter is removed, it is faster to not use the buffered
2217 * events, and to commit directly into the ring buffer. Free up
2218 * the temp buffers when there are no more users. This requires
2219 * special synchronization with current events.
2220 */
2221void trace_buffered_event_disable(void)
2222{
2223        int cpu;
2224
2225        WARN_ON_ONCE(!mutex_is_locked(&event_mutex));
2226
2227        if (WARN_ON_ONCE(!trace_buffered_event_ref))
2228                return;
2229
2230        if (--trace_buffered_event_ref)
2231                return;
2232
2233        preempt_disable();
2234        /* For each CPU, set the buffer as used. */
2235        smp_call_function_many(tracing_buffer_mask,
2236                               disable_trace_buffered_event, NULL, 1);
2237        preempt_enable();
2238
2239        /* Wait for all current users to finish */
2240        synchronize_sched();
2241
2242        for_each_tracing_cpu(cpu) {
2243                free_page((unsigned long)per_cpu(trace_buffered_event, cpu));
2244                per_cpu(trace_buffered_event, cpu) = NULL;
2245        }
2246        /*
2247         * Make sure trace_buffered_event is NULL before clearing
2248         * trace_buffered_event_cnt.
2249         */
2250        smp_wmb();
2251
2252        preempt_disable();
2253        /* Do the work on each cpu */
2254        smp_call_function_many(tracing_buffer_mask,
2255                               enable_trace_buffered_event, NULL, 1);
2256        preempt_enable();
2257}
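
Both functions are reference counted and must run under event_mutex, so callers pair them; a sketch of a hypothetical call site:

	mutex_lock(&event_mutex);
	trace_buffered_event_enable();	/* first caller allocates per-cpu pages */
	/* ... install an event filter ... */
	mutex_unlock(&event_mutex);

	/* later, when the filter is removed: */
	mutex_lock(&event_mutex);
	trace_buffered_event_disable();	/* last caller frees the pages */
	mutex_unlock(&event_mutex);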
2258
2259static struct ring_buffer *temp_buffer;
2260
2261struct ring_buffer_event *
2262trace_event_buffer_lock_reserve(struct ring_buffer **current_rb,
2263                          struct trace_event_file *trace_file,
2264                          int type, unsigned long len,
2265                          unsigned long flags, int pc)
2266{
2267        struct ring_buffer_event *entry;
2268        int val;
2269
2270        *current_rb = trace_file->tr->trace_buffer.buffer;
2271
2272        if ((trace_file->flags &
2273             (EVENT_FILE_FL_SOFT_DISABLED | EVENT_FILE_FL_FILTERED)) &&
2274            (entry = this_cpu_read(trace_buffered_event))) {
2275                /* Try to use the per cpu buffer first */
2276                val = this_cpu_inc_return(trace_buffered_event_cnt);
2277                if (val == 1) {
2278                        trace_event_setup(entry, type, flags, pc);
2279                        entry->array[0] = len;
2280                        return entry;
2281                }
2282                this_cpu_dec(trace_buffered_event_cnt);
2283        }
2284
2285        entry = __trace_buffer_lock_reserve(*current_rb,
2286                                            type, len, flags, pc);
2287        /*
2288         * If tracing is off, but we have triggers enabled
2289         * we still need to look at the event data. Use the temp_buffer
2290         * to store the trace event for the trigger to use. It's recursion
2291         * safe and will not be recorded anywhere.
2292         */
2293        if (!entry && trace_file->flags & EVENT_FILE_FL_TRIGGER_COND) {
2294                *current_rb = temp_buffer;
2295                entry = __trace_buffer_lock_reserve(*current_rb,
2296                                                    type, len, flags, pc);
2297        }
2298        return entry;
2299}
2300EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve);
2301
2302static DEFINE_SPINLOCK(tracepoint_iter_lock);
2303static DEFINE_MUTEX(tracepoint_printk_mutex);
2304
2305static void output_printk(struct trace_event_buffer *fbuffer)
2306{
2307        struct trace_event_call *event_call;
2308        struct trace_event *event;
2309        unsigned long flags;
2310        struct trace_iterator *iter = tracepoint_print_iter;
2311
2312        /* We should never get here if iter is NULL */
2313        if (WARN_ON_ONCE(!iter))
2314                return;
2315
2316        event_call = fbuffer->trace_file->event_call;
2317        if (!event_call || !event_call->event.funcs ||
2318            !event_call->event.funcs->trace)
2319                return;
2320
2321        event = &fbuffer->trace_file->event_call->event;
2322
2323        spin_lock_irqsave(&tracepoint_iter_lock, flags);
2324        trace_seq_init(&iter->seq);
2325        iter->ent = fbuffer->entry;
2326        event_call->event.funcs->trace(iter, 0, event);
2327        trace_seq_putc(&iter->seq, 0);
2328        printk("%s", iter->seq.buffer);
2329
2330        spin_unlock_irqrestore(&tracepoint_iter_lock, flags);
2331}
2332
2333int tracepoint_printk_sysctl(struct ctl_table *table, int write,
2334                             void __user *buffer, size_t *lenp,
2335                             loff_t *ppos)
2336{
2337        int save_tracepoint_printk;
2338        int ret;
2339
2340        mutex_lock(&tracepoint_printk_mutex);
2341        save_tracepoint_printk = tracepoint_printk;
2342
2343        ret = proc_dointvec(table, write, buffer, lenp, ppos);
2344
2345        /*
2346         * This will force exiting early, as tracepoint_printk
2347         * is always zero when tracepoint_print_iter is not allocated.
2348         */
2349        if (!tracepoint_print_iter)
2350                tracepoint_printk = 0;
2351
2352        if (save_tracepoint_printk == tracepoint_printk)
2353                goto out;
2354
2355        if (tracepoint_printk)
2356                static_key_enable(&tracepoint_printk_key.key);
2357        else
2358                static_key_disable(&tracepoint_printk_key.key);
2359
2360 out:
2361        mutex_unlock(&tracepoint_printk_mutex);
2362
2363        return ret;
2364}
2365
2366void trace_event_buffer_commit(struct trace_event_buffer *fbuffer)
2367{
2368        if (static_key_false(&tracepoint_printk_key.key))
2369                output_printk(fbuffer);
2370
2371        event_trigger_unlock_commit(fbuffer->trace_file, fbuffer->buffer,
2372                                    fbuffer->event, fbuffer->entry,
2373                                    fbuffer->flags, fbuffer->pc);
2374}
2375EXPORT_SYMBOL_GPL(trace_event_buffer_commit);
2376
2377void trace_buffer_unlock_commit_regs(struct trace_array *tr,
2378                                     struct ring_buffer *buffer,
2379                                     struct ring_buffer_event *event,
2380                                     unsigned long flags, int pc,
2381                                     struct pt_regs *regs)
2382{
2383        __buffer_unlock_commit(buffer, event);
2384
2385        /*
2386         * If regs is not set, then skip the following callers:
2387         *   trace_buffer_unlock_commit_regs
2388         *   event_trigger_unlock_commit
2389         *   trace_event_buffer_commit
2390         *   trace_event_raw_event_sched_switch
2391         * Note, we can still get here via blktrace, wakeup tracer
2392         * and mmiotrace, but that's ok if they lose a function or
2393         * two. They are not that meaningful.
2394         */
2395        ftrace_trace_stack(tr, buffer, flags, regs ? 0 : 4, pc, regs);
2396        ftrace_trace_userstack(buffer, flags, pc);
2397}
2398
2399/*
2400 * Similar to trace_buffer_unlock_commit_regs() but do not dump stack.
2401 */
2402void
2403trace_buffer_unlock_commit_nostack(struct ring_buffer *buffer,
2404                                   struct ring_buffer_event *event)
2405{
2406        __buffer_unlock_commit(buffer, event);
2407}
2408
2409static void
2410trace_process_export(struct trace_export *export,
2411               struct ring_buffer_event *event)
2412{
2413        struct trace_entry *entry;
2414        unsigned int size;
2415
2416        entry = ring_buffer_event_data(event);
2417        size = ring_buffer_event_length(event);
2418        export->write(entry, size);
2419}
2420
2421static DEFINE_MUTEX(ftrace_export_lock);
2422
2423static struct trace_export __rcu *ftrace_exports_list __read_mostly;
2424
2425static DEFINE_STATIC_KEY_FALSE(ftrace_exports_enabled);
2426
2427static inline void ftrace_exports_enable(void)
2428{
2429        static_branch_enable(&ftrace_exports_enabled);
2430}
2431
2432static inline void ftrace_exports_disable(void)
2433{
2434        static_branch_disable(&ftrace_exports_enabled);
2435}
2436
2437void ftrace_exports(struct ring_buffer_event *event)
2438{
2439        struct trace_export *export;
2440
2441        preempt_disable_notrace();
2442
2443        export = rcu_dereference_raw_notrace(ftrace_exports_list);
2444        while (export) {
2445                trace_process_export(export, event);
2446                export = rcu_dereference_raw_notrace(export->next);
2447        }
2448
2449        preempt_enable_notrace();
2450}
2451
2452static inline void
2453add_trace_export(struct trace_export **list, struct trace_export *export)
2454{
2455        rcu_assign_pointer(export->next, *list);
2456        /*
2457         * We are inserting export into the list, but another
2458         * CPU might be walking that list. We need to make sure
2459         * the export->next pointer is valid before another CPU sees
2460         * the export pointer included in the list.
2461         */
2462        rcu_assign_pointer(*list, export);
2463}
2464
2465static inline int
2466rm_trace_export(struct trace_export **list, struct trace_export *export)
2467{
2468        struct trace_export **p;
2469
2470        for (p = list; *p != NULL; p = &(*p)->next)
2471                if (*p == export)
2472                        break;
2473
2474        if (*p != export)
2475                return -1;
2476
2477        rcu_assign_pointer(*p, (*p)->next);
2478
2479        return 0;
2480}
2481
2482static inline void
2483add_ftrace_export(struct trace_export **list, struct trace_export *export)
2484{
2485        if (*list == NULL)
2486                ftrace_exports_enable();
2487
2488        add_trace_export(list, export);
2489}
2490
2491static inline int
2492rm_ftrace_export(struct trace_export **list, struct trace_export *export)
2493{
2494        int ret;
2495
2496        ret = rm_trace_export(list, export);
2497        if (*list == NULL)
2498                ftrace_exports_disable();
2499
2500        return ret;
2501}
2502
2503int register_ftrace_export(struct trace_export *export)
2504{
2505        if (WARN_ON_ONCE(!export->write))
2506                return -1;
2507
2508        mutex_lock(&ftrace_export_lock);
2509
2510        add_ftrace_export(&ftrace_exports_list, export);
2511
2512        mutex_unlock(&ftrace_export_lock);
2513
2514        return 0;
2515}
2516EXPORT_SYMBOL_GPL(register_ftrace_export);
2517
2518int unregister_ftrace_export(struct trace_export *export)
2519{
2520        int ret;
2521
2522        mutex_lock(&ftrace_export_lock);
2523
2524        ret = rm_ftrace_export(&ftrace_exports_list, export);
2525
2526        mutex_unlock(&ftrace_export_lock);
2527
2528        return ret;
2529}
2530EXPORT_SYMBOL_GPL(unregister_ftrace_export);
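
A minimal sketch of an export consumer, assuming only the write() signature that trace_process_export() invokes above; the names are hypothetical:

	static void my_export_write(const void *buf, unsigned int len)
	{
		/* buf points at the raw trace entry; push it to another sink */
	}

	static struct trace_export my_export = {
		.write	= my_export_write,
	};

	static int __init my_export_init(void)
	{
		return register_ftrace_export(&my_export);
	}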
2531
2532void
2533trace_function(struct trace_array *tr,
2534               unsigned long ip, unsigned long parent_ip, unsigned long flags,
2535               int pc)
2536{
2537        struct trace_event_call *call = &event_function;
2538        struct ring_buffer *buffer = tr->trace_buffer.buffer;
2539        struct ring_buffer_event *event;
2540        struct ftrace_entry *entry;
2541
2542        event = __trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
2543                                            flags, pc);
2544        if (!event)
2545                return;
2546        entry   = ring_buffer_event_data(event);
2547        entry->ip                       = ip;
2548        entry->parent_ip                = parent_ip;
2549
2550        if (!call_filter_check_discard(call, entry, buffer, event)) {
2551                if (static_branch_unlikely(&ftrace_exports_enabled))
2552                        ftrace_exports(event);
2553                __buffer_unlock_commit(buffer, event);
2554        }
2555}
2556
2557#ifdef CONFIG_STACKTRACE
2558
2559#define FTRACE_STACK_MAX_ENTRIES (PAGE_SIZE / sizeof(unsigned long))
2560struct ftrace_stack {
2561        unsigned long           calls[FTRACE_STACK_MAX_ENTRIES];
2562};
2563
2564static DEFINE_PER_CPU(struct ftrace_stack, ftrace_stack);
2565static DEFINE_PER_CPU(int, ftrace_stack_reserve);
2566
2567static void __ftrace_trace_stack(struct ring_buffer *buffer,
2568                                 unsigned long flags,
2569                                 int skip, int pc, struct pt_regs *regs)
2570{
2571        struct trace_event_call *call = &event_kernel_stack;
2572        struct ring_buffer_event *event;
2573        struct stack_entry *entry;
2574        struct stack_trace trace;
2575        int use_stack;
2576        int size = FTRACE_STACK_ENTRIES;
2577
2578        trace.nr_entries        = 0;
2579        trace.skip              = skip;
2580
2581        /*
2582         * Add two, for this function and the call to save_stack_trace().
2583         * If regs is set, then these functions will not be in the way.
2584         */
2585        if (!regs)
2586                trace.skip += 2;
2587
2588        /*
2589         * Since events can happen in NMIs there's no safe way to
2590         * use the per cpu ftrace_stacks. We reserve it and if an interrupt
2591         * or NMI comes in, it will just have to use the default
2592         * FTRACE_STACK_ENTRIES.
2593         */
2594        preempt_disable_notrace();
2595
2596        use_stack = __this_cpu_inc_return(ftrace_stack_reserve);
2597        /*
2598         * We don't need any atomic variables, just a barrier.
2599         * If an interrupt comes in, we don't care, because it would
2600         * have exited and put the counter back to what we want.
2601         * We just need a barrier to keep gcc from moving things
2602         * around.
2603         */
2604        barrier();
2605        if (use_stack == 1) {
2606                trace.entries           = this_cpu_ptr(ftrace_stack.calls);
2607                trace.max_entries       = FTRACE_STACK_MAX_ENTRIES;
2608
2609                if (regs)
2610                        save_stack_trace_regs(regs, &trace);
2611                else
2612                        save_stack_trace(&trace);
2613
2614                if (trace.nr_entries > size)
2615                        size = trace.nr_entries;
2616        } else
2617                /* From now on, use_stack is a boolean */
2618                use_stack = 0;
2619
2620        size *= sizeof(unsigned long);
2621
2622        event = __trace_buffer_lock_reserve(buffer, TRACE_STACK,
2623                                            sizeof(*entry) + size, flags, pc);
2624        if (!event)
2625                goto out;
2626        entry = ring_buffer_event_data(event);
2627
2628        memset(&entry->caller, 0, size);
2629
2630        if (use_stack)
2631                memcpy(&entry->caller, trace.entries,
2632                       trace.nr_entries * sizeof(unsigned long));
2633        else {
2634                trace.max_entries       = FTRACE_STACK_ENTRIES;
2635                trace.entries           = entry->caller;
2636                if (regs)
2637                        save_stack_trace_regs(regs, &trace);
2638                else
2639                        save_stack_trace(&trace);
2640        }
2641
2642        entry->size = trace.nr_entries;
2643
2644        if (!call_filter_check_discard(call, entry, buffer, event))
2645                __buffer_unlock_commit(buffer, event);
2646
2647 out:
2648        /* Again, don't let gcc optimize things here */
2649        barrier();
2650        __this_cpu_dec(ftrace_stack_reserve);
2651        preempt_enable_notrace();
2652
2653}
2654
2655static inline void ftrace_trace_stack(struct trace_array *tr,
2656                                      struct ring_buffer *buffer,
2657                                      unsigned long flags,
2658                                      int skip, int pc, struct pt_regs *regs)
2659{
2660        if (!(tr->trace_flags & TRACE_ITER_STACKTRACE))
2661                return;
2662
2663        __ftrace_trace_stack(buffer, flags, skip, pc, regs);
2664}
2665
2666void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
2667                   int pc)
2668{
2669        struct ring_buffer *buffer = tr->trace_buffer.buffer;
2670
2671        if (rcu_is_watching()) {
2672                __ftrace_trace_stack(buffer, flags, skip, pc, NULL);
2673                return;
2674        }
2675
2676        /*
2677         * When an NMI triggers, RCU is enabled via rcu_nmi_enter(),
2678         * but if the above rcu_is_watching() failed, then the NMI
2679         * triggered someplace critical, and rcu_irq_enter() should
2680         * not be called from NMI.
2681         */
2682        if (unlikely(in_nmi()))
2683                return;
2684
2685        /*
2686         * It is possible that a function is being traced in a
2687         * location that RCU is not watching. A call to
2688         * rcu_irq_enter() will make sure that it is, but there are
2689         * a few internal RCU functions that could be traced
2690         * where that won't work either. In those cases, we just
2691         * do nothing.
2692         */
2693        if (unlikely(rcu_irq_enter_disabled()))
2694                return;
2695
2696        rcu_irq_enter_irqson();
2697        __ftrace_trace_stack(buffer, flags, skip, pc, NULL);
2698        rcu_irq_exit_irqson();
2699}
2700
2701/**
2702 * trace_dump_stack - record a stack back trace in the trace buffer
2703 * @skip: Number of functions to skip (helper handlers)
2704 */
2705void trace_dump_stack(int skip)
2706{
2707        unsigned long flags;
2708
2709        if (tracing_disabled || tracing_selftest_running)
2710                return;
2711
2712        local_save_flags(flags);
2713
2714        /*
2715         * Skip 3 more, which seems to get us to the caller of
2716         * this function.
2717         */
2718        skip += 3;
2719        __ftrace_trace_stack(global_trace.trace_buffer.buffer,
2720                             flags, skip, preempt_count(), NULL);
2721}
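
Usage is a single call from (almost) any kernel context; a sketch:

	/* record the current backtrace into the global trace buffer */
	trace_dump_stack(0);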
2722
2723static DEFINE_PER_CPU(int, user_stack_count);
2724
2725void
2726ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
2727{
2728        struct trace_event_call *call = &event_user_stack;
2729        struct ring_buffer_event *event;
2730        struct userstack_entry *entry;
2731        struct stack_trace trace;
2732
2733        if (!(global_trace.trace_flags & TRACE_ITER_USERSTACKTRACE))
2734                return;
2735
2736        /*
2737         * NMIs cannot handle page faults, even with fixups.
2738         * Saving the user stack can (and often does) fault.
2739         */
2740        if (unlikely(in_nmi()))
2741                return;
2742
2743        /*
2744         * prevent recursion, since the user stack tracing may
2745         * trigger other kernel events.
2746         */
2747        preempt_disable();
2748        if (__this_cpu_read(user_stack_count))
2749                goto out;
2750
2751        __this_cpu_inc(user_stack_count);
2752
2753        event = __trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
2754                                            sizeof(*entry), flags, pc);
2755        if (!event)
2756                goto out_drop_count;
2757        entry   = ring_buffer_event_data(event);
2758
2759        entry->tgid             = current->tgid;
2760        memset(&entry->caller, 0, sizeof(entry->caller));
2761
2762        trace.nr_entries        = 0;
2763        trace.max_entries       = FTRACE_STACK_ENTRIES;
2764        trace.skip              = 0;
2765        trace.entries           = entry->caller;
2766
2767        save_stack_trace_user(&trace);
2768        if (!call_filter_check_discard(call, entry, buffer, event))
2769                __buffer_unlock_commit(buffer, event);
2770
2771 out_drop_count:
2772        __this_cpu_dec(user_stack_count);
2773 out:
2774        preempt_enable();
2775}
2776
2777#ifdef UNUSED
2778static void __trace_userstack(struct trace_array *tr, unsigned long flags)
2779{
2780        ftrace_trace_userstack(tr, flags, preempt_count());
2781}
2782#endif /* UNUSED */
2783
2784#endif /* CONFIG_STACKTRACE */
2785
2786/* created for use with alloc_percpu */
2787struct trace_buffer_struct {
2788        int nesting;
2789        char buffer[4][TRACE_BUF_SIZE];
2790};
2791
2792static struct trace_buffer_struct *trace_percpu_buffer;
2793
2794/*
2795 * This allows for lockless recording.  If we're nested too deeply, then
2796 * this returns NULL.
2797 */
2798static char *get_trace_buf(void)
2799{
2800        struct trace_buffer_struct *buffer = this_cpu_ptr(trace_percpu_buffer);
2801
2802        if (!buffer || buffer->nesting >= 4)
2803                return NULL;
2804
2805        buffer->nesting++;
2806
2807        /* Interrupts must see nesting incremented before we use the buffer */
2808        barrier();
2809        return &buffer->buffer[buffer->nesting][0];
2810}
2811
2812static void put_trace_buf(void)
2813{
2814        /* Don't let the decrement of nesting leak before this */
2815        barrier();
2816        this_cpu_dec(trace_percpu_buffer->nesting);
2817}
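
Callers bracket their use of the buffer with the two helpers while preemption is disabled, exactly as trace_vbprintk() does below; sketched:

	char *tbuffer;

	preempt_disable_notrace();
	tbuffer = get_trace_buf();	/* NULL when nested more than 4 deep */
	if (tbuffer) {
		/* format at most TRACE_BUF_SIZE bytes into tbuffer */
		put_trace_buf();
	}
	preempt_enable_notrace();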
2818
2819static int alloc_percpu_trace_buffer(void)
2820{
2821        struct trace_buffer_struct *buffers;
2822
2823        buffers = alloc_percpu(struct trace_buffer_struct);
2824        if (WARN(!buffers, "Could not allocate percpu trace_printk buffer"))
2825                return -ENOMEM;
2826
2827        trace_percpu_buffer = buffers;
2828        return 0;
2829}
2830
2831static int buffers_allocated;
2832
2833void trace_printk_init_buffers(void)
2834{
2835        if (buffers_allocated)
2836                return;
2837
2838        if (alloc_percpu_trace_buffer())
2839                return;
2840
2841        /* trace_printk() is for debug use only. Don't use it in production. */
2842
2843        pr_warn("\n");
2844        pr_warn("**********************************************************\n");
2845        pr_warn("**   NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE   **\n");
2846        pr_warn("**                                                      **\n");
2847        pr_warn("** trace_printk() being used. Allocating extra memory.  **\n");
2848        pr_warn("**                                                      **\n");
2849        pr_warn("** This means that this is a DEBUG kernel and it is     **\n");
2850        pr_warn("** unsafe for production use.                           **\n");
2851        pr_warn("**                                                      **\n");
2852        pr_warn("** If you see this message and you are not debugging    **\n");
2853        pr_warn("** the kernel, report this immediately to your vendor!  **\n");
2854        pr_warn("**                                                      **\n");
2855        pr_warn("**   NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE   **\n");
2856        pr_warn("**********************************************************\n");
2857
2858        /* Expand the buffers to set size */
2859        tracing_update_buffers();
2860
2861        buffers_allocated = 1;
2862
2863        /*
2864         * trace_printk_init_buffers() can be called by modules.
2865         * If that happens, then we need to start cmdline recording
2866         * directly here. If global_trace.trace_buffer.buffer is already
2867         * allocated here, then this was called by module code.
2868         */
2869        if (global_trace.trace_buffer.buffer)
2870                tracing_start_cmdline_record();
2871}
2872
2873void trace_printk_start_comm(void)
2874{
2875        /* Start tracing comms if trace printk is set */
2876        if (!buffers_allocated)
2877                return;
2878        tracing_start_cmdline_record();
2879}
2880
2881static void trace_printk_start_stop_comm(int enabled)
2882{
2883        if (!buffers_allocated)
2884                return;
2885
2886        if (enabled)
2887                tracing_start_cmdline_record();
2888        else
2889                tracing_stop_cmdline_record();
2890}
2891
2892/**
2893 * trace_vbprintk - write a binary message to the tracing buffer
2894 *
2895 */
2896int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
2897{
2898        struct trace_event_call *call = &event_bprint;
2899        struct ring_buffer_event *event;
2900        struct ring_buffer *buffer;
2901        struct trace_array *tr = &global_trace;
2902        struct bprint_entry *entry;
2903        unsigned long flags;
2904        char *tbuffer;
2905        int len = 0, size, pc;
2906
2907        if (unlikely(tracing_selftest_running || tracing_disabled))
2908                return 0;
2909
2910        /* Don't pollute graph traces with trace_vprintk internals */
2911        pause_graph_tracing();
2912
2913        pc = preempt_count();
2914        preempt_disable_notrace();
2915
2916        tbuffer = get_trace_buf();
2917        if (!tbuffer) {
2918                len = 0;
2919                goto out_nobuffer;
2920        }
2921
2922        len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args);
2923
2924        if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0)
2925                goto out;
2926
2927        local_save_flags(flags);
2928        size = sizeof(*entry) + sizeof(u32) * len;
2929        buffer = tr->trace_buffer.buffer;
2930        event = __trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
2931                                            flags, pc);
2932        if (!event)
2933                goto out;
2934        entry = ring_buffer_event_data(event);
2935        entry->ip                       = ip;
2936        entry->fmt                      = fmt;
2937
2938        memcpy(entry->buf, tbuffer, sizeof(u32) * len);
2939        if (!call_filter_check_discard(call, entry, buffer, event)) {
2940                __buffer_unlock_commit(buffer, event);
2941                ftrace_trace_stack(tr, buffer, flags, 6, pc, NULL);
2942        }
2943
2944out:
2945        put_trace_buf();
2946
2947out_nobuffer:
2948        preempt_enable_notrace();
2949        unpause_graph_tracing();
2950
2951        return len;
2952}
2953EXPORT_SYMBOL_GPL(trace_vbprintk);
2954
2955static int
2956__trace_array_vprintk(struct ring_buffer *buffer,
2957                      unsigned long ip, const char *fmt, va_list args)
2958{
2959        struct trace_event_call *call = &event_print;
2960        struct ring_buffer_event *event;
2961        int len = 0, size, pc;
2962        struct print_entry *entry;
2963        unsigned long flags;
2964        char *tbuffer;
2965
2966        if (tracing_disabled || tracing_selftest_running)
2967                return 0;
2968
2969        /* Don't pollute graph traces with trace_vprintk internals */
2970        pause_graph_tracing();
2971
2972        pc = preempt_count();
2973        preempt_disable_notrace();
2974
2976        tbuffer = get_trace_buf();
2977        if (!tbuffer) {
2978                len = 0;
2979                goto out_nobuffer;
2980        }
2981
2982        len = vscnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);
2983
2984        local_save_flags(flags);
2985        size = sizeof(*entry) + len + 1;
2986        event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
2987                                            flags, pc);
2988        if (!event)
2989                goto out;
2990        entry = ring_buffer_event_data(event);
2991        entry->ip = ip;
2992
2993        memcpy(&entry->buf, tbuffer, len + 1);
2994        if (!call_filter_check_discard(call, entry, buffer, event)) {
2995                __buffer_unlock_commit(buffer, event);
2996                ftrace_trace_stack(&global_trace, buffer, flags, 6, pc, NULL);
2997        }
2998
2999out:
3000        put_trace_buf();
3001
3002out_nobuffer:
3003        preempt_enable_notrace();
3004        unpause_graph_tracing();
3005
3006        return len;
3007}
3008
3009int trace_array_vprintk(struct trace_array *tr,
3010                        unsigned long ip, const char *fmt, va_list args)
3011{
3012        return __trace_array_vprintk(tr->trace_buffer.buffer, ip, fmt, args);
3013}
3014
3015int trace_array_printk(struct trace_array *tr,
3016                       unsigned long ip, const char *fmt, ...)
3017{
3018        int ret;
3019        va_list ap;
3020
3021        if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
3022                return 0;
3023
3024        va_start(ap, fmt);
3025        ret = trace_array_vprintk(tr, ip, fmt, ap);
3026        va_end(ap);
3027        return ret;
3028}
3029
3030int trace_array_printk_buf(struct ring_buffer *buffer,
3031                           unsigned long ip, const char *fmt, ...)
3032{
3033        int ret;
3034        va_list ap;
3035
3036        if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
3037                return 0;
3038
3039        va_start(ap, fmt);
3040        ret = __trace_array_vprintk(buffer, ip, fmt, ap);
3041        va_end(ap);
3042        return ret;
3043}
3044
3045int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
3046{
3047        return trace_array_vprintk(&global_trace, ip, fmt, args);
3048}
3049EXPORT_SYMBOL_GPL(trace_vprintk);
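
A usage sketch of the printf-style entry points; tr and nr_woken are assumed to exist at the call site, and _THIS_IP_ records the caller's address:

	/* writes into tr's ring buffer when the printk trace flag is set */
	trace_array_printk(tr, _THIS_IP_, "woke %d tasks\n", nr_woken);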
3050
3051static void trace_iterator_increment(struct trace_iterator *iter)
3052{
3053        struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, iter->cpu);
3054
3055        iter->idx++;
3056        if (buf_iter)
3057                ring_buffer_read(buf_iter, NULL);
3058}
3059
3060static struct trace_entry *
3061peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
3062                unsigned long *lost_events)
3063{
3064        struct ring_buffer_event *event;
3065        struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, cpu);
3066
3067        if (buf_iter)
3068                event = ring_buffer_iter_peek(buf_iter, ts);
3069        else
3070                event = ring_buffer_peek(iter->trace_buffer->buffer, cpu, ts,
3071                                         lost_events);
3072
3073        if (event) {
3074                iter->ent_size = ring_buffer_event_length(event);
3075                return ring_buffer_event_data(event);
3076        }
3077        iter->ent_size = 0;
3078        return NULL;
3079}
3080
3081static struct trace_entry *
3082__find_next_entry(struct trace_iterator *iter, int *ent_cpu,
3083                  unsigned long *missing_events, u64 *ent_ts)
3084{
3085        struct ring_buffer *buffer = iter->trace_buffer->buffer;
3086        struct trace_entry *ent, *next = NULL;
3087        unsigned long lost_events = 0, next_lost = 0;
3088        int cpu_file = iter->cpu_file;
3089        u64 next_ts = 0, ts;
3090        int next_cpu = -1;
3091        int next_size = 0;
3092        int cpu;
3093
3094        /*
3095         * If we are in a per_cpu trace file, don't bother iterating over
3096         * all CPUs; just peek at that CPU directly.
3097         */
3098        if (cpu_file > RING_BUFFER_ALL_CPUS) {
3099                if (ring_buffer_empty_cpu(buffer, cpu_file))
3100                        return NULL;
3101                ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events);
3102                if (ent_cpu)
3103                        *ent_cpu = cpu_file;
3104
3105                return ent;
3106        }
3107
3108        for_each_tracing_cpu(cpu) {
3109
3110                if (ring_buffer_empty_cpu(buffer, cpu))
3111                        continue;
3112
3113                ent = peek_next_entry(iter, cpu, &ts, &lost_events);
3114
3115                /*
3116                 * Pick the entry with the smallest timestamp:
3117                 */
3118                if (ent && (!next || ts < next_ts)) {
3119                        next = ent;
3120                        next_cpu = cpu;
3121                        next_ts = ts;
3122                        next_lost = lost_events;
3123                        next_size = iter->ent_size;
3124                }
3125        }
3126
3127        iter->ent_size = next_size;
3128
3129        if (ent_cpu)
3130                *ent_cpu = next_cpu;
3131
3132        if (ent_ts)
3133                *ent_ts = next_ts;
3134
3135        if (missing_events)
3136                *missing_events = next_lost;
3137
3138        return next;
3139}
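
In effect this is a k-way merge by timestamp across the per-CPU buffers; a worked example with hypothetical values:

	/* cpu0 head: ts=100, cpu1 head: ts=90, cpu2: empty
	 * => next is cpu1's event, *ent_cpu = 1, *ent_ts = 90
	 */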
3140
3141/* Find the next real entry, without updating the iterator itself */
3142struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
3143                                          int *ent_cpu, u64 *ent_ts)
3144{
3145        return __find_next_entry(iter, ent_cpu, NULL, ent_ts);
3146}
3147
3148/* Find the next real entry, and increment the iterator to the next entry */
3149void *trace_find_next_entry_inc(struct trace_iterator *iter)
3150{
3151        iter->ent = __find_next_entry(iter, &iter->cpu,
3152                                      &iter->lost_events, &iter->ts);
3153
3154        if (iter->ent)
3155                trace_iterator_increment(iter);
3156
3157        return iter->ent ? iter : NULL;
3158}
3159
3160static void trace_consume(struct trace_iterator *iter)
3161{
3162        ring_buffer_consume(iter->trace_buffer->buffer, iter->cpu, &iter->ts,
3163                            &iter->lost_events);
3164}
3165
3166static void *s_next(struct seq_file *m, void *v, loff_t *pos)
3167{
3168        struct trace_iterator *iter = m->private;
3169        int i = (int)*pos;
3170        void *ent;
3171
3172        WARN_ON_ONCE(iter->leftover);
3173
3174        (*pos)++;
3175
3176        /* can't go backwards */
3177        if (iter->idx > i)
3178                return NULL;
3179
3180        if (iter->idx < 0)
3181                ent = trace_find_next_entry_inc(iter);
3182        else
3183                ent = iter;
3184
3185        while (ent && iter->idx < i)
3186                ent = trace_find_next_entry_inc(iter);
3187
3188        iter->pos = *pos;
3189
3190        return ent;
3191}
3192
3193void tracing_iter_reset(struct trace_iterator *iter, int cpu)
3194{
3195        struct ring_buffer_event *event;
3196        struct ring_buffer_iter *buf_iter;
3197        unsigned long entries = 0;
3198        u64 ts;
3199
3200        per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = 0;
3201
3202        buf_iter = trace_buffer_iter(iter, cpu);
3203        if (!buf_iter)
3204                return;
3205
3206        ring_buffer_iter_reset(buf_iter);
3207
3208        /*
3209         * With the max latency tracers, it is possible that a reset
3210         * never took place on a CPU. This is evident when an event's
3211         * timestamp is before the start of the buffer.
3212         */
3213        while ((event = ring_buffer_iter_peek(buf_iter, &ts))) {
3214                if (ts >= iter->trace_buffer->time_start)
3215                        break;
3216                entries++;
3217                ring_buffer_read(buf_iter, NULL);
3218        }
3219
3220        per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = entries;
3221}
3222
3223/*
3224 * The current tracer is copied to avoid global locking
3225 * all around.
3226 */
3227static void *s_start(struct seq_file *m, loff_t *pos)
3228{
3229        struct trace_iterator *iter = m->private;
3230        struct trace_array *tr = iter->tr;
3231        int cpu_file = iter->cpu_file;
3232        void *p = NULL;
3233        loff_t l = 0;
3234        int cpu;
3235
3236        /*
3237         * Copy the tracer to avoid using a global lock all around.
3238         * iter->trace is a copy of current_trace; the name pointer
3239         * may be compared instead of using strcmp(), as iter->trace->name
3240         * will point to the same string as current_trace->name.
3241         */
3242        mutex_lock(&trace_types_lock);
3243        if (unlikely(tr->current_trace && iter->trace->name != tr->current_trace->name))
3244                *iter->trace = *tr->current_trace;
3245        mutex_unlock(&trace_types_lock);
3246
3247#ifdef CONFIG_TRACER_MAX_TRACE
3248        if (iter->snapshot && iter->trace->use_max_tr)
3249                return ERR_PTR(-EBUSY);
3250#endif
3251
3252        if (!iter->snapshot)
3253                atomic_inc(&trace_record_taskinfo_disabled);
3254
3255        if (*pos != iter->pos) {
3256                iter->ent = NULL;
3257                iter->cpu = 0;
3258                iter->idx = -1;
3259
3260                if (cpu_file == RING_BUFFER_ALL_CPUS) {
3261                        for_each_tracing_cpu(cpu)
3262                                tracing_iter_reset(iter, cpu);
3263                } else
3264                        tracing_iter_reset(iter, cpu_file);
3265
3266                iter->leftover = 0;
3267                for (p = iter; p && l < *pos; p = s_next(m, p, &l))
3268                        ;
3269
3270        } else {
3271                /*
3272                 * If we overflowed the seq_file before, then we want
3273                 * to just reuse the trace_seq buffer again.
3274                 */
3275                if (iter->leftover)
3276                        p = iter;
3277                else {
3278                        l = *pos - 1;
3279                        p = s_next(m, p, &l);
3280                }
3281        }
3282
3283        trace_event_read_lock();
3284        trace_access_lock(cpu_file);
3285        return p;
3286}
3287
3288static void s_stop(struct seq_file *m, void *p)
3289{
3290        struct trace_iterator *iter = m->private;
3291
3292#ifdef CONFIG_TRACER_MAX_TRACE
3293        if (iter->snapshot && iter->trace->use_max_tr)
3294                return;
3295#endif
3296
3297        if (!iter->snapshot)
3298                atomic_dec(&trace_record_taskinfo_disabled);
3299
3300        trace_access_unlock(iter->cpu_file);
3301        trace_event_read_unlock();
3302}
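
For orientation, the four callbacks above plug into a seq_operations table; the actual table (and s_show) appear later in this file, with this shape:

	static const struct seq_operations tracer_seq_ops = {
		.start		= s_start,
		.next		= s_next,
		.stop		= s_stop,
		.show		= s_show,
	};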
3303
3304static void
3305get_total_entries(struct trace_buffer *buf,
3306                  unsigned long *total, unsigned long *entries)
3307{
3308        unsigned long count;
3309        int cpu;
3310
3311        *total = 0;
3312        *entries = 0;
3313
3314        for_each_tracing_cpu(cpu) {
3315                count = ring_buffer_entries_cpu(buf->buffer, cpu);
3316                /*
3317                 * If this buffer has skipped entries, then we hold all
3318                 * entries for the trace and we need to ignore the
3319                 * ones before the time stamp.
3320                 */
3321                if (per_cpu_ptr(buf->data, cpu)->skipped_entries) {
3322                        count -= per_cpu_ptr(buf->data, cpu)->skipped_entries;
3323                        /* total is the same as the entries */
3324                        *total += count;
3325                } else
3326                        *total += count +
3327                                ring_buffer_overrun_cpu(buf->buffer, cpu);
3328                *entries += count;
3329        }
3330}
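
A worked example of the accounting, with hypothetical numbers: if a CPU's buffer still holds 1000 readable events, 500 older ones were overwritten, and nothing was skipped, then entries grows by 1000 and total by 1500, which print_event_info() below would report on a 4-CPU box as:

	# entries-in-buffer/entries-written: 1000/1500   #P:4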
3331
3332static void print_lat_help_header(struct seq_file *m)
3333{
3334        seq_puts(m, "#                  _------=> CPU#            \n"
3335                    "#                 / _-----=> irqs-off        \n"
3336                    "#                | / _----=> need-resched    \n"
3337                    "#                || / _---=> hardirq/softirq \n"
3338                    "#                ||| / _--=> preempt-depth   \n"
3339                    "#                |||| /     delay            \n"
3340                    "#  cmd     pid   ||||| time  |   caller      \n"
3341                    "#     \\   /      |||||  \\    |   /         \n");
3342}
3343
3344static void print_event_info(struct trace_buffer *buf, struct seq_file *m)
3345{
3346        unsigned long total;
3347        unsigned long entries;
3348
3349        get_total_entries(buf, &total, &entries);
3350        seq_printf(m, "# entries-in-buffer/entries-written: %lu/%lu   #P:%d\n",
3351                   entries, total, num_online_cpus());
3352        seq_puts(m, "#\n");
3353}
3354
3355static void print_func_help_header(struct trace_buffer *buf, struct seq_file *m,
3356                                   unsigned int flags)
3357{
3358        bool tgid = flags & TRACE_ITER_RECORD_TGID;
3359
3360        print_event_info(buf, m);
3361
3362        seq_printf(m, "#           TASK-PID   CPU#   %s  TIMESTAMP  FUNCTION\n", tgid ? "TGID     " : "");
3363        seq_printf(m, "#              | |       |    %s     |         |\n",      tgid ? "  |      " : "");
3364}
3365
3366static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file *m,
3367                                       unsigned int flags)
3368{
3369        bool tgid = flags & TRACE_ITER_RECORD_TGID;
3370        const char tgid_space[] = "          ";
3371        const char space[] = "  ";
3372
3373        seq_printf(m, "#                          %s  _-----=> irqs-off\n",
3374                   tgid ? tgid_space : space);
3375        seq_printf(m, "#                          %s / _----=> need-resched\n",
3376                   tgid ? tgid_space : space);
3377        seq_printf(m, "#                          %s| / _---=> hardirq/softirq\n",
3378                   tgid ? tgid_space : space);
3379        seq_printf(m, "#                          %s|| / _--=> preempt-depth\n",
3380                   tgid ? tgid_space : space);
3381        seq_printf(m, "#                          %s||| /     delay\n",
3382                   tgid ? tgid_space : space);
3383        seq_printf(m, "#           TASK-PID   CPU#%s||||    TIMESTAMP  FUNCTION\n",
3384                   tgid ? "   TGID   " : space);
3385        seq_printf(m, "#              | |       | %s||||       |         |\n",
3386                   tgid ? "     |    " : space);
3387}
3388
3389void
3390print_trace_header(struct seq_file *m, struct trace_iterator *iter)
3391{
3392        unsigned long sym_flags = (global_trace.trace_flags & TRACE_ITER_SYM_MASK);
3393        struct trace_buffer *buf = iter->trace_buffer;
3394        struct trace_array_cpu *data = per_cpu_ptr(buf->data, buf->cpu);
3395        struct tracer *type = iter->trace;
3396        unsigned long entries;
3397        unsigned long total;
3398        const char *name = type->name;
3401
3402        get_total_entries(buf, &total, &entries);
3403
3404        seq_printf(m, "# %s latency trace v1.1.5 on %s\n",
3405                   name, UTS_RELEASE);
3406        seq_puts(m, "# -----------------------------------"
3407                 "---------------------------------\n");
3408        seq_printf(m, "# latency: %lu us, #%lu/%lu, CPU#%d |"
3409                   " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
3410                   nsecs_to_usecs(data->saved_latency),
3411                   entries,
3412                   total,
3413                   buf->cpu,
3414#if defined(CONFIG_PREEMPT_NONE)
3415                   "server",
3416#elif defined(CONFIG_PREEMPT_VOLUNTARY)
3417                   "desktop",
3418#elif defined(CONFIG_PREEMPT)
3419                   "preempt",
3420#else
3421                   "unknown",
3422#endif
3423                   /* These are reserved for later use */
3424                   0, 0, 0, 0);
3425#ifdef CONFIG_SMP
3426        seq_printf(m, " #P:%d)\n", num_online_cpus());
3427#else
3428        seq_puts(m, ")\n");
3429#endif
3430        seq_puts(m, "#    -----------------\n");
3431        seq_printf(m, "#    | task: %.16s-%d "
3432                   "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
3433                   data->comm, data->pid,
3434                   from_kuid_munged(seq_user_ns(m), data->uid), data->nice,
3435                   data->policy, data->rt_priority);
3436        seq_puts(m, "#    -----------------\n");
3437
3438        if (data->critical_start) {
3439                seq_puts(m, "#  => started at: ");
3440                seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
3441                trace_print_seq(m, &iter->seq);
3442                seq_puts(m, "\n#  => ended at:   ");
3443                seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
3444                trace_print_seq(m, &iter->seq);
3445                seq_puts(m, "\n#\n");
3446        }
3447
3448        seq_puts(m, "#\n");
3449}
3450
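/*
 * If annotation is enabled (see the TRACE_FILE_ANNOTATE flag set in
 * __tracing_open() when the ring buffer had overruns), emit a marker
 * line the first time an entry from a given CPU buffer is printed.
 */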
3451static void test_cpu_buff_start(struct trace_iterator *iter)
3452{
3453        struct trace_seq *s = &iter->seq;
3454        struct trace_array *tr = iter->tr;
3455
3456        if (!(tr->trace_flags & TRACE_ITER_ANNOTATE))
3457                return;
3458
3459        if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
3460                return;
3461
3462        if (cpumask_available(iter->started) &&
3463            cpumask_test_cpu(iter->cpu, iter->started))
3464                return;
3465
3466        if (per_cpu_ptr(iter->trace_buffer->data, iter->cpu)->skipped_entries)
3467                return;
3468
3469        if (cpumask_available(iter->started))
3470                cpumask_set_cpu(iter->cpu, iter->started);
3471
3472        /* Don't print started cpu buffer for the first entry of the trace */
3473        if (iter->idx > 1)
3474                trace_seq_printf(s, "##### CPU %u buffer started ####\n",
3475                                iter->cpu);
3476}
3477
3478static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
3479{
3480        struct trace_array *tr = iter->tr;
3481        struct trace_seq *s = &iter->seq;
3482        unsigned long sym_flags = (tr->trace_flags & TRACE_ITER_SYM_MASK);
3483        struct trace_entry *entry;
3484        struct trace_event *event;
3485
3486        entry = iter->ent;
3487
3488        test_cpu_buff_start(iter);
3489
3490        event = ftrace_find_event(entry->type);
3491
3492        if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
3493                if (iter->iter_flags & TRACE_FILE_LAT_FMT)
3494                        trace_print_lat_context(iter);
3495                else
3496                        trace_print_context(iter);
3497        }
3498
3499        if (trace_seq_has_overflowed(s))
3500                return TRACE_TYPE_PARTIAL_LINE;
3501
3502        if (event)
3503                return event->funcs->trace(iter, sym_flags, event);
3504
3505        trace_seq_printf(s, "Unknown type %d\n", entry->type);
3506
3507        return trace_handle_return(s);
3508}
3509
3510static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
3511{
3512        struct trace_array *tr = iter->tr;
3513        struct trace_seq *s = &iter->seq;
3514        struct trace_entry *entry;
3515        struct trace_event *event;
3516
3517        entry = iter->ent;
3518
3519        if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO)
3520                trace_seq_printf(s, "%d %d %llu ",
3521                                 entry->pid, iter->cpu, iter->ts);
3522
3523        if (trace_seq_has_overflowed(s))
3524                return TRACE_TYPE_PARTIAL_LINE;
3525
3526        event = ftrace_find_event(entry->type);
3527        if (event)
3528                return event->funcs->raw(iter, 0, event);
3529
3530        trace_seq_printf(s, "%d ?\n", entry->type);
3531
3532        return trace_handle_return(s);
3533}
3534
3535static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
3536{
3537        struct trace_array *tr = iter->tr;
3538        struct trace_seq *s = &iter->seq;
3539        unsigned char newline = '\n';
3540        struct trace_entry *entry;
3541        struct trace_event *event;
3542
3543        entry = iter->ent;
3544
3545        if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
3546                SEQ_PUT_HEX_FIELD(s, entry->pid);
3547                SEQ_PUT_HEX_FIELD(s, iter->cpu);
3548                SEQ_PUT_HEX_FIELD(s, iter->ts);
3549                if (trace_seq_has_overflowed(s))
3550                        return TRACE_TYPE_PARTIAL_LINE;
3551        }
3552
3553        event = ftrace_find_event(entry->type);
3554        if (event) {
3555                enum print_line_t ret = event->funcs->hex(iter, 0, event);
3556                if (ret != TRACE_TYPE_HANDLED)
3557                        return ret;
3558        }
3559
3560        SEQ_PUT_FIELD(s, newline);
3561
3562        return trace_handle_return(s);
3563}
3564
3565static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
3566{
3567        struct trace_array *tr = iter->tr;
3568        struct trace_seq *s = &iter->seq;
3569        struct trace_entry *entry;
3570        struct trace_event *event;
3571
3572        entry = iter->ent;
3573
3574        if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
3575                SEQ_PUT_FIELD(s, entry->pid);
3576                SEQ_PUT_FIELD(s, iter->cpu);
3577                SEQ_PUT_FIELD(s, iter->ts);
3578                if (trace_seq_has_overflowed(s))
3579                        return TRACE_TYPE_PARTIAL_LINE;
3580        }
3581
3582        event = ftrace_find_event(entry->type);
3583        return event ? event->funcs->binary(iter, 0, event) :
3584                TRACE_TYPE_HANDLED;
3585}
3586
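/*
 * Return 1 if there is nothing left to read, checking either the
 * single buffer selected by iter->cpu_file or every CPU's buffer.
 */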
3587int trace_empty(struct trace_iterator *iter)
3588{
3589        struct ring_buffer_iter *buf_iter;
3590        int cpu;
3591
3592        /* If we are looking at one CPU buffer, only check that one */
3593        if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
3594                cpu = iter->cpu_file;
3595                buf_iter = trace_buffer_iter(iter, cpu);
3596                if (buf_iter) {
3597                        if (!ring_buffer_iter_empty(buf_iter))
3598                                return 0;
3599                } else {
3600                        if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
3601                                return 0;
3602                }
3603                return 1;
3604        }
3605
3606        for_each_tracing_cpu(cpu) {
3607                buf_iter = trace_buffer_iter(iter, cpu);
3608                if (buf_iter) {
3609                        if (!ring_buffer_iter_empty(buf_iter))
3610                                return 0;
3611                } else {
3612                        if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
3613                                return 0;
3614                }
3615        }
3616
3617        return 1;
3618}
3619
3620/*  Called with trace_event_read_lock() held. */
3621enum print_line_t print_trace_line(struct trace_iterator *iter)
3622{
3623        struct trace_array *tr = iter->tr;
3624        unsigned long trace_flags = tr->trace_flags;
3625        enum print_line_t ret;
3626
3627        if (iter->lost_events) {
3628                trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",
3629                                 iter->cpu, iter->lost_events);
3630                if (trace_seq_has_overflowed(&iter->seq))
3631                        return TRACE_TYPE_PARTIAL_LINE;
3632        }
3633
3634        if (iter->trace && iter->trace->print_line) {
3635                ret = iter->trace->print_line(iter);
3636                if (ret != TRACE_TYPE_UNHANDLED)
3637                        return ret;
3638        }
3639
3640        if (iter->ent->type == TRACE_BPUTS &&
3641                        trace_flags & TRACE_ITER_PRINTK &&
3642                        trace_flags & TRACE_ITER_PRINTK_MSGONLY)
3643                return trace_print_bputs_msg_only(iter);
3644
3645        if (iter->ent->type == TRACE_BPRINT &&
3646                        trace_flags & TRACE_ITER_PRINTK &&
3647                        trace_flags & TRACE_ITER_PRINTK_MSGONLY)
3648                return trace_print_bprintk_msg_only(iter);
3649
3650        if (iter->ent->type == TRACE_PRINT &&
3651                        trace_flags & TRACE_ITER_PRINTK &&
3652                        trace_flags & TRACE_ITER_PRINTK_MSGONLY)
3653                return trace_print_printk_msg_only(iter);
3654
3655        if (trace_flags & TRACE_ITER_BIN)
3656                return print_bin_fmt(iter);
3657
3658        if (trace_flags & TRACE_ITER_HEX)
3659                return print_hex_fmt(iter);
3660
3661        if (trace_flags & TRACE_ITER_RAW)
3662                return print_raw_fmt(iter);
3663
3664        return print_trace_fmt(iter);
3665}
3666
3667void trace_latency_header(struct seq_file *m)
3668{
3669        struct trace_iterator *iter = m->private;
3670        struct trace_array *tr = iter->tr;
3671
3672        /* print nothing if the buffers are empty */
3673        if (trace_empty(iter))
3674                return;
3675
3676        if (iter->iter_flags & TRACE_FILE_LAT_FMT)
3677                print_trace_header(m, iter);
3678
3679        if (!(tr->trace_flags & TRACE_ITER_VERBOSE))
3680                print_lat_help_header(m);
3681}
3682
3683void trace_default_header(struct seq_file *m)
3684{
3685        struct trace_iterator *iter = m->private;
3686        struct trace_array *tr = iter->tr;
3687        unsigned long trace_flags = tr->trace_flags;
3688
3689        if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
3690                return;
3691
3692        if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
3693                /* print nothing if the buffers are empty */
3694                if (trace_empty(iter))
3695                        return;
3696                print_trace_header(m, iter);
3697                if (!(trace_flags & TRACE_ITER_VERBOSE))
3698                        print_lat_help_header(m);
3699        } else {
3700                if (!(trace_flags & TRACE_ITER_VERBOSE)) {
3701                        if (trace_flags & TRACE_ITER_IRQ_INFO)
3702                                print_func_help_header_irq(iter->trace_buffer,
3703                                                           m, trace_flags);
3704                        else
3705                                print_func_help_header(iter->trace_buffer, m,
3706                                                       trace_flags);
3707                }
3708        }
3709}
3710
3711static void test_ftrace_alive(struct seq_file *m)
3712{
3713        if (!ftrace_is_dead())
3714                return;
3715        seq_puts(m, "# WARNING: FUNCTION TRACING IS CORRUPTED\n"
3716                    "#          MAY BE MISSING FUNCTION EVENTS\n");
3717}
3718
3719#ifdef CONFIG_TRACER_MAX_TRACE
3720static void show_snapshot_main_help(struct seq_file *m)
3721{
3722        seq_puts(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n"
3723                    "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
3724                    "#                      Takes a snapshot of the main buffer.\n"
3725                    "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate or free)\n"
3726                    "#                      (Doesn't have to be '2'; works with any number that\n"
3727                    "#                       is not a '0' or '1')\n");
3728}
3729
3730static void show_snapshot_percpu_help(struct seq_file *m)
3731{
3732        seq_puts(m, "# echo 0 > snapshot : Invalid for per_cpu snapshot file.\n");
3733#ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
3734        seq_puts(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
3735                    "#                      Takes a snapshot of the main buffer for this cpu.\n");
3736#else
3737        seq_puts(m, "# echo 1 > snapshot : Not supported with this kernel.\n"
3738                    "#                     Must use main snapshot file to allocate.\n");
3739#endif
3740        seq_puts(m, "# echo 2 > snapshot : Clears this cpu's snapshot buffer (but does not allocate)\n"
3741                    "#                      (Doesn't have to be '2'; works with any number that\n"
3742                    "#                       is not a '0' or '1')\n");
3743}
3744
3745static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter)
3746{
3747        if (iter->tr->allocated_snapshot)
3748                seq_puts(m, "#\n# * Snapshot is allocated *\n#\n");
3749        else
3750                seq_puts(m, "#\n# * Snapshot is freed *\n#\n");
3751
3752        seq_puts(m, "# Snapshot commands:\n");
3753        if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
3754                show_snapshot_main_help(m);
3755        else
3756                show_snapshot_percpu_help(m);
3757}
3758#else
3759/* Should never be called */
3760static inline void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) { }
3761#endif
3762
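/*
 * seq_file show callback: print the headers when there is no current
 * entry, flush a line left over from a previous seq_file overflow,
 * or format and print the current trace entry.
 */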
3763static int s_show(struct seq_file *m, void *v)
3764{
3765        struct trace_iterator *iter = v;
3766        int ret;
3767
3768        if (iter->ent == NULL) {
3769                if (iter->tr) {
3770                        seq_printf(m, "# tracer: %s\n", iter->trace->name);
3771                        seq_puts(m, "#\n");
3772                        test_ftrace_alive(m);
3773                }
3774                if (iter->snapshot && trace_empty(iter))
3775                        print_snapshot_help(m, iter);
3776                else if (iter->trace && iter->trace->print_header)
3777                        iter->trace->print_header(m);
3778                else
3779                        trace_default_header(m);
3780
3781        } else if (iter->leftover) {
3782                /*
3783                 * If we filled the seq_file buffer earlier, we
3784                 * want to just show it now.
3785                 */
3786                ret = trace_print_seq(m, &iter->seq);
3787
3788                /* ret should this time be zero, but you never know */
3789                iter->leftover = ret;
3790
3791        } else {
3792                print_trace_line(iter);
3793                ret = trace_print_seq(m, &iter->seq);
3794                /*
3795                 * If we overflow the seq_file buffer, then it will
3796                 * ask us for this data again at start up.
3797                 * Use that instead.
3798                 *  ret is 0 if seq_file write succeeded.
3799                 *        -1 otherwise.
3800                 */
3801                iter->leftover = ret;
3802        }
3803
3804        return 0;
3805}
3806
3807/*
3808 * Should be used after trace_array_get(), trace_types_lock
3809 * ensures that i_cdev was already initialized.
3810 */
3811static inline int tracing_get_cpu(struct inode *inode)
3812{
3813        if (inode->i_cdev) /* See trace_create_cpu_file() */
3814                return (long)inode->i_cdev - 1;
3815        return RING_BUFFER_ALL_CPUS;
3816}
3817
3818static const struct seq_operations tracer_seq_ops = {
3819        .start          = s_start,
3820        .next           = s_next,
3821        .stop           = s_stop,
3822        .show           = s_show,
3823};
3824
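/*
 * Set up the iterator behind the "trace" (or "snapshot") file: copy
 * the current tracer, pick the trace or max buffer, stop tracing
 * unless this is a snapshot read, and prepare per-cpu ring buffer
 * iterators for the selected CPU(s).
 */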
3825static struct trace_iterator *
3826__tracing_open(struct inode *inode, struct file *file, bool snapshot)
3827{
3828        struct trace_array *tr = inode->i_private;
3829        struct trace_iterator *iter;
3830        int cpu;
3831
3832        if (tracing_disabled)
3833                return ERR_PTR(-ENODEV);
3834
3835        iter = __seq_open_private(file, &tracer_seq_ops, sizeof(*iter));
3836        if (!iter)
3837                return ERR_PTR(-ENOMEM);
3838
3839        iter->buffer_iter = kcalloc(nr_cpu_ids, sizeof(*iter->buffer_iter),
3840                                    GFP_KERNEL);
3841        if (!iter->buffer_iter)
3842                goto release;
3843
3844        /*
3845         * We make a copy of the current tracer to avoid concurrent
3846         * changes on it while we are reading.
3847         */
3848        mutex_lock(&trace_types_lock);
3849        iter->trace = kzalloc(sizeof(*iter->trace), GFP_KERNEL);
3850        if (!iter->trace)
3851                goto fail;
3852
3853        *iter->trace = *tr->current_trace;
3854
3855        if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
3856                goto fail;
3857
3858        iter->tr = tr;
3859
3860#ifdef CONFIG_TRACER_MAX_TRACE
3861        /* Currently only the top directory has a snapshot */
3862        if (tr->current_trace->print_max || snapshot)
3863                iter->trace_buffer = &tr->max_buffer;
3864        else
3865#endif
3866                iter->trace_buffer = &tr->trace_buffer;
3867        iter->snapshot = snapshot;
3868        iter->pos = -1;
3869        iter->cpu_file = tracing_get_cpu(inode);
3870        mutex_init(&iter->mutex);
3871
3872        /* Notify the tracer early; before we stop tracing. */
3873        if (iter->trace && iter->trace->open)
3874                iter->trace->open(iter);
3875
3876        /* Annotate start of buffers if we had overruns */
3877        if (ring_buffer_overruns(iter->trace_buffer->buffer))
3878                iter->iter_flags |= TRACE_FILE_ANNOTATE;
3879
3880        /* Output in nanoseconds only if we are using a clock in nanoseconds. */
3881        if (trace_clocks[tr->clock_id].in_ns)
3882                iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
3883
3884        /* stop the trace while dumping if we are not opening "snapshot" */
3885        if (!iter->snapshot)
3886                tracing_stop_tr(tr);
3887
3888        if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
3889                for_each_tracing_cpu(cpu) {
3890                        iter->buffer_iter[cpu] =
3891                                ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
3892                }
3893                ring_buffer_read_prepare_sync();
3894                for_each_tracing_cpu(cpu) {
3895                        ring_buffer_read_start(iter->buffer_iter[cpu]);
3896                        tracing_iter_reset(iter, cpu);
3897                }
3898        } else {
3899                cpu = iter->cpu_file;
3900                iter->buffer_iter[cpu] =
3901                        ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
3902                ring_buffer_read_prepare_sync();
3903                ring_buffer_read_start(iter->buffer_iter[cpu]);
3904                tracing_iter_reset(iter, cpu);
3905        }
3906
3907        mutex_unlock(&trace_types_lock);
3908
3909        return iter;
3910
3911 fail:
3912        mutex_unlock(&trace_types_lock);
3913        kfree(iter->trace);
3914        kfree(iter->buffer_iter);
3915release:
3916        seq_release_private(inode, file);
3917        return ERR_PTR(-ENOMEM);
3918}
3919
3920int tracing_open_generic(struct inode *inode, struct file *filp)
3921{
3922        if (tracing_disabled)
3923                return -ENODEV;
3924
3925        filp->private_data = inode->i_private;
3926        return 0;
3927}
3928
3929bool tracing_is_disabled(void)
3930{
3931        return tracing_disabled ? true : false;
3932}
3933
3934/*
3935 * Open and update trace_array ref count.
3936 * Must have the current trace_array passed to it.
3937 */
3938static int tracing_open_generic_tr(struct inode *inode, struct file *filp)
3939{
3940        struct trace_array *tr = inode->i_private;
3941
3942        if (tracing_disabled)
3943                return -ENODEV;
3944
3945        if (trace_array_get(tr) < 0)
3946                return -ENODEV;
3947
3948        filp->private_data = inode->i_private;
3949
3950        return 0;
3951}
3952
3953static int tracing_release(struct inode *inode, struct file *file)
3954{
3955        struct trace_array *tr = inode->i_private;
3956        struct seq_file *m = file->private_data;
3957        struct trace_iterator *iter;
3958        int cpu;
3959
3960        if (!(file->f_mode & FMODE_READ)) {
3961                trace_array_put(tr);
3962                return 0;
3963        }
3964
3965        /* Writes do not use seq_file */
3966        iter = m->private;
3967        mutex_lock(&trace_types_lock);
3968
3969        for_each_tracing_cpu(cpu) {
3970                if (iter->buffer_iter[cpu])
3971                        ring_buffer_read_finish(iter->buffer_iter[cpu]);
3972        }
3973
3974        if (iter->trace && iter->trace->close)
3975                iter->trace->close(iter);
3976
3977        if (!iter->snapshot)
3978                /* reenable tracing if it was previously enabled */
3979                tracing_start_tr(tr);
3980
3981        __trace_array_put(tr);
3982
3983        mutex_unlock(&trace_types_lock);
3984
3985        mutex_destroy(&iter->mutex);
3986        free_cpumask_var(iter->started);
3987        kfree(iter->trace);
3988        kfree(iter->buffer_iter);
3989        seq_release_private(inode, file);
3990
3991        return 0;
3992}
3993
3994static int tracing_release_generic_tr(struct inode *inode, struct file *file)
3995{
3996        struct trace_array *tr = inode->i_private;
3997
3998        trace_array_put(tr);
3999        return 0;
4000}
4001
4002static int tracing_single_release_tr(struct inode *inode, struct file *file)
4003{
4004        struct trace_array *tr = inode->i_private;
4005
4006        trace_array_put(tr);
4007
4008        return single_release(inode, file);
4009}
4010
4011static int tracing_open(struct inode *inode, struct file *file)
4012{
4013        struct trace_array *tr = inode->i_private;
4014        struct trace_iterator *iter;
4015        int ret = 0;
4016
4017        if (trace_array_get(tr) < 0)
4018                return -ENODEV;
4019
4020        /* If this file was open for write, then erase contents */
4021        if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
4022                int cpu = tracing_get_cpu(inode);
4023                struct trace_buffer *trace_buf = &tr->trace_buffer;
4024
4025#ifdef CONFIG_TRACER_MAX_TRACE
4026                if (tr->current_trace->print_max)
4027                        trace_buf = &tr->max_buffer;
4028#endif
4029
4030                if (cpu == RING_BUFFER_ALL_CPUS)
4031                        tracing_reset_online_cpus(trace_buf);
4032                else
4033                        tracing_reset(trace_buf, cpu);
4034        }
4035
4036        if (file->f_mode & FMODE_READ) {
4037                iter = __tracing_open(inode, file, false);
4038                if (IS_ERR(iter))
4039                        ret = PTR_ERR(iter);
4040                else if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
4041                        iter->iter_flags |= TRACE_FILE_LAT_FMT;
4042        }
4043
4044        if (ret < 0)
4045                trace_array_put(tr);
4046
4047        return ret;
4048}
4049
4050/*
4051 * Some tracers are not suitable for instance buffers.
4052 * A tracer is always available for the global array (toplevel)
4053 * or if it explicitly states that it is.
4054 */
4055static bool
4056trace_ok_for_array(struct tracer *t, struct trace_array *tr)
4057{
4058        return (tr->flags & TRACE_ARRAY_FL_GLOBAL) || t->allow_instances;
4059}
4060
4061/* Find the next tracer that this trace array may use */
4062static struct tracer *
4063get_tracer_for_array(struct trace_array *tr, struct tracer *t)
4064{
4065        while (t && !trace_ok_for_array(t, tr))
4066                t = t->next;
4067
4068        return t;
4069}
4070
4071static void *
4072t_next(struct seq_file *m, void *v, loff_t *pos)
4073{
4074        struct trace_array *tr = m->private;
4075        struct tracer *t = v;
4076
4077        (*pos)++;
4078
4079        if (t)
4080                t = get_tracer_for_array(tr, t->next);
4081
4082        return t;
4083}
4084
4085static void *t_start(struct seq_file *m, loff_t *pos)
4086{
4087        struct trace_array *tr = m->private;
4088        struct tracer *t;
4089        loff_t l = 0;
4090
4091        mutex_lock(&trace_types_lock);
4092
4093        t = get_tracer_for_array(tr, trace_types);
4094        for (; t && l < *pos; t = t_next(m, t, &l))
4095                        ;
4096
4097        return t;
4098}
4099
4100static void t_stop(struct seq_file *m, void *p)
4101{
4102        mutex_unlock(&trace_types_lock);
4103}
4104
4105static int t_show(struct seq_file *m, void *v)
4106{
4107        struct tracer *t = v;
4108
4109        if (!t)
4110                return 0;
4111
4112        seq_puts(m, t->name);
4113        if (t->next)
4114                seq_putc(m, ' ');
4115        else
4116                seq_putc(m, '\n');
4117
4118        return 0;
4119}
4120
4121static const struct seq_operations show_traces_seq_ops = {
4122        .start          = t_start,
4123        .next           = t_next,
4124        .stop           = t_stop,
4125        .show           = t_show,
4126};
4127
4128static int show_traces_open(struct inode *inode, struct file *file)
4129{
4130        struct trace_array *tr = inode->i_private;
4131        struct seq_file *m;
4132        int ret;
4133
4134        if (tracing_disabled)
4135                return -ENODEV;
4136
4137        ret = seq_open(file, &show_traces_seq_ops);
4138        if (ret)
4139                return ret;
4140
4141        m = file->private_data;
4142        m->private = tr;
4143
4144        return 0;
4145}
4146
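/*
 * Writes to the "trace" file are accepted but discarded; opening the
 * file with O_TRUNC is what actually clears the buffer (see
 * tracing_open()).
 */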
4147static ssize_t
4148tracing_write_stub(struct file *filp, const char __user *ubuf,
4149                   size_t count, loff_t *ppos)
4150{
4151        return count;
4152}
4153
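/* Only readers go through seq_file; a write-only open stays at offset 0. */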
4154loff_t tracing_lseek(struct file *file, loff_t offset, int whence)
4155{
4156        int ret;
4157
4158        if (file->f_mode & FMODE_READ)
4159                ret = seq_lseek(file, offset, whence);
4160        else
4161                file->f_pos = ret = 0;
4162
4163        return ret;
4164}
4165
4166static const struct file_operations tracing_fops = {
4167        .open           = tracing_open,
4168        .read           = seq_read,
4169        .write          = tracing_write_stub,
4170        .llseek         = tracing_lseek,
4171        .release        = tracing_release,
4172};
4173
4174static const struct file_operations show_traces_fops = {
4175        .open           = show_traces_open,
4176        .read           = seq_read,
4177        .release        = seq_release,
4178        .llseek         = seq_lseek,
4179};
4180
4181/*
4182 * The tracer itself will not take this lock, but still we want
4183 * to provide a consistent cpumask to user-space:
4184 */
4185static DEFINE_MUTEX(tracing_cpumask_update_lock);
4186
4187/*
4188 * Temporary storage for the character representation of the
4189 * CPU bitmask (and one more byte for the newline):
4190 */
4191static char mask_str[NR_CPUS + 1];
4192
4193static ssize_t
4194tracing_cpumask_read(struct file *filp, char __user *ubuf,
4195                     size_t count, loff_t *ppos)
4196{
4197        struct trace_array *tr = file_inode(filp)->i_private;
4198        int len;
4199
4200        mutex_lock(&tracing_cpumask_update_lock);
4201
4202        len = snprintf(mask_str, sizeof(mask_str), "%*pb\n",
4203                       cpumask_pr_args(tr->tracing_cpumask));
4204        if (len >= count) {
4205                count = -EINVAL;
4206                goto out_err;
4207        }
4208        count = simple_read_from_buffer(ubuf, count, ppos, mask_str, len);
4209
4210out_err:
4211        mutex_unlock(&tracing_cpumask_update_lock);
4212
4213        return count;
4214}
4215
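/*
 * Writing a hex CPU mask limits tracing to those CPUs, e.g.
 * (illustrative):
 *
 *	# echo 3 > tracing_cpumask	(trace only CPUs 0 and 1)
 *
 * CPUs dropped from the mask have their per-cpu recording disabled
 * before the new mask is published, and CPUs added to the mask are
 * re-enabled, all under tr->max_lock with interrupts off.
 */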
4216static ssize_t
4217tracing_cpumask_write(struct file *filp, const char __user *ubuf,
4218                      size_t count, loff_t *ppos)
4219{
4220        struct trace_array *tr = file_inode(filp)->i_private;
4221        cpumask_var_t tracing_cpumask_new;
4222        int err, cpu;
4223
4224        if (!alloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
4225                return -ENOMEM;
4226
4227        err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
4228        if (err)
4229                goto err_unlock;
4230
4231        mutex_lock(&tracing_cpumask_update_lock);
4232
4233        local_irq_disable();
4234        arch_spin_lock(&tr->max_lock);
4235        for_each_tracing_cpu(cpu) {
4236                /*
4237                 * Increase/decrease the disabled counter if we are
4238                 * about to flip a bit in the cpumask:
4239                 */
4240                if (cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
4241                                !cpumask_test_cpu(cpu, tracing_cpumask_new)) {
4242                        atomic_inc(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
4243                        ring_buffer_record_disable_cpu(tr->trace_buffer.buffer, cpu);
4244                }
4245                if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
4246                                cpumask_test_cpu(cpu, tracing_cpumask_new)) {
4247                        atomic_dec(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
4248                        ring_buffer_record_enable_cpu(tr->trace_buffer.buffer, cpu);
4249                }
4250        }
4251        arch_spin_unlock(&tr->max_lock);
4252        local_irq_enable();
4253
4254        cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new);
4255
4256        mutex_unlock(&tracing_cpumask_update_lock);
4257        free_cpumask_var(tracing_cpumask_new);
4258
4259        return count;
4260
4261err_unlock:
4262        free_cpumask_var(tracing_cpumask_new);
4263
4264        return err;
4265}
4266
4267static const struct file_operations tracing_cpumask_fops = {
4268        .open           = tracing_open_generic_tr,
4269        .read           = tracing_cpumask_read,
4270        .write          = tracing_cpumask_write,
4271        .release        = tracing_release_generic_tr,
4272        .llseek         = generic_file_llseek,
4273};
4274
4275static int tracing_trace_options_show(struct seq_file *m, void *v)
4276{
4277        struct tracer_opt *trace_opts;
4278        struct trace_array *tr = m->private;
4279        u32 tracer_flags;
4280        int i;
4281
4282        mutex_lock(&trace_types_lock);
4283        tracer_flags = tr->current_trace->flags->val;
4284        trace_opts = tr->current_trace->flags->opts;
4285
4286        for (i = 0; trace_options[i]; i++) {
4287                if (tr->trace_flags & (1 << i))
4288                        seq_printf(m, "%s\n", trace_options[i]);
4289                else
4290                        seq_printf(m, "no%s\n", trace_options[i]);
4291        }
4292
4293        for (i = 0; trace_opts[i].name; i++) {
4294                if (tracer_flags & trace_opts[i].bit)
4295                        seq_printf(m, "%s\n", trace_opts[i].name);
4296                else
4297                        seq_printf(m, "no%s\n", trace_opts[i].name);
4298        }
4299        mutex_unlock(&trace_types_lock);
4300
4301        return 0;
4302}
4303
4304static int __set_tracer_option(struct trace_array *tr,
4305                               struct tracer_flags *tracer_flags,
4306                               struct tracer_opt *opts, int neg)
4307{
4308        struct tracer *trace = tracer_flags->trace;
4309        int ret;
4310
4311        ret = trace->set_flag(tr, tracer_flags->val, opts->bit, !neg);
4312        if (ret)
4313                return ret;
4314
4315        if (neg)
4316                tracer_flags->val &= ~opts->bit;
4317        else
4318                tracer_flags->val |= opts->bit;
4319        return 0;
4320}
4321
4322/* Try to assign a tracer specific option */
4323static int set_tracer_option(struct trace_array *tr, char *cmp, int neg)
4324{
4325        struct tracer *trace = tr->current_trace;
4326        struct tracer_flags *tracer_flags = trace->flags;
4327        struct tracer_opt *opts = NULL;
4328        int i;
4329
4330        for (i = 0; tracer_flags->opts[i].name; i++) {
4331                opts = &tracer_flags->opts[i];
4332
4333                if (strcmp(cmp, opts->name) == 0)
4334                        return __set_tracer_option(tr, trace->flags, opts, neg);
4335        }
4336
4337        return -EINVAL;
4338}
4339
4340/* Some tracers require overwrite to stay enabled */
4341int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
4342{
4343        if (tracer->enabled && (mask & TRACE_ITER_OVERWRITE) && !set)
4344                return -1;
4345
4346        return 0;
4347}
4348
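/*
 * Flip a single TRACE_ITER_* flag for this trace array: skip if the
 * flag already has the requested value, let the current tracer veto
 * the change via ->flag_changed(), then apply any side effects the
 * flag requires (cmdline/tgid recording, fork following, buffer
 * overwrite mode, trace_printk).
 */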
4349int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
4350{
4351        /* do nothing if flag is already set */
4352        if (!!(tr->trace_flags & mask) == !!enabled)
4353                return 0;
4354
4355        /* Give the tracer a chance to approve the change */
4356        if (tr->current_trace->flag_changed)
4357                if (tr->current_trace->flag_changed(tr, mask, !!enabled))
4358                        return -EINVAL;
4359
4360        if (enabled)
4361                tr->trace_flags |= mask;
4362        else
4363                tr->trace_flags &= ~mask;
4364
4365        if (mask == TRACE_ITER_RECORD_CMD)
4366                trace_event_enable_cmd_record(enabled);
4367
4368        if (mask == TRACE_ITER_RECORD_TGID) {
4369                if (!tgid_map)
4370                        tgid_map = kcalloc(PID_MAX_DEFAULT + 1, sizeof(*tgid_map),
4371                                           GFP_KERNEL);
4372                if (!tgid_map) {
4373                        tr->trace_flags &= ~TRACE_ITER_RECORD_TGID;
4374                        return -ENOMEM;
4375                }
4376
4377                trace_event_enable_tgid_record(enabled);
4378        }
4379
4380        if (mask == TRACE_ITER_EVENT_FORK)
4381                trace_event_follow_fork(tr, enabled);
4382
4383        if (mask == TRACE_ITER_FUNC_FORK)
4384                ftrace_pid_follow_fork(tr, enabled);
4385
4386        if (mask == TRACE_ITER_OVERWRITE) {
4387                ring_buffer_change_overwrite(tr->trace_buffer.buffer, enabled);
4388#ifdef CONFIG_TRACER_MAX_TRACE
4389                ring_buffer_change_overwrite(tr->max_buffer.buffer, enabled);
4390#endif
4391        }
4392
4393        if (mask == TRACE_ITER_PRINTK) {
4394                trace_printk_start_stop_comm(enabled);
4395                trace_printk_control(enabled);
4396        }
4397
4398        return 0;
4399}
4400
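/*
 * Parse one option token as written to the trace_options file.  A
 * "no" prefix clears the flag, e.g. (illustrative):
 *
 *	# echo noprint-parent > trace_options
 *
 * Generic flags are tried first; anything unmatched is handed to the
 * current tracer's private options.
 */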
4401static int trace_set_options(struct trace_array *tr, char *option)
4402{
4403        char *cmp;
4404        int neg = 0;
4405        int ret = -ENODEV;
4406        int i;
4407        size_t orig_len = strlen(option);
4408
4409        cmp = strstrip(option);
4410
4411        if (strncmp(cmp, "no", 2) == 0) {
4412                neg = 1;
4413                cmp += 2;
4414        }
4415
4416        mutex_lock(&trace_types_lock);
4417
4418        for (i = 0; trace_options[i]; i++) {
4419                if (strcmp(cmp, trace_options[i]) == 0) {
4420                        ret = set_tracer_flag(tr, 1 << i, !neg);
4421                        break;
4422                }
4423        }
4424
4425        /* If no option could be set, test the specific tracer options */
4426        if (!trace_options[i])
4427                ret = set_tracer_option(tr, cmp, neg);
4428
4429        mutex_unlock(&trace_types_lock);
4430
4431        /*
4432         * If the first trailing whitespace is replaced with '\0' by strstrip,
4433         * turn it back into a space.
4434         */
4435        if (orig_len > strlen(option))
4436                option[strlen(option)] = ' ';
4437
4438        return ret;
4439}
4440
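/*
 * Apply the comma-separated options from the "trace_options=" boot
 * parameter (saved in trace_boot_options_buf) to the global trace
 * array, restoring the commas so this can run more than once.
 */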
4441static void __init apply_trace_boot_options(void)
4442{
4443        char *buf = trace_boot_options_buf;
4444        char *option;
4445
4446        while (true) {
4447                option = strsep(&buf, ",");
4448
4449                if (!option)
4450                        break;
4451
4452                if (*option)
4453                        trace_set_options(&global_trace, option);
4454
4455                /* Put back the comma to allow this to be called again */
4456                if (buf)
4457                        *(buf - 1) = ',';
4458        }
4459}
4460
4461static ssize_t
4462tracing_trace_options_write(struct file *filp, const char __user *ubuf,
4463                        size_t cnt, loff_t *ppos)
4464{
4465        struct seq_file *m = filp->private_data;
4466        struct trace_array *tr = m->private;
4467        char buf[64];
4468        int ret;
4469
4470        if (cnt >= sizeof(buf))
4471                return -EINVAL;
4472
4473        if (copy_from_user(buf, ubuf, cnt))
4474                return -EFAULT;
4475
4476        buf[cnt] = 0;
4477
4478        ret = trace_set_options(tr, buf);
4479        if (ret < 0)
4480                return ret;
4481
4482        *ppos += cnt;
4483
4484        return cnt;
4485}
4486
4487static int tracing_trace_options_open(struct inode *inode, struct file *file)
4488{
4489        struct trace_array *tr = inode->i_private;
4490        int ret;
4491
4492        if (tracing_disabled)
4493                return -ENODEV;
4494
4495        if (trace_array_get(tr) < 0)
4496                return -ENODEV;
4497
4498        ret = single_open(file, tracing_trace_options_show, inode->i_private);
4499        if (ret < 0)
4500                trace_array_put(tr);
4501
4502        return ret;
4503}
4504
4505static const struct file_operations tracing_iter_fops = {
4506        .open           = tracing_trace_options_open,
4507        .read           = seq_read,
4508        .llseek         = seq_lseek,
4509        .release        = tracing_single_release_tr,
4510        .write          = tracing_trace_options_write,
4511};
4512
4513static const char readme_msg[] =
4514        "tracing mini-HOWTO:\n\n"
4515        "# echo 0 > tracing_on : quick way to disable tracing\n"
4516        "# echo 1 > tracing_on : quick way to re-enable tracing\n\n"
4517        " Important files:\n"
4518        "  trace\t\t\t- The static contents of the buffer\n"
4519        "\t\t\t  To clear the buffer write into this file: echo > trace\n"
4520        "  trace_pipe\t\t- A consuming read to see the contents of the buffer\n"
4521        "  current_tracer\t- function and latency tracers\n"
4522        "  available_tracers\t- list of configured tracers for current_tracer\n"
4523        "  buffer_size_kb\t- view and modify size of per cpu buffer\n"
4524        "  buffer_total_size_kb  - view total size of all cpu buffers\n\n"
4525        "  trace_clock\t\t- change the clock used to order events\n"
4526        "       local:   Per cpu clock but may not be synced across CPUs\n"
4527        "      global:   Synced across CPUs but slows tracing down.\n"
4528        "     counter:   Not a clock, but just an increment\n"
4529        "      uptime:   Jiffy counter from time of boot\n"
4530        "        perf:   Same clock that perf events use\n"
4531#ifdef CONFIG_X86_64
4532        "     x86-tsc:   TSC cycle counter\n"
4533#endif
4534        "\n  trace_marker\t\t- Writes into this file are written into the kernel buffer\n"
4535        "\n  trace_marker_raw\t\t- Writes into this file are written as binary data into the kernel buffer\n"
4536        "  tracing_cpumask\t- Limit which CPUs to trace\n"
4537        "  instances\t\t- Make sub-buffers with: mkdir instances/foo\n"
4538        "\t\t\t  Remove sub-buffer with rmdir\n"
4539        "  trace_options\t\t- Set format or modify how tracing happens\n"
4540        "\t\t\t  Disable an option by prefixing 'no' to the\n"
4541        "\t\t\t  option name\n"
4542        "  saved_cmdlines_size\t- echo command number in here to store comm-pid list\n"
4543#ifdef CONFIG_DYNAMIC_FTRACE
4544        "\n  available_filter_functions - list of functions that can be filtered on\n"
4545        "  set_ftrace_filter\t- echo function name in here to only trace these\n"
4546        "\t\t\t  functions\n"
4547        "\t     accepts: func_full_name or glob-matching-pattern\n"
4548        "\t     modules: Can select a group via module\n"
4549        "\t      Format: :mod:<module-name>\n"
4550        "\t     example: echo :mod:ext3 > set_ftrace_filter\n"
4551        "\t    triggers: a command to perform when function is hit\n"
4552        "\t      Format: <function>:<trigger>[:count]\n"
4553        "\t     trigger: traceon, traceoff\n"
4554        "\t\t      enable_event:<system>:<event>\n"
4555        "\t\t      disable_event:<system>:<event>\n"
4556#ifdef CONFIG_STACKTRACE
4557        "\t\t      stacktrace\n"
4558#endif
4559#ifdef CONFIG_TRACER_SNAPSHOT
4560        "\t\t      snapshot\n"
4561#endif
4562        "\t\t      dump\n"
4563        "\t\t      cpudump\n"
4564        "\t     example: echo do_fault:traceoff > set_ftrace_filter\n"
4565        "\t              echo do_trap:traceoff:3 > set_ftrace_filter\n"
4566        "\t     The first one will disable tracing every time do_fault is hit\n"
4567        "\t     The second will disable tracing at most 3 times when do_trap is hit\n"
4568        "\t       The first time do_trap is hit and it disables tracing, the\n"
4569        "\t       counter will decrement to 2. If tracing is already disabled,\n"
4570        "\t       the counter will not decrement. It only decrements when the\n"
4571        "\t       trigger did work\n"
4572        "\t     To remove trigger without count:\n"
4573        "\t       echo '!<function>:<trigger>' > set_ftrace_filter\n"
4574        "\t     To remove trigger with a count:\n"
4575        "\t       echo '!<function>:<trigger>:0' > set_ftrace_filter\n"
4576        "  set_ftrace_notrace\t- echo function name in here to never trace.\n"
4577        "\t    accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
4578        "\t    modules: Can select a group via module command :mod:\n"
4579        "\t    Does not accept triggers\n"
4580#endif /* CONFIG_DYNAMIC_FTRACE */
4581#ifdef CONFIG_FUNCTION_TRACER
4582        "  set_ftrace_pid\t- Write pid(s) to only function trace those pids\n"
4583        "\t\t    (function)\n"
4584#endif
4585#ifdef CONFIG_FUNCTION_GRAPH_TRACER
4586        "  set_graph_function\t- Trace the nested calls of a function (function_graph)\n"
4587        "  set_graph_notrace\t- Do not trace the nested calls of a function (function_graph)\n"
4588        "  max_graph_depth\t- Trace a limited depth of nested calls (0 is unlimited)\n"
4589#endif
4590#ifdef CONFIG_TRACER_SNAPSHOT
4591        "\n  snapshot\t\t- Like 'trace' but shows the content of the static\n"
4592        "\t\t\t  snapshot buffer. Read the contents for more\n"
4593        "\t\t\t  information\n"
4594#endif
4595#ifdef CONFIG_STACK_TRACER
4596        "  stack_trace\t\t- Shows the max stack trace when active\n"
4597        "  stack_max_size\t- Shows current max stack size that was traced\n"
4598        "\t\t\t  Write into this file to reset the max size (trigger a\n"
4599        "\t\t\t  new trace)\n"
4600#ifdef CONFIG_DYNAMIC_FTRACE
4601        "  stack_trace_filter\t- Like set_ftrace_filter but limits what stack_trace\n"
4602        "\t\t\t  traces\n"
4603#endif
4604#endif /* CONFIG_STACK_TRACER */
4605#ifdef CONFIG_KPROBE_EVENTS
4606        "  kprobe_events\t\t- Add/remove/show the kernel dynamic events\n"
4607        "\t\t\t  Write into this file to define/undefine new trace events.\n"
4608#endif
4609#ifdef CONFIG_UPROBE_EVENTS
4610        "  uprobe_events\t\t- Add/remove/show the userspace dynamic events\n"
4611        "\t\t\t  Write into this file to define/undefine new trace events.\n"
4612#endif
4613#if defined(CONFIG_KPROBE_EVENTS) || defined(CONFIG_UPROBE_EVENTS)
4614        "\t  accepts: event-definitions (one definition per line)\n"
4615        "\t   Format: p[:[<group>/]<event>] <place> [<args>]\n"
4616        "\t           r[maxactive][:[<group>/]<event>] <place> [<args>]\n"
4617        "\t           -:[<group>/]<event>\n"
4618#ifdef CONFIG_KPROBE_EVENTS
4619        "\t    place: [<module>:]<symbol>[+<offset>]|<memaddr>\n"
4620        "\t    place (kretprobe): [<module>:]<symbol>[+<offset>]|<memaddr>\n"
4621#endif
4622#ifdef CONFIG_UPROBE_EVENTS
4623        "\t    place: <path>:<offset>\n"
4624#endif
4625        "\t     args: <name>=fetcharg[:type]\n"
4626        "\t fetcharg: %<register>, @<address>, @<symbol>[+|-<offset>],\n"
4627        "\t           $stack<index>, $stack, $retval, $comm\n"
4628        "\t     type: s8/16/32/64, u8/16/32/64, x8/16/32/64, string,\n"
4629        "\t           b<bit-width>@<bit-offset>/<container-size>\n"
4630#endif
4631        "  events/\t\t- Directory containing all trace event subsystems:\n"
4632        "      enable\t\t- Write 0/1 to enable/disable tracing of all events\n"
4633        "  events/<system>/\t- Directory containing all trace events for <system>:\n"
4634        "      enable\t\t- Write 0/1 to enable/disable tracing of all <system>\n"
4635        "\t\t\t  events\n"
4636        "      filter\t\t- If set, only events passing filter are traced\n"
4637        "  events/<system>/<event>/\t- Directory containing control files for\n"
4638        "\t\t\t  <event>:\n"
4639        "      enable\t\t- Write 0/1 to enable/disable tracing of <event>\n"
4640        "      filter\t\t- If set, only events passing filter are traced\n"
4641        "      trigger\t\t- If set, a command to perform when event is hit\n"
4642        "\t    Format: <trigger>[:count][if <filter>]\n"
4643        "\t   trigger: traceon, traceoff\n"
4644        "\t            enable_event:<system>:<event>\n"
4645        "\t            disable_event:<system>:<event>\n"
4646#ifdef CONFIG_HIST_TRIGGERS
4647        "\t            enable_hist:<system>:<event>\n"
4648        "\t            disable_hist:<system>:<event>\n"
4649#endif
4650#ifdef CONFIG_STACKTRACE
4651        "\t\t    stacktrace\n"
4652#endif
4653#ifdef CONFIG_TRACER_SNAPSHOT
4654        "\t\t    snapshot\n"
4655#endif
4656#ifdef CONFIG_HIST_TRIGGERS
4657        "\t\t    hist (see below)\n"
4658#endif
4659        "\t   example: echo traceoff > events/block/block_unplug/trigger\n"
4660        "\t            echo traceoff:3 > events/block/block_unplug/trigger\n"
4661        "\t            echo 'enable_event:kmem:kmalloc:3 if nr_rq > 1' > \\\n"
4662        "\t                  events/block/block_unplug/trigger\n"
4663        "\t   The first disables tracing every time block_unplug is hit.\n"
4664        "\t   The second disables tracing the first 3 times block_unplug is hit.\n"
4665        "\t   The third enables the kmalloc event the first 3 times block_unplug\n"
4666        "\t     is hit and has value of greater than 1 for the 'nr_rq' event field.\n"
4667        "\t   Like function triggers, the counter is only decremented if it\n"
4668        "\t    enabled or disabled tracing.\n"
4669        "\t   To remove a trigger without a count:\n"
4670        "\t     echo '!<trigger>' > <system>/<event>/trigger\n"
4671        "\t   To remove a trigger with a count:\n"
4672        "\t     echo '!<trigger>:0' > <system>/<event>/trigger\n"
4673        "\t   Filters can be ignored when removing a trigger.\n"
4674#ifdef CONFIG_HIST_TRIGGERS
4675        "      hist trigger\t- If set, event hits are aggregated into a hash table\n"
4676        "\t    Format: hist:keys=<field1[,field2,...]>\n"
4677        "\t            [:values=<field1[,field2,...]>]\n"
4678        "\t            [:sort=<field1[,field2,...]>]\n"
4679        "\t            [:size=#entries]\n"
4680        "\t            [:pause][:continue][:clear]\n"
4681        "\t            [:name=histname1]\n"
4682        "\t            [if <filter>]\n\n"
4683        "\t    When a matching event is hit, an entry is added to a hash\n"
4684        "\t    table using the key(s) and value(s) named, and the value of a\n"
4685        "\t    sum called 'hitcount' is incremented.  Keys and values\n"
4686        "\t    correspond to fields in the event's format description.  Keys\n"
4687        "\t    can be any field, or the special string 'stacktrace'.\n"
4688        "\t    Compound keys consisting of up to two fields can be specified\n"
4689        "\t    by the 'keys' keyword.  Values must correspond to numeric\n"
4690        "\t    fields.  Sort keys consisting of up to two fields can be\n"
4691        "\t    specified using the 'sort' keyword.  The sort direction can\n"
4692        "\t    be modified by appending '.descending' or '.ascending' to a\n"
4693        "\t    sort field.  The 'size' parameter can be used to specify more\n"
4694        "\t    or fewer than the default 2048 entries for the hashtable size.\n"
4695        "\t    If a hist trigger is given a name using the 'name' parameter,\n"
4696        "\t    its histogram data will be shared with other triggers of the\n"
4697        "\t    same name, and trigger hits will update this common data.\n\n"
4698        "\t    Reading the 'hist' file for the event will dump the hash\n"
4699        "\t    table in its entirety to stdout.  If there are multiple hist\n"
4700        "\t    triggers attached to an event, there will be a table for each\n"
4701        "\t    trigger in the output.  The table displayed for a named\n"
4702        "\t    trigger will be the same as any other instance having the\n"
4703        "\t    same name.  The default format used to display a given field\n"
4704        "\t    can be modified by appending any of the following modifiers\n"
4705        "\t    to the field name, as applicable:\n\n"
4706        "\t            .hex        display a number as a hex value\n"
4707        "\t            .sym        display an address as a symbol\n"
4708        "\t            .sym-offset display an address as a symbol and offset\n"
4709        "\t            .execname   display a common_pid as a program name\n"
4710        "\t            .syscall    display a syscall id as a syscall name\n"
4711        "\t            .log2       display log2 value rather than raw number\n\n"
4712        "\t    The 'pause' parameter can be used to pause an existing hist\n"
4713        "\t    trigger or to start a hist trigger but not log any events\n"
4714        "\t    until told to do so.  'continue' can be used to start or\n"
4715        "\t    restart a paused hist trigger.\n\n"
4716        "\t    The 'clear' parameter will clear the contents of a running\n"
4717        "\t    hist trigger and leave its current paused/active state\n"
4718        "\t    unchanged.\n\n"
4719        "\t    The enable_hist and disable_hist triggers can be used to\n"
4720        "\t    have one event conditionally start and stop another event's\n"
4721        "\t    already-attached hist trigger.  The syntax is analogous to\n"
4722        "\t    the enable_event and disable_event triggers.\n"
4723#endif
4724;
4725
4726static ssize_t
4727tracing_readme_read(struct file *filp, char __user *ubuf,
4728                       size_t cnt, loff_t *ppos)
4729{
4730        return simple_read_from_buffer(ubuf, cnt, ppos,
4731                                        readme_msg, strlen(readme_msg));
4732}
4733
4734static const struct file_operations tracing_readme_fops = {
4735        .open           = tracing_open_generic,
4736        .read           = tracing_readme_read,
4737        .llseek         = generic_file_llseek,
4738};
4739
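/*
 * seq_file iterator over tgid_map: visit only the pids that have a
 * recorded tgid, skipping empty slots.
 */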
4740static void *saved_tgids_next(struct seq_file *m, void *v, loff_t *pos)
4741{
4742        int *ptr = v;
4743
4744        if (*pos || m->count)
4745                ptr++;
4746
4747        (*pos)++;
4748
4749        for (; ptr <= &tgid_map[PID_MAX_DEFAULT]; ptr++) {
4750                if (trace_find_tgid(*ptr))
4751                        return ptr;
4752        }
4753
4754        return NULL;
4755}
4756
4757static void *saved_tgids_start(struct seq_file *m, loff_t *pos)
4758{
4759        void *v;
4760        loff_t l = 0;
4761
4762        if (!tgid_map)
4763                return NULL;
4764
4765        v = &tgid_map[0];
4766        while (l <= *pos) {
4767                v = saved_tgids_next(m, v, &l);
4768                if (!v)
4769                        return NULL;
4770        }
4771
4772        return v;
4773}
4774
4775static void saved_tgids_stop(struct seq_file *m, void *v)
4776{
4777}
4778
4779static int saved_tgids_show(struct seq_file *m, void *v)
4780{
4781        int pid = (int *)v - tgid_map;
4782
4783        seq_printf(m, "%d %d\n", pid, trace_find_tgid(pid));
4784        return 0;
4785}
4786
4787static const struct seq_operations tracing_saved_tgids_seq_ops = {
4788        .start          = saved_tgids_start,
4789        .stop           = saved_tgids_stop,
4790        .next           = saved_tgids_next,
4791        .show           = saved_tgids_show,
4792};
4793
4794static int tracing_saved_tgids_open(struct inode *inode, struct file *filp)
4795{
4796        if (tracing_disabled)
4797                return -ENODEV;
4798
4799        return seq_open(filp, &tracing_saved_tgids_seq_ops);
4800}
4801
4802
4803static const struct file_operations tracing_saved_tgids_fops = {
4804        .open           = tracing_saved_tgids_open,
4805        .read           = seq_read,
4806        .llseek         = seq_lseek,
4807        .release        = seq_release,
4808};
4809
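/*
 * Example (a sketch; assumes the "record-tgid" option was enabled so
 * that tgid_map has been populated by traced events):
 *
 *     echo 1 > /sys/kernel/debug/tracing/options/record-tgid
 *     cat /sys/kernel/debug/tracing/saved_tgids
 *     1048 1048
 *     1049 1048
 *
 * Each line is "<pid> <tgid>", as emitted by saved_tgids_show() above.
 */
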
4810static void *saved_cmdlines_next(struct seq_file *m, void *v, loff_t *pos)
4811{
4812        unsigned int *ptr = v;
4813
4814        if (*pos || m->count)
4815                ptr++;
4816
4817        (*pos)++;
4818
4819        for (; ptr < &savedcmd->map_cmdline_to_pid[savedcmd->cmdline_num];
4820             ptr++) {
4821                if (*ptr == -1 || *ptr == NO_CMDLINE_MAP)
4822                        continue;
4823
4824                return ptr;
4825        }
4826
4827        return NULL;
4828}
4829
4830static void *saved_cmdlines_start(struct seq_file *m, loff_t *pos)
4831{
4832        void *v;
4833        loff_t l = 0;
4834
4835        preempt_disable();
4836        arch_spin_lock(&trace_cmdline_lock);
4837
4838        v = &savedcmd->map_cmdline_to_pid[0];
4839        while (l <= *pos) {
4840                v = saved_cmdlines_next(m, v, &l);
4841                if (!v)
4842                        return NULL;
4843        }
4844
4845        return v;
4846}
4847
4848static void saved_cmdlines_stop(struct seq_file *m, void *v)
4849{
4850        arch_spin_unlock(&trace_cmdline_lock);
4851        preempt_enable();
4852}
4853
4854static int saved_cmdlines_show(struct seq_file *m, void *v)
4855{
4856        char buf[TASK_COMM_LEN];
4857        unsigned int *pid = v;
4858
4859        __trace_find_cmdline(*pid, buf);
4860        seq_printf(m, "%d %s\n", *pid, buf);
4861        return 0;
4862}
4863
4864static const struct seq_operations tracing_saved_cmdlines_seq_ops = {
4865        .start          = saved_cmdlines_start,
4866        .next           = saved_cmdlines_next,
4867        .stop           = saved_cmdlines_stop,
4868        .show           = saved_cmdlines_show,
4869};
4870
4871static int tracing_saved_cmdlines_open(struct inode *inode, struct file *filp)
4872{
4873        if (tracing_disabled)
4874                return -ENODEV;
4875
4876        return seq_open(filp, &tracing_saved_cmdlines_seq_ops);
4877}
4878
4879static const struct file_operations tracing_saved_cmdlines_fops = {
4880        .open           = tracing_saved_cmdlines_open,
4881        .read           = seq_read,
4882        .llseek         = seq_lseek,
4883        .release        = seq_release,
4884};
4885
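/*
 * Example (a sketch): the seq_file above emits one "<pid> <comm>" pair
 * per cached entry:
 *
 *     cat /sys/kernel/debug/tracing/saved_cmdlines
 *     615 kworker/u16:3
 *     1234 bash
 *
 * Ordering follows the map_cmdline_to_pid array, not pid order.
 */
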
4886static ssize_t
4887tracing_saved_cmdlines_size_read(struct file *filp, char __user *ubuf,
4888                                 size_t cnt, loff_t *ppos)
4889{
4890        char buf[64];
4891        int r;
4892
4893        arch_spin_lock(&trace_cmdline_lock);
4894        r = scnprintf(buf, sizeof(buf), "%u\n", savedcmd->cmdline_num);
4895        arch_spin_unlock(&trace_cmdline_lock);
4896
4897        return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
4898}
4899
4900static void free_saved_cmdlines_buffer(struct saved_cmdlines_buffer *s)
4901{
4902        kfree(s->saved_cmdlines);
4903        kfree(s->map_cmdline_to_pid);
4904        kfree(s);
4905}
4906
4907static int tracing_resize_saved_cmdlines(unsigned int val)
4908{
4909        struct saved_cmdlines_buffer *s, *savedcmd_temp;
4910
4911        s = kmalloc(sizeof(*s), GFP_KERNEL);
4912        if (!s)
4913                return -ENOMEM;
4914
4915        if (allocate_cmdlines_buffer(val, s) < 0) {
4916                kfree(s);
4917                return -ENOMEM;
4918        }
4919
4920        arch_spin_lock(&trace_cmdline_lock);
4921        savedcmd_temp = savedcmd;
4922        savedcmd = s;
4923        arch_spin_unlock(&trace_cmdline_lock);
4924        free_saved_cmdlines_buffer(savedcmd_temp);
4925
4926        return 0;
4927}
4928
4929static ssize_t
4930tracing_saved_cmdlines_size_write(struct file *filp, const char __user *ubuf,
4931                                  size_t cnt, loff_t *ppos)
4932{
4933        unsigned long val;
4934        int ret;
4935
4936        ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
4937        if (ret)
4938                return ret;
4939
4940        /* must have at least 1 entry and at most PID_MAX_DEFAULT entries */
4941        if (!val || val > PID_MAX_DEFAULT)
4942                return -EINVAL;
4943
4944        ret = tracing_resize_saved_cmdlines((unsigned int)val);
4945        if (ret < 0)
4946                return ret;
4947
4948        *ppos += cnt;
4949
4950        return cnt;
4951}
4952
4953static const struct file_operations tracing_saved_cmdlines_size_fops = {
4954        .open           = tracing_open_generic,
4955        .read           = tracing_saved_cmdlines_size_read,
4956        .write          = tracing_saved_cmdlines_size_write,
4957};
4958
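/*
 * Example (a sketch): growing the comm cache so that busy systems can
 * resolve more pids in the trace output:
 *
 *     cat /sys/kernel/debug/tracing/saved_cmdlines_size
 *     echo 1024 > /sys/kernel/debug/tracing/saved_cmdlines_size
 *
 * The write path allocates a fresh saved_cmdlines_buffer, swaps it in
 * under trace_cmdline_lock, and frees the old one, so any previously
 * cached comms are discarded by a resize.
 */
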
4959#ifdef CONFIG_TRACE_EVAL_MAP_FILE
4960static union trace_eval_map_item *
4961update_eval_map(union trace_eval_map_item *ptr)
4962{
4963        if (!ptr->map.eval_string) {
4964                if (ptr->tail.next) {
4965                        ptr = ptr->tail.next;
4966                        /* Set ptr to the next real item (skip head) */
4967                        ptr++;
4968                } else
4969                        return NULL;
4970        }
4971        return ptr;
4972}
4973
4974static void *eval_map_next(struct seq_file *m, void *v, loff_t *pos)
4975{
4976        union trace_eval_map_item *ptr = v;
4977
4978        /*
4979         * Paranoid! If ptr points to end, we don't want to increment past it.
4980         * This really should never happen.
4981         */
4982        ptr = update_eval_map(ptr);
4983        if (WARN_ON_ONCE(!ptr))
4984                return NULL;
4985
4986        ptr++;
4987
4988        (*pos)++;
4989
4990        ptr = update_eval_map(ptr);
4991
4992        return ptr;
4993}
4994
4995static void *eval_map_start(struct seq_file *m, loff_t *pos)
4996{
4997        union trace_eval_map_item *v;
4998        loff_t l = 0;
4999
5000        mutex_lock(&trace_eval_mutex);
5001
5002        v = trace_eval_maps;
5003        if (v)
5004                v++;
5005
5006        while (v && l < *pos) {
5007                v = eval_map_next(m, v, &l);
5008        }
5009
5010        return v;
5011}
5012
5013static void eval_map_stop(struct seq_file *m, void *v)
5014{
5015        mutex_unlock(&trace_eval_mutex);
5016}
5017
5018static int eval_map_show(struct seq_file *m, void *v)
5019{
5020        union trace_eval_map_item *ptr = v;
5021
5022        seq_printf(m, "%s %ld (%s)\n",
5023                   ptr->map.eval_string, ptr->map.eval_value,
5024                   ptr->map.system);
5025
5026        return 0;
5027}
5028
5029static const struct seq_operations tracing_eval_map_seq_ops = {
5030        .start          = eval_map_start,
5031        .next           = eval_map_next,
5032        .stop           = eval_map_stop,
5033        .show           = eval_map_show,
5034};
5035
5036static int tracing_eval_map_open(struct inode *inode, struct file *filp)
5037{
5038        if (tracing_disabled)
5039                return -ENODEV;
5040
5041        return seq_open(filp, &tracing_eval_map_seq_ops);
5042}
5043
5044static const struct file_operations tracing_eval_map_fops = {
5045        .open           = tracing_eval_map_open,
5046        .read           = seq_read,
5047        .llseek         = seq_lseek,
5048        .release        = seq_release,
5049};
5050
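/*
 * Example (a sketch): each line of the file is
 * "<eval string> <value> (<system>)", as formatted by eval_map_show():
 *
 *     cat /sys/kernel/debug/tracing/eval_map
 *     HI_SOFTIRQ 0 (irq)
 *     TIMER_SOFTIRQ 1 (irq)
 */
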
5051static inline union trace_eval_map_item *
5052trace_eval_jmp_to_tail(union trace_eval_map_item *ptr)
5053{
5054        /* Return tail of array given the head */
5055        return ptr + ptr->head.length + 1;
5056}
5057
5058static void
5059trace_insert_eval_map_file(struct module *mod, struct trace_eval_map **start,
5060                           int len)
5061{
5062        struct trace_eval_map **stop;
5063        struct trace_eval_map **map;
5064        union trace_eval_map_item *map_array;
5065        union trace_eval_map_item *ptr;
5066
5067        stop = start + len;
5068
5069        /*
5070         * The trace_eval_maps contains the map plus a head and tail item,
5071         * where the head holds the module and length of array, and the
5072         * tail holds a pointer to the next list.
5073         */
5074        map_array = kmalloc(sizeof(*map_array) * (len + 2), GFP_KERNEL);
5075        if (!map_array) {
5076                pr_warn("Unable to allocate trace eval mapping\n");
5077                return;
5078        }
5079
5080        mutex_lock(&trace_eval_mutex);
5081
5082        if (!trace_eval_maps)
5083                trace_eval_maps = map_array;
5084        else {
5085                ptr = trace_eval_maps;
5086                for (;;) {
5087                        ptr = trace_eval_jmp_to_tail(ptr);
5088                        if (!ptr->tail.next)
5089                                break;
5090                        ptr = ptr->tail.next;
5091
5092                }
5093                ptr->tail.next = map_array;
5094        }
5095        map_array->head.mod = mod;
5096        map_array->head.length = len;
5097        map_array++;
5098
5099        for (map = start; (unsigned long)map < (unsigned long)stop; map++) {
5100                map_array->map = **map;
5101                map_array++;
5102        }
5103        memset(map_array, 0, sizeof(*map_array));
5104
5105        mutex_unlock(&trace_eval_mutex);
5106}
5107
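/*
 * Layout of one chunk as built above (a sketch): a head item recording
 * the owning module and map count, the maps themselves, then a tail
 * item linking to the next chunk; trace_eval_jmp_to_tail() hops from
 * head to tail using head.length:
 *
 *     [ head | map 0 | map 1 | ... | map len-1 | tail ]
 *       mod,                                     next ---> next chunk
 *       length = len
 */
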
5108static void trace_create_eval_file(struct dentry *d_tracer)
5109{
5110        trace_create_file("eval_map", 0444, d_tracer,
5111                          NULL, &tracing_eval_map_fops);
5112}
5113
5114#else /* CONFIG_TRACE_EVAL_MAP_FILE */
5115static inline void trace_create_eval_file(struct dentry *d_tracer) { }
5116static inline void trace_insert_eval_map_file(struct module *mod,
5117                              struct trace_eval_map **start, int len) { }
5118#endif /* !CONFIG_TRACE_EVAL_MAP_FILE */
5119
5120static void trace_insert_eval_map(struct module *mod,
5121                                  struct trace_eval_map **start, int len)
5122{
5123        struct trace_eval_map **map;
5124
5125        if (len <= 0)
5126                return;
5127
5128        map = start;
5129
5130        trace_event_eval_update(map, len);
5131
5132        trace_insert_eval_map_file(mod, start, len);
5133}
5134
5135static ssize_t
5136tracing_set_trace_read(struct file *filp, char __user *ubuf,
5137                       size_t cnt, loff_t *ppos)
5138{
5139        struct trace_array *tr = filp->private_data;
5140        char buf[MAX_TRACER_SIZE+2];
5141        int r;
5142
5143        mutex_lock(&trace_types_lock);
5144        r = sprintf(buf, "%s\n", tr->current_trace->name);
5145        mutex_unlock(&trace_types_lock);
5146
5147        return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
5148}
5149
5150int tracer_init(struct tracer *t, struct trace_array *tr)
5151{
5152        tracing_reset_online_cpus(&tr->trace_buffer);
5153        return t->init(tr);
5154}
5155
5156static void set_buffer_entries(struct trace_buffer *buf, unsigned long val)
5157{
5158        int cpu;
5159
5160        for_each_tracing_cpu(cpu)
5161                per_cpu_ptr(buf->data, cpu)->entries = val;
5162}
5163
5164#ifdef CONFIG_TRACER_MAX_TRACE
5165/* resize @trace_buf's per-cpu entry counts to match @size_buf's entries */
5166static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
5167                                        struct trace_buffer *size_buf, int cpu_id)
5168{
5169        int cpu, ret = 0;
5170
5171        if (cpu_id == RING_BUFFER_ALL_CPUS) {
5172                for_each_tracing_cpu(cpu) {
5173                        ret = ring_buffer_resize(trace_buf->buffer,
5174                                 per_cpu_ptr(size_buf->data, cpu)->entries, cpu);
5175                        if (ret < 0)
5176                                break;
5177                        per_cpu_ptr(trace_buf->data, cpu)->entries =
5178                                per_cpu_ptr(size_buf->data, cpu)->entries;
5179                }
5180        } else {
5181                ret = ring_buffer_resize(trace_buf->buffer,
5182                                 per_cpu_ptr(size_buf->data, cpu_id)->entries, cpu_id);
5183                if (ret == 0)
5184                        per_cpu_ptr(trace_buf->data, cpu_id)->entries =
5185                                per_cpu_ptr(size_buf->data, cpu_id)->entries;
5186        }
5187
5188        return ret;
5189}
5190#endif /* CONFIG_TRACER_MAX_TRACE */
5191
5192static int __tracing_resize_ring_buffer(struct trace_array *tr,
5193                                        unsigned long size, int cpu)
5194{
5195        int ret;
5196
5197        /*
5198         * If kernel or user changes the size of the ring buffer
5199         * we use the size that was given, and we can forget about
5200         * expanding it later.
5201         */
5202        ring_buffer_expanded = true;
5203
5204        /* May be called before buffers are initialized */
5205        if (!tr->trace_buffer.buffer)
5206                return 0;
5207
5208        ret = ring_buffer_resize(tr->trace_buffer.buffer, size, cpu);
5209        if (ret < 0)
5210                return ret;
5211
5212#ifdef CONFIG_TRACER_MAX_TRACE
5213        if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL) ||
5214            !tr->current_trace->use_max_tr)
5215                goto out;
5216
5217        ret = ring_buffer_resize(tr->max_buffer.buffer, size, cpu);
5218        if (ret < 0) {
5219                int r = resize_buffer_duplicate_size(&tr->trace_buffer,
5220                                                     &tr->trace_buffer, cpu);
5221                if (r < 0) {
5222                        /*
5223                         * AARGH! We are left with a differently
5224                         * sized max buffer!
5225                         * The max buffer is our "snapshot" buffer.
5226                         * When a tracer needs a snapshot (one of the
5227                         * latency tracers), it swaps the max buffer
5228                         * with the saved snapshot. We succeeded in
5229                         * updating the size of the main buffer, but
5230                         * failed to update the size of the max buffer.
5231                         * When we then tried to reset the main buffer
5232                         * to its original size, that failed too. This
5233                         * is very unlikely to happen, but if it does,
5234                         * warn and kill all tracing.
5235                         */
5236                        WARN_ON(1);
5237                        tracing_disabled = 1;
5238                }
5239                return ret;
5240        }
5241
5242        if (cpu == RING_BUFFER_ALL_CPUS)
5243                set_buffer_entries(&tr->max_buffer, size);
5244        else
5245                per_cpu_ptr(tr->max_buffer.data, cpu)->entries = size;
5246
5247 out:
5248#endif /* CONFIG_TRACER_MAX_TRACE */
5249
5250        if (cpu == RING_BUFFER_ALL_CPUS)
5251                set_buffer_entries(&tr->trace_buffer, size);
5252        else
5253                per_cpu_ptr(tr->trace_buffer.data, cpu)->entries = size;
5254
5255        return ret;
5256}
5257
5258static ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
5259                                          unsigned long size, int cpu_id)
5260{
5261        int ret = size;
5262
5263        mutex_lock(&trace_types_lock);
5264
5265        if (cpu_id != RING_BUFFER_ALL_CPUS) {
5266                /* make sure this cpu is enabled in the mask */
5267                if (!cpumask_test_cpu(cpu_id, tracing_buffer_mask)) {
5268                        ret = -EINVAL;
5269                        goto out;
5270                }
5271        }
5272
5273        ret = __tracing_resize_ring_buffer(tr, size, cpu_id);
5274        if (ret < 0)
5275                ret = -ENOMEM;
5276
5277out:
5278        mutex_unlock(&trace_types_lock);
5279
5280        return ret;
5281}
5282
5283
5284/**
5285 * tracing_update_buffers - used by tracing facility to expand ring buffers
5286 *
5287 * To save memory when tracing is configured in but never used, the
5288 * ring buffers are initially set to a minimum size. Once a user
5289 * starts to use the tracing facility, they need to grow
5290 * to their default size.
5291 *
5292 * This function is to be called when a tracer is about to be used.
5293 */
5294int tracing_update_buffers(void)
5295{
5296        int ret = 0;
5297
5298        mutex_lock(&trace_types_lock);
5299        if (!ring_buffer_expanded)
5300                ret = __tracing_resize_ring_buffer(&global_trace, trace_buf_size,
5301                                                RING_BUFFER_ALL_CPUS);
5302        mutex_unlock(&trace_types_lock);
5303
5304        return ret;
5305}
5306
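/*
 * Example (a sketch): the resize paths above back the "buffer_size_kb"
 * files; a write expands the ring buffer and sets ring_buffer_expanded
 * so the boot-time minimum is never restored:
 *
 *     echo 4096 > /sys/kernel/debug/tracing/buffer_size_kb
 *     echo 1024 > /sys/kernel/debug/tracing/per_cpu/cpu0/buffer_size_kb
 */
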
5307struct trace_option_dentry;
5308
5309static void
5310create_trace_option_files(struct trace_array *tr, struct tracer *tracer);
5311
5312/*
5313 * Used to clear out the tracer before deletion of an instance.
5314 * Must have trace_types_lock held.
5315 */
5316static void tracing_set_nop(struct trace_array *tr)
5317{
5318        if (tr->current_trace == &nop_trace)
5319                return;
5320
5321        tr->current_trace->enabled--;
5322
5323        if (tr->current_trace->reset)
5324                tr->current_trace->reset(tr);
5325
5326        tr->current_trace = &nop_trace;
5327}
5328
5329static void add_tracer_options(struct trace_array *tr, struct tracer *t)
5330{
5331        /* Only enable if the directory has been created already. */
5332        if (!tr->dir)
5333                return;
5334
5335        create_trace_option_files(tr, t);
5336}
5337
5338static int tracing_set_tracer(struct trace_array *tr, const char *buf)
5339{
5340        struct tracer *t;
5341#ifdef CONFIG_TRACER_MAX_TRACE
5342        bool had_max_tr;
5343#endif
5344        int ret = 0;
5345
5346        mutex_lock(&trace_types_lock);
5347
5348        if (!ring_buffer_expanded) {
5349                ret = __tracing_resize_ring_buffer(tr, trace_buf_size,
5350                                                RING_BUFFER_ALL_CPUS);
5351                if (ret < 0)
5352                        goto out;
5353                ret = 0;
5354        }
5355
5356        for (t = trace_types; t; t = t->next) {
5357                if (strcmp(t->name, buf) == 0)
5358                        break;
5359        }
5360        if (!t) {
5361                ret = -EINVAL;
5362                goto out;
5363        }
5364        if (t == tr->current_trace)
5365                goto out;
5366
5367        /* Some tracers won't work on kernel command line */
5368        if (system_state < SYSTEM_RUNNING && t->noboot) {
5369                pr_warn("Tracer '%s' is not allowed on command line, ignored\n",
5370                        t->name);
5371                goto out;
5372        }
5373
5374        /* Some tracers are only allowed for the top level buffer */
5375        if (!trace_ok_for_array(t, tr)) {
5376                ret = -EINVAL;
5377                goto out;
5378        }
5379
5380        /* If trace pipe files are being read, we can't change the tracer */
5381        if (tr->current_trace->ref) {
5382                ret = -EBUSY;
5383                goto out;
5384        }
5385
5386        trace_branch_disable();
5387
5388        tr->current_trace->enabled--;
5389
5390        if (tr->current_trace->reset)
5391                tr->current_trace->reset(tr);
5392
5393        /* Current trace needs to be nop_trace before synchronize_sched */
5394        tr->current_trace = &nop_trace;
5395
5396#ifdef CONFIG_TRACER_MAX_TRACE
5397        had_max_tr = tr->allocated_snapshot;
5398
5399        if (had_max_tr && !t->use_max_tr) {
5400                /*
5401                 * We need to make sure that the update_max_tr sees that
5402                 * current_trace changed to nop_trace to keep it from
5403                 * swapping the buffers after we resize it.
5404                 * The update_max_tr is called with interrupts disabled,
5405                 * so a synchronize_sched() is sufficient.
5406                 */
5407                synchronize_sched();
5408                free_snapshot(tr);
5409        }
5410#endif
5411
5412#ifdef CONFIG_TRACER_MAX_TRACE
5413        if (t->use_max_tr && !had_max_tr) {
5414                ret = alloc_snapshot(tr);
5415                if (ret < 0)
5416                        goto out;
5417        }
5418#endif
5419
5420        if (t->init) {
5421                ret = tracer_init(t, tr);
5422                if (ret)
5423                        goto out;
5424        }
5425
5426        tr->current_trace = t;
5427        tr->current_trace->enabled++;
5428        trace_branch_enable(tr);
5429 out:
5430        mutex_unlock(&trace_types_lock);
5431
5432        return ret;
5433}
5434
5435static ssize_t
5436tracing_set_trace_write(struct file *filp, const char __user *ubuf,
5437                        size_t cnt, loff_t *ppos)
5438{
5439        struct trace_array *tr = filp->private_data;
5440        char buf[MAX_TRACER_SIZE+1];
5441        int i;
5442        size_t ret;
5443        int err;
5444
5445        ret = cnt;
5446
5447        if (cnt > MAX_TRACER_SIZE)
5448                cnt = MAX_TRACER_SIZE;
5449
5450        if (copy_from_user(buf, ubuf, cnt))
5451                return -EFAULT;
5452
5453        buf[cnt] = 0;
5454
5455        /* strip trailing whitespace */
5456        for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
5457                buf[i] = 0;
5458
5459        err = tracing_set_tracer(tr, buf);
5460        if (err)
5461                return err;
5462
5463        *ppos += ret;
5464
5465        return ret;
5466}
5467
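/*
 * Example (a sketch): tracing_set_tracer() is what ultimately runs when
 * a tracer name is written to "current_tracer":
 *
 *     cat /sys/kernel/debug/tracing/available_tracers
 *     echo function > /sys/kernel/debug/tracing/current_tracer
 *     echo nop > /sys/kernel/debug/tracing/current_tracer
 *
 * The write fails with -EBUSY while trace_pipe readers hold a
 * reference (tr->current_trace->ref above).
 */
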
5468static ssize_t
5469tracing_nsecs_read(unsigned long *ptr, char __user *ubuf,
5470                   size_t cnt, loff_t *ppos)
5471{
5472        char buf[64];
5473        int r;
5474
5475        r = snprintf(buf, sizeof(buf), "%ld\n",
5476                     *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));
5477        if (r > sizeof(buf))
5478                r = sizeof(buf);
5479        return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
5480}
5481
5482static ssize_t
5483tracing_nsecs_write(unsigned long *ptr, const char __user *ubuf,
5484                    size_t cnt, loff_t *ppos)
5485{
5486        unsigned long val;
5487        int ret;
5488
5489        ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
5490        if (ret)
5491                return ret;
5492
5493        *ptr = val * 1000;
5494
5495        return cnt;
5496}
5497
5498static ssize_t
5499tracing_thresh_read(struct file *filp, char __user *ubuf,
5500                    size_t cnt, loff_t *ppos)
5501{
5502        return tracing_nsecs_read(&tracing_thresh, ubuf, cnt, ppos);
5503}
5504
5505static ssize_t
5506tracing_thresh_write(struct file *filp, const char __user *ubuf,
5507                     size_t cnt, loff_t *ppos)
5508{
5509        struct trace_array *tr = filp->private_data;
5510        int ret;
5511
5512        mutex_lock(&trace_types_lock);
5513        ret = tracing_nsecs_write(&tracing_thresh, ubuf, cnt, ppos);
5514        if (ret < 0)
5515                goto out;
5516
5517        if (tr->current_trace->update_thresh) {
5518                ret = tr->current_trace->update_thresh(tr);
5519                if (ret < 0)
5520                        goto out;
5521        }
5522
5523        ret = cnt;
5524out:
5525        mutex_unlock(&trace_types_lock);
5526
5527        return ret;
5528}
5529
5530#if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
5531
5532static ssize_t
5533tracing_max_lat_read(struct file *filp, char __user *ubuf,
5534                     size_t cnt, loff_t *ppos)
5535{
5536        return tracing_nsecs_read(filp->private_data, ubuf, cnt, ppos);
5537}
5538
5539static ssize_t
5540tracing_max_lat_write(struct file *filp, const char __user *ubuf,
5541                      size_t cnt, loff_t *ppos)
5542{
5543        return tracing_nsecs_write(filp->private_data, ubuf, cnt, ppos);
5544}
5545
5546#endif
5547
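/*
 * Example (a sketch): the nsecs helpers above store nanoseconds but
 * expose microseconds to userspace, so
 *
 *     echo 100 > /sys/kernel/debug/tracing/tracing_thresh
 *
 * arms the latency tracers (e.g. wakeup, irqsoff) to record only
 * latencies above 100 usecs; writing 0 disables the threshold.
 */
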
5548static int tracing_open_pipe(struct inode *inode, struct file *filp)
5549{
5550        struct trace_array *tr = inode->i_private;
5551        struct trace_iterator *iter;
5552        int ret = 0;
5553
5554        if (tracing_disabled)
5555                return -ENODEV;
5556
5557        if (trace_array_get(tr) < 0)
5558                return -ENODEV;
5559
5560        mutex_lock(&trace_types_lock);
5561
5562        /* create a buffer to store the information to pass to userspace */
5563        iter = kzalloc(sizeof(*iter), GFP_KERNEL);
5564        if (!iter) {
5565                ret = -ENOMEM;
5566                __trace_array_put(tr);
5567                goto out;
5568        }
5569
5570        trace_seq_init(&iter->seq);
5571        iter->trace = tr->current_trace;
5572
5573        if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
5574                ret = -ENOMEM;
5575                goto fail;
5576        }
5577
5578        /* trace pipe does not show start of buffer */
5579        cpumask_setall(iter->started);
5580
5581        if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
5582                iter->iter_flags |= TRACE_FILE_LAT_FMT;
5583
5584        /* Output in nanoseconds only if we are using a clock in nanoseconds. */
5585        if (trace_clocks[tr->clock_id].in_ns)
5586                iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
5587
5588        iter->tr = tr;
5589        iter->trace_buffer = &tr->trace_buffer;
5590        iter->cpu_file = tracing_get_cpu(inode);
5591        mutex_init(&iter->mutex);
5592        filp->private_data = iter;
5593
5594        if (iter->trace->pipe_open)
5595                iter->trace->pipe_open(iter);
5596
5597        nonseekable_open(inode, filp);
5598
5599        tr->current_trace->ref++;
5600out:
5601        mutex_unlock(&trace_types_lock);
5602        return ret;
5603
5604fail:
5605        /* iter->trace points at the live tracer; it must not be freed */
5606        kfree(iter);
5607        __trace_array_put(tr);
5608        mutex_unlock(&trace_types_lock);
5609        return ret;
5610}
5611
5612static int tracing_release_pipe(struct inode *inode, struct file *file)
5613{
5614        struct trace_iterator *iter = file->private_data;
5615        struct trace_array *tr = inode->i_private;
5616
5617        mutex_lock(&trace_types_lock);
5618
5619        tr->current_trace->ref--;
5620
5621        if (iter->trace->pipe_close)
5622                iter->trace->pipe_close(iter);
5623
5624        mutex_unlock(&trace_types_lock);
5625
5626        free_cpumask_var(iter->started);
5627        mutex_destroy(&iter->mutex);
5628        kfree(iter);
5629
5630        trace_array_put(tr);
5631
5632        return 0;
5633}
5634
5635static unsigned int
5636trace_poll(struct trace_iterator *iter, struct file *filp, poll_table *poll_table)
5637{
5638        struct trace_array *tr = iter->tr;
5639
5640        /* Iterators are static, they should be filled or empty */
5641        if (trace_buffer_iter(iter, iter->cpu_file))
5642                return POLLIN | POLLRDNORM;
5643
5644        if (tr->trace_flags & TRACE_ITER_BLOCK)
5645                /*
5646                 * Always select as readable when in blocking mode
5647                 */
5648                return POLLIN | POLLRDNORM;
5649        else
5650                return ring_buffer_poll_wait(iter->trace_buffer->buffer, iter->cpu_file,
5651                                             filp, poll_table);
5652}
5653
5654static unsigned int
5655tracing_poll_pipe(struct file *filp, poll_table *poll_table)
5656{
5657        struct trace_iterator *iter = filp->private_data;
5658
5659        return trace_poll(iter, filp, poll_table);
5660}
5661
5662/* Must be called with iter->mutex held. */
5663static int tracing_wait_pipe(struct file *filp)
5664{
5665        struct trace_iterator *iter = filp->private_data;
5666        int ret;
5667
5668        while (trace_empty(iter)) {
5669
5670                if ((filp->f_flags & O_NONBLOCK)) {
5671                        return -EAGAIN;
5672                }
5673
5674                /*
5675                 * We block until we read something and tracing is disabled.
5676                 * We still block if tracing is disabled, but we have never
5677                 * read anything. This allows a user to cat this file, and
5678                 * then enable tracing. But after we have read something,
5679                 * we give an EOF when tracing is again disabled.
5680                 *
5681                 * iter->pos will be 0 if we haven't read anything.
5682                 */
5683                if (!tracer_tracing_is_on(iter->tr) && iter->pos)
5684                        break;
5685
5686                mutex_unlock(&iter->mutex);
5687
5688                ret = wait_on_pipe(iter, false);
5689
5690                mutex_lock(&iter->mutex);
5691
5692                if (ret)
5693                        return ret;
5694        }
5695
5696        return 1;
5697}
5698
5699/*
5700 * Consumer reader.
5701 */
5702static ssize_t
5703tracing_read_pipe(struct file *filp, char __user *ubuf,
5704                  size_t cnt, loff_t *ppos)
5705{
5706        struct trace_iterator *iter = filp->private_data;
5707        ssize_t sret;
5708
5709        /*
5710         * Avoid more than one consumer on a single file descriptor
5711         * This is just a matter of traces coherency, the ring buffer itself
5712         * is protected.
5713         */
5714        mutex_lock(&iter->mutex);
5715
5716        /* return any leftover data */
5717        sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
5718        if (sret != -EBUSY)
5719                goto out;
5720
5721        trace_seq_init(&iter->seq);
5722
5723        if (iter->trace->read) {
5724                sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
5725                if (sret)
5726                        goto out;
5727        }
5728
5729waitagain:
5730        sret = tracing_wait_pipe(filp);
5731        if (sret <= 0)
5732                goto out;
5733
5734        /* stop when tracing is finished */
5735        if (trace_empty(iter)) {
5736                sret = 0;
5737                goto out;
5738        }
5739
5740        if (cnt >= PAGE_SIZE)
5741                cnt = PAGE_SIZE - 1;
5742
5743        /* reset all but tr, trace, and overruns */
5744        memset(&iter->seq, 0,
5745               sizeof(struct trace_iterator) -
5746               offsetof(struct trace_iterator, seq));
5747        cpumask_clear(iter->started);
5748        iter->pos = -1;
5749
5750        trace_event_read_lock();
5751        trace_access_lock(iter->cpu_file);
5752        while (trace_find_next_entry_inc(iter) != NULL) {
5753                enum print_line_t ret;
5754                int save_len = iter->seq.seq.len;
5755
5756                ret = print_trace_line(iter);
5757                if (ret == TRACE_TYPE_PARTIAL_LINE) {
5758                        /* don't print partial lines */
5759                        iter->seq.seq.len = save_len;
5760                        break;
5761                }
5762                if (ret != TRACE_TYPE_NO_CONSUME)
5763                        trace_consume(iter);
5764
5765                if (trace_seq_used(&iter->seq) >= cnt)
5766                        break;
5767
5768                /*
5769                 * Setting the full flag means we reached the trace_seq buffer
5770                 * size and should have exited via the partial-line check above;
5771                 * one of the trace_seq_* functions was not used properly.
5772                 */
5773                WARN_ONCE(iter->seq.full, "full flag set for trace type %d",
5774                          iter->ent->type);
5775        }
5776        trace_access_unlock(iter->cpu_file);
5777        trace_event_read_unlock();
5778
5779        /* Now copy what we have to the user */
5780        sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
5781        if (iter->seq.seq.readpos >= trace_seq_used(&iter->seq))
5782                trace_seq_init(&iter->seq);
5783
5784        /*
5785         * If there was nothing to send to user, in spite of consuming trace
5786         * entries, go back to wait for more entries.
5787         */
5788        if (sret == -EBUSY)
5789                goto waitagain;
5790
5791out:
5792        mutex_unlock(&iter->mutex);
5793
5794        return sret;
5795}
5796
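/*
 * Userspace consumer sketch (illustration only; plain read(2) on
 * "trace_pipe" is the interface, the buffer size here is arbitrary):
 *
 *     int fd = open("/sys/kernel/debug/tracing/trace_pipe", O_RDONLY);
 *     char buf[4096];
 *     ssize_t n;
 *
 *     while ((n = read(fd, buf, sizeof(buf))) > 0)
 *             write(STDOUT_FILENO, buf, n);    // blocks for new data
 *
 * Reads are destructive (trace_consume() above), and only one consumer
 * per file descriptor is coherent, hence iter->mutex.
 */
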
5797static void tracing_spd_release_pipe(struct splice_pipe_desc *spd,
5798                                     unsigned int idx)
5799{
5800        __free_page(spd->pages[idx]);
5801}
5802
5803static const struct pipe_buf_operations tracing_pipe_buf_ops = {
5804        .can_merge              = 0,
5805        .confirm                = generic_pipe_buf_confirm,
5806        .release                = generic_pipe_buf_release,
5807        .steal                  = generic_pipe_buf_steal,
5808        .get                    = generic_pipe_buf_get,
5809};
5810
5811static size_t
5812tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
5813{
5814        size_t count;
5815        int save_len;
5816        int ret;
5817
5818        /* Seq buffer is page-sized, exactly what we need. */
5819        for (;;) {
5820                save_len = iter->seq.seq.len;
5821                ret = print_trace_line(iter);
5822
5823                if (trace_seq_has_overflowed(&iter->seq)) {
5824                        iter->seq.seq.len = save_len;
5825                        break;
5826                }
5827
5828                /*
5829                 * This should not be hit, because it should only
5830                 * be set if the iter->seq overflowed. But check it
5831                 * anyway to be safe.
5832                 */
5833                if (ret == TRACE_TYPE_PARTIAL_LINE) {
5834                        iter->seq.seq.len = save_len;
5835                        break;
5836                }
5837
5838                count = trace_seq_used(&iter->seq) - save_len;
5839                if (rem < count) {
5840                        rem = 0;
5841                        iter->seq.seq.len = save_len;
5842                        break;
5843                }
5844
5845                if (ret != TRACE_TYPE_NO_CONSUME)
5846                        trace_consume(iter);
5847                rem -= count;
5848                if (!trace_find_next_entry_inc(iter))   {
5849                        rem = 0;
5850                        iter->ent = NULL;
5851                        break;
5852                }
5853        }
5854
5855        return rem;
5856}
5857
5858static ssize_t tracing_splice_read_pipe(struct file *filp,
5859                                        loff_t *ppos,
5860                                        struct pipe_inode_info *pipe,
5861                                        size_t len,
5862                                        unsigned int flags)
5863{
5864        struct page *pages_def[PIPE_DEF_BUFFERS];
5865        struct partial_page partial_def[PIPE_DEF_BUFFERS];
5866        struct trace_iterator *iter = filp->private_data;
5867        struct splice_pipe_desc spd = {
5868                .pages          = pages_def,
5869                .partial        = partial_def,
5870                .nr_pages       = 0, /* This gets updated below. */
5871                .nr_pages_max   = PIPE_DEF_BUFFERS,
5872                .ops            = &tracing_pipe_buf_ops,
5873                .spd_release    = tracing_spd_release_pipe,
5874        };
5875        ssize_t ret;
5876        size_t rem;
5877        unsigned int i;
5878
5879        if (splice_grow_spd(pipe, &spd))
5880                return -ENOMEM;
5881
5882        mutex_lock(&iter->mutex);
5883
5884        if (iter->trace->splice_read) {
5885                ret = iter->trace->splice_read(iter, filp,
5886                                               ppos, pipe, len, flags);
5887                if (ret)
5888                        goto out_err;
5889        }
5890
5891        ret = tracing_wait_pipe(filp);
5892        if (ret <= 0)
5893                goto out_err;
5894
5895        if (!iter->ent && !trace_find_next_entry_inc(iter)) {
5896                ret = -EFAULT;
5897                goto out_err;
5898        }
5899
5900        trace_event_read_lock();
5901        trace_access_lock(iter->cpu_file);
5902
5903        /* Fill as many pages as possible. */
5904        for (i = 0, rem = len; i < spd.nr_pages_max && rem; i++) {
5905                spd.pages[i] = alloc_page(GFP_KERNEL);
5906                if (!spd.pages[i])
5907                        break;
5908
5909                rem = tracing_fill_pipe_page(rem, iter);
5910
5911                /* Copy the data into the page, so we can start over. */
5912                ret = trace_seq_to_buffer(&iter->seq,
5913                                          page_address(spd.pages[i]),
5914                                          trace_seq_used(&iter->seq));
5915                if (ret < 0) {
5916                        __free_page(spd.pages[i]);
5917                        break;
5918                }
5919                spd.partial[i].offset = 0;
5920                spd.partial[i].len = trace_seq_used(&iter->seq);
5921
5922                trace_seq_init(&iter->seq);
5923        }
5924
5925        trace_access_unlock(iter->cpu_file);
5926        trace_event_read_unlock();
5927        mutex_unlock(&iter->mutex);
5928
5929        spd.nr_pages = i;
5930
5931        if (i)
5932                ret = splice_to_pipe(pipe, &spd);
5933        else
5934                ret = 0;
5935out:
5936        splice_shrink_spd(&spd);
5937        return ret;
5938
5939out_err:
5940        mutex_unlock(&iter->mutex);
5941        goto out;
5942}
5943
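/*
 * Splice sketch (illustration only; splice(2) requires one end to be a
 * pipe, so tools go trace_pipe -> pipe -> destination):
 *
 *     int pfd[2];
 *     int in = open("/sys/kernel/debug/tracing/trace_pipe", O_RDONLY);
 *
 *     pipe(pfd);
 *     splice(in, NULL, pfd[1], NULL, 65536, 0);  // fills pipe pages
 *
 * Each spliced page is rendered by tracing_fill_pipe_page() above
 * without an intermediate copy through a userspace buffer.
 */
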
5944static ssize_t
5945tracing_entries_read(struct file *filp, char __user *ubuf,
5946                     size_t cnt, loff_t *ppos)
5947{
5948        struct inode *inode = file_inode(filp);
5949        struct trace_array *tr = inode->i_private;
5950        int cpu = tracing_get_cpu(inode);
5951        char buf[64];
5952        int r = 0;
5953        ssize_t ret;
5954
5955        mutex_lock(&trace_types_lock);
5956
5957        if (cpu == RING_BUFFER_ALL_CPUS) {
5958                int cpu, buf_size_same;
5959                unsigned long size;
5960
5961                size = 0;
5962                buf_size_same = 1;
5963                /* check if all cpu sizes are same */
5964                for_each_tracing_cpu(cpu) {
5965                        /* fill in the size from first enabled cpu */
5966                        if (size == 0)
5967                                size = per_cpu_ptr(tr->trace_buffer.data, cpu)->entries;
5968                        if (size != per_cpu_ptr(tr->trace_buffer.data, cpu)->entries) {
5969                                buf_size_same = 0;
5970                                break;
5971                        }
5972                }
5973
5974                if (buf_size_same) {
5975                        if (!ring_buffer_expanded)
5976                                r = sprintf(buf, "%lu (expanded: %lu)\n",
5977                                            size >> 10,
5978                                            trace_buf_size >> 10);
5979                        else
5980                                r = sprintf(buf, "%lu\n", size >> 10);
5981                } else
5982                        r = sprintf(buf, "X\n");
5983        } else
5984                r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10);
5985
5986        mutex_unlock(&trace_types_lock);
5987
5988        ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
5989        return ret;
5990}
5991
5992static ssize_t
5993tracing_entries_write(struct file *filp, const char __user *ubuf,
5994                      size_t cnt, loff_t *ppos)
5995{
5996        struct inode *inode = file_inode(filp);
5997        struct trace_array *tr = inode->i_private;
5998        unsigned long val;
5999        int ret;
6000
6001        ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6002        if (ret)
6003                return ret;
6004
6005        /* must have at least 1 entry */
6006        if (!val)
6007                return -EINVAL;
6008
6009        /* value is in KB */
6010        val <<= 10;
6011        ret = tracing_resize_ring_buffer(tr, val, tracing_get_cpu(inode));
6012        if (ret < 0)
6013                return ret;
6014
6015        *ppos += cnt;
6016
6017        return cnt;
6018}
6019
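/*
 * Example (a sketch) of the read formats produced above:
 *
 *     cat /sys/kernel/debug/tracing/buffer_size_kb
 *     7 (expanded: 1408)    <- boot-time minimum, not yet expanded
 *     1408                  <- expanded; all per-cpu sizes are equal
 *     X                     <- per-cpu sizes differ; see per_cpu/
 */
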
6020static ssize_t
6021tracing_total_entries_read(struct file *filp, char __user *ubuf,
6022                                size_t cnt, loff_t *ppos)
6023{
6024        struct trace_array *tr = filp->private_data;
6025        char buf[64];
6026        int r, cpu;
6027        unsigned long size = 0, expanded_size = 0;
6028
6029        mutex_lock(&trace_types_lock);
6030        for_each_tracing_cpu(cpu) {
6031                size += per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10;
6032                if (!ring_buffer_expanded)
6033                        expanded_size += trace_buf_size >> 10;
6034        }
6035        if (ring_buffer_expanded)
6036                r = sprintf(buf, "%lu\n", size);
6037        else
6038                r = sprintf(buf, "%lu (expanded: %lu)\n", size, expanded_size);
6039        mutex_unlock(&trace_types_lock);
6040
6041        return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
6042}
6043
6044static ssize_t
6045tracing_free_buffer_write(struct file *filp, const char __user *ubuf,
6046                          size_t cnt, loff_t *ppos)
6047{
6048        /*
6049         * There is no need to read what the user has written; this function
6050         * exists only so that "echo" does not report an error.
6051         */
6052
6053        *ppos += cnt;
6054
6055        return cnt;
6056}
6057
6058static int
6059tracing_free_buffer_release(struct inode *inode, struct file *filp)
6060{
6061        struct trace_array *tr = inode->i_private;
6062
6063        /* disable tracing? */
6064        if (tr->trace_flags & TRACE_ITER_STOP_ON_FREE)
6065                tracer_tracing_off(tr);
6066        /* resize the ring buffer to 0 */
6067        tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS);
6068
6069        trace_array_put(tr);
6070
6071        return 0;
6072}
6073
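/*
 * Example (a sketch): releasing "free_buffer" shrinks the ring buffer
 * to zero; with the disable_on_free option set, tracing is stopped
 * first:
 *
 *     echo 1 > /sys/kernel/debug/tracing/options/disable_on_free
 *     echo > /sys/kernel/debug/tracing/free_buffer
 */
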
6074static ssize_t
6075tracing_mark_write(struct file *filp, const char __user *ubuf,
6076                                        size_t cnt, loff_t *fpos)
6077{
6078        struct trace_array *tr = filp->private_data;
6079        struct ring_buffer_event *event;
6080        struct ring_buffer *buffer;
6081        struct print_entry *entry;
6082        unsigned long irq_flags;
6083        const char faulted[] = "<faulted>";
6084        ssize_t written;
6085        int size;
6086        int len;
6087
6088/* Used in tracing_mark_raw_write() as well */
6089#define FAULTED_SIZE (sizeof(faulted) - 1) /* '\0' is already accounted for */
6090
6091        if (tracing_disabled)
6092                return -EINVAL;
6093
6094        if (!(tr->trace_flags & TRACE_ITER_MARKERS))
6095                return -EINVAL;
6096
6097        if (cnt > TRACE_BUF_SIZE)
6098                cnt = TRACE_BUF_SIZE;
6099
6100        BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
6101
6102        local_save_flags(irq_flags);
6103        size = sizeof(*entry) + cnt + 2; /* add '\0' and possible '\n' */
6104
6105        /* If less than "<faulted>", then make sure we can still add that */
6106        if (cnt < FAULTED_SIZE)
6107                size += FAULTED_SIZE - cnt;
6108
6109        buffer = tr->trace_buffer.buffer;
6110        event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
6111                                            irq_flags, preempt_count());
6112        if (unlikely(!event))
6113                /* Ring buffer disabled, return as if not open for write */
6114                return -EBADF;
6115
6116        entry = ring_buffer_event_data(event);
6117        entry->ip = _THIS_IP_;
6118
6119        len = __copy_from_user_inatomic(&entry->buf, ubuf, cnt);
6120        if (len) {
6121                memcpy(&entry->buf, faulted, FAULTED_SIZE);
6122                cnt = FAULTED_SIZE;
6123                written = -EFAULT;
6124        } else
6125                written = cnt;
6126        len = cnt;
6127
6128        if (entry->buf[cnt - 1] != '\n') {
6129                entry->buf[cnt] = '\n';
6130                entry->buf[cnt + 1] = '\0';
6131        } else
6132                entry->buf[cnt] = '\0';
6133
6134        __buffer_unlock_commit(buffer, event);
6135
6136        if (written > 0)
6137                *fpos += written;
6138
6139        return written;
6140}
6141
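/*
 * Example (a sketch): annotating the trace from userspace; a trailing
 * newline is appended when the write did not include one:
 *
 *     echo "hello world" > /sys/kernel/debug/tracing/trace_marker
 *
 * The entry appears in the trace as a print line. Writes longer than
 * TRACE_BUF_SIZE are truncated, and a faulting user buffer is logged
 * as "<faulted>" with -EFAULT returned.
 */
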
6142/* Limit it for now to 3K (including tag) */
6143#define RAW_DATA_MAX_SIZE (1024*3)
6144
6145static ssize_t
6146tracing_mark_raw_write(struct file *filp, const char __user *ubuf,
6147                                        size_t cnt, loff_t *fpos)
6148{
6149        struct trace_array *tr = filp->private_data;
6150        struct ring_buffer_event *event;
6151        struct ring_buffer *buffer;
6152        struct raw_data_entry *entry;
6153        const char faulted[] = "<faulted>";
6154        unsigned long irq_flags;
6155        ssize_t written;
6156        int size;
6157        int len;
6158
6159#define FAULT_SIZE_ID (FAULTED_SIZE + sizeof(int))
6160
6161        if (tracing_disabled)
6162                return -EINVAL;
6163
6164        if (!(tr->trace_flags & TRACE_ITER_MARKERS))
6165                return -EINVAL;
6166
6167        /* The marker must at least have a tag id */
6168        if (cnt < sizeof(unsigned int) || cnt > RAW_DATA_MAX_SIZE)
6169                return -EINVAL;
6170
6171        if (cnt > TRACE_BUF_SIZE)
6172                cnt = TRACE_BUF_SIZE;
6173
6174        BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
6175
6176        local_save_flags(irq_flags);
6177        size = sizeof(*entry) + cnt;
6178        if (cnt < FAULT_SIZE_ID)
6179                size += FAULT_SIZE_ID - cnt;
6180
6181        buffer = tr->trace_buffer.buffer;
6182        event = __trace_buffer_lock_reserve(buffer, TRACE_RAW_DATA, size,
6183                                            irq_flags, preempt_count());
6184        if (!event)
6185                /* Ring buffer disabled, return as if not open for write */
6186                return -EBADF;
6187
6188        entry = ring_buffer_event_data(event);
6189
6190        len = __copy_from_user_inatomic(&entry->id, ubuf, cnt);
6191        if (len) {
6192                entry->id = -1;
6193                memcpy(&entry->buf, faulted, FAULTED_SIZE);
6194                written = -EFAULT;
6195        } else
6196                written = cnt;
6197
6198        __buffer_unlock_commit(buffer, event);
6199
6200        if (written > 0)
6201                *fpos += written;
6202
6203        return written;
6204}
6205
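/*
 * Raw marker sketch (illustration only; the payload layout past the
 * leading tag id is up to whatever tool decodes TRACE_RAW_DATA):
 *
 *     struct { unsigned int id; char data[8]; } msg = { 42, "rawdata" };
 *     int fd = open("/sys/kernel/debug/tracing/trace_marker_raw", O_WRONLY);
 *
 *     write(fd, &msg, sizeof(msg));    // first 4 bytes become entry->id
 *
 * Writes must carry at least sizeof(unsigned int) bytes for the tag and
 * are capped at RAW_DATA_MAX_SIZE.
 */
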
6206static int tracing_clock_show(struct seq_file *m, void *v)
6207{
6208        struct trace_array *tr = m->private;
6209        int i;
6210
6211        for (i = 0; i < ARRAY_SIZE(trace_clocks); i++)
6212                seq_printf(m,
6213                        "%s%s%s%s", i ? " " : "",
6214                        i == tr->clock_id ? "[" : "", trace_clocks[i].name,
6215                        i == tr->clock_id ? "]" : "");
6216        seq_putc(m, '\n');
6217
6218        return 0;
6219}
6220
6221static int tracing_set_clock(struct trace_array *tr, const char *clockstr)
6222{
6223        int i;
6224
6225        for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) {
6226                if (strcmp(trace_clocks[i].name, clockstr) == 0)
6227                        break;
6228        }
6229        if (i == ARRAY_SIZE(trace_clocks))
6230                return -EINVAL;
6231
6232        mutex_lock(&trace_types_lock);
6233
6234        tr->clock_id = i;
6235
6236        ring_buffer_set_clock(tr->trace_buffer.buffer, trace_clocks[i].func);
6237
6238        /*
6239         * New clock may not be consistent with the previous clock.
6240         * Reset the buffer so that it doesn't have incomparable timestamps.
6241         */
6242        tracing_reset_online_cpus(&tr->trace_buffer);
6243
6244#ifdef CONFIG_TRACER_MAX_TRACE
6245        if (tr->max_buffer.buffer)
6246                ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func);
6247        tracing_reset_online_cpus(&tr->max_buffer);
6248#endif
6249
6250        mutex_unlock(&trace_types_lock);
6251
6252        return 0;
6253}
6254
6255static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
6256                                   size_t cnt, loff_t *fpos)
6257{
6258        struct seq_file *m = filp->private_data;
6259        struct trace_array *tr = m->private;
6260        char buf[64];
6261        const char *clockstr;
6262        int ret;
6263
6264        if (cnt >= sizeof(buf))
6265                return -EINVAL;
6266
6267        if (copy_from_user(buf, ubuf, cnt))
6268                return -EFAULT;
6269
6270        buf[cnt] = 0;
6271
6272        clockstr = strstrip(buf);
6273
6274        ret = tracing_set_clock(tr, clockstr);
6275        if (ret)
6276                return ret;
6277
6278        *fpos += cnt;
6279
6280        return cnt;
6281}
6282
6283static int tracing_clock_open(struct inode *inode, struct file *file)
6284{
6285        struct trace_array *tr = inode->i_private;
6286        int ret;
6287
6288        if (tracing_disabled)
6289                return -ENODEV;
6290
6291        if (trace_array_get(tr))
6292                return -ENODEV;
6293
6294        ret = single_open(file, tracing_clock_show, inode->i_private);
6295        if (ret < 0)
6296                trace_array_put(tr);
6297
6298        return ret;
6299}
6300
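/*
 * Example (a sketch; the exact clock list varies by arch and config):
 * the current clock is shown in brackets, and switching clocks resets
 * the buffers because timestamps from different clocks do not compare:
 *
 *     cat /sys/kernel/debug/tracing/trace_clock
 *     [local] global counter uptime perf mono mono_raw x86-tsc
 *     echo mono > /sys/kernel/debug/tracing/trace_clock
 */
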
6301struct ftrace_buffer_info {
6302        struct trace_iterator   iter;
6303        void                    *spare;
6304        unsigned int            spare_cpu;
6305        unsigned int            read;
6306};
6307
6308#ifdef CONFIG_TRACER_SNAPSHOT
6309static int tracing_snapshot_open(struct inode *inode, struct file *file)
6310{
6311        struct trace_array *tr = inode->i_private;
6312        struct trace_iterator *iter;
6313        struct seq_file *m;
6314        int ret = 0;
6315
6316        if (trace_array_get(tr) < 0)
6317                return -ENODEV;
6318
6319        if (file->f_mode & FMODE_READ) {
6320                iter = __tracing_open(inode, file, true);
6321                if (IS_ERR(iter))
6322                        ret = PTR_ERR(iter);
6323        } else {
6324                /* Writes still need the seq_file to hold the private data */
6325                ret = -ENOMEM;
6326                m = kzalloc(sizeof(*m), GFP_KERNEL);
6327                if (!m)
6328                        goto out;
6329                iter = kzalloc(sizeof(*iter), GFP_KERNEL);
6330                if (!iter) {
6331                        kfree(m);
6332                        goto out;
6333                }
6334                ret = 0;
6335
6336                iter->tr = tr;
6337                iter->trace_buffer = &tr->max_buffer;
6338                iter->cpu_file = tracing_get_cpu(inode);
6339                m->private = iter;
6340                file->private_data = m;
6341        }
6342out:
6343        if (ret < 0)
6344                trace_array_put(tr);
6345
6346        return ret;
6347}
6348
6349static ssize_t
6350tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
6351                       loff_t *ppos)
6352{
6353        struct seq_file *m = filp->private_data;
6354        struct trace_iterator *iter = m->private;
6355        struct trace_array *tr = iter->tr;
6356        unsigned long val;
6357        int ret;
6358
6359        ret = tracing_update_buffers();
6360        if (ret < 0)
6361                return ret;
6362
6363        ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6364        if (ret)
6365                return ret;
6366
6367        mutex_lock(&trace_types_lock);
6368
6369        if (tr->current_trace->use_max_tr) {
6370                ret = -EBUSY;
6371                goto out;
6372        }
6373
6374        switch (val) {
6375        case 0:
6376                if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
6377                        ret = -EINVAL;
6378                        break;
6379                }
6380                if (tr->allocated_snapshot)
6381                        free_snapshot(tr);
6382                break;
6383        case 1:
6384/* Only allow per-cpu swap if the ring buffer supports it */
6385#ifndef CONFIG_RING_BUFFER_ALLOW_SWAP
6386                if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
6387                        ret = -EINVAL;
6388                        break;
6389                }
6390#endif
6391                if (!tr->allocated_snapshot) {
6392                        ret = alloc_snapshot(tr);
6393                        if (ret < 0)
6394                                break;
6395                }
6396                local_irq_disable();
6397                /* Now, we're going to swap */
6398                if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
6399                        update_max_tr(tr, current, smp_processor_id());
6400                else
6401                        update_max_tr_single(tr, current, iter->cpu_file);
6402                local_irq_enable();
6403                break;
6404        default:
6405                if (tr->allocated_snapshot) {
6406                        if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
6407                                tracing_reset_online_cpus(&tr->max_buffer);
6408                        else
6409                                tracing_reset(&tr->max_buffer, iter->cpu_file);
6410                }
6411                break;
6412        }
6413
6414        if (ret >= 0) {
6415                *ppos += cnt;
6416                ret = cnt;
6417        }
6418out:
6419        mutex_unlock(&trace_types_lock);
6420        return ret;
6421}
6422
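/*
 * Example (a sketch) of the value semantics handled above:
 *
 *     echo 1 > /sys/kernel/debug/tracing/snapshot   <- allocate and swap
 *     cat /sys/kernel/debug/tracing/snapshot        <- read frozen copy
 *     echo 2 > /sys/kernel/debug/tracing/snapshot   <- clear, keep buffer
 *     echo 0 > /sys/kernel/debug/tracing/snapshot   <- free the buffer
 */
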
6423static int tracing_snapshot_release(struct inode *inode, struct file *file)
6424{
6425        struct seq_file *m = file->private_data;
6426        int ret;
6427
6428        ret = tracing_release(inode, file);
6429
6430        if (file->f_mode & FMODE_READ)
6431                return ret;
6432
6433        /* If write only, the seq_file is just a stub */
6434        if (m)
6435                kfree(m->private);
6436        kfree(m);
6437
6438        return 0;
6439}
6440
6441static int tracing_buffers_open(struct inode *inode, struct file *filp);
6442static ssize_t tracing_buffers_read(struct file *filp, char __user *ubuf,
6443                                    size_t count, loff_t *ppos);
6444static int tracing_buffers_release(struct inode *inode, struct file *file);
6445static ssize_t tracing_buffers_splice_read(struct file *file, loff_t *ppos,
6446                   struct pipe_inode_info *pipe, size_t len, unsigned int flags);
6447
6448static int snapshot_raw_open(struct inode *inode, struct file *filp)
6449{
6450        struct ftrace_buffer_info *info;
6451        int ret;
6452
6453        ret = tracing_buffers_open(inode, filp);
6454        if (ret < 0)
6455                return ret;
6456
6457        info = filp->private_data;
6458
6459        if (info->iter.trace->use_max_tr) {
6460                tracing_buffers_release(inode, filp);
6461                return -EBUSY;
6462        }
6463
6464        info->iter.snapshot = true;
6465        info->iter.trace_buffer = &info->iter.tr->max_buffer;
6466
6467        return ret;
6468}
6469
6470#endif /* CONFIG_TRACER_SNAPSHOT */
6471
6472
6473static const struct file_operations tracing_thresh_fops = {
6474        .open           = tracing_open_generic,
6475        .read           = tracing_thresh_read,
6476        .write          = tracing_thresh_write,
6477        .llseek         = generic_file_llseek,
6478};
6479
6480#if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
6481static const struct file_operations tracing_max_lat_fops = {
6482        .open           = tracing_open_generic,
6483        .read           = tracing_max_lat_read,
6484        .write          = tracing_max_lat_write,
6485        .llseek         = generic_file_llseek,
6486};
6487#endif
6488
6489static const struct file_operations set_tracer_fops = {
6490        .open           = tracing_open_generic,
6491        .read           = tracing_set_trace_read,
6492        .write          = tracing_set_trace_write,
6493        .llseek         = generic_file_llseek,
6494};
6495
6496static const struct file_operations tracing_pipe_fops = {
6497        .open           = tracing_open_pipe,
6498        .poll           = tracing_poll_pipe,
6499        .read           = tracing_read_pipe,
6500        .splice_read    = tracing_splice_read_pipe,
6501        .release        = tracing_release_pipe,
6502        .llseek         = no_llseek,
6503};
6504
6505static const struct file_operations tracing_entries_fops = {
6506        .open           = tracing_open_generic_tr,
6507        .read           = tracing_entries_read,
6508        .write          = tracing_entries_write,
6509        .llseek         = generic_file_llseek,
6510        .release        = tracing_release_generic_tr,
6511};
6512
6513static const struct file_operations tracing_total_entries_fops = {
6514        .open           = tracing_open_generic_tr,
6515        .read           = tracing_total_entries_read,
6516        .llseek         = generic_file_llseek,
6517        .release        = tracing_release_generic_tr,
6518};
6519
6520static const struct file_operations tracing_free_buffer_fops = {
6521        .open           = tracing_open_generic_tr,
6522        .write          = tracing_free_buffer_write,
6523        .release        = tracing_free_buffer_release,
6524};
6525
6526static const struct file_operations tracing_mark_fops = {
6527        .open           = tracing_open_generic_tr,
6528        .write          = tracing_mark_write,
6529        .llseek         = generic_file_llseek,
6530        .release        = tracing_release_generic_tr,
6531};
6532
6533static const struct file_operations tracing_mark_raw_fops = {
6534        .open           = tracing_open_generic_tr,
6535        .write          = tracing_mark_raw_write,
6536        .llseek         = generic_file_llseek,
6537        .release        = tracing_release_generic_tr,
6538};
6539
6540static const struct file_operations trace_clock_fops = {
6541        .open           = tracing_clock_open,
6542        .read           = seq_read,
6543        .llseek         = seq_lseek,
6544        .release        = tracing_single_release_tr,
6545        .write          = tracing_clock_write,
6546};
6547
6548#ifdef CONFIG_TRACER_SNAPSHOT
6549static const struct file_operations snapshot_fops = {
6550        .open           = tracing_snapshot_open,
6551        .read           = seq_read,
6552        .write          = tracing_snapshot_write,
6553        .llseek         = tracing_lseek,
6554        .release        = tracing_snapshot_release,
6555};
6556
6557static const struct file_operations snapshot_raw_fops = {
6558        .open           = snapshot_raw_open,
6559        .read           = tracing_buffers_read,
6560        .release        = tracing_buffers_release,
6561        .splice_read    = tracing_buffers_splice_read,
6562        .llseek         = no_llseek,
6563};
6564
6565#endif /* CONFIG_TRACER_SNAPSHOT */
6566
6567static int tracing_buffers_open(struct inode *inode, struct file *filp)
6568{
6569        struct trace_array *tr = inode->i_private;
6570        struct ftrace_buffer_info *info;
6571        int ret;
6572
6573        if (tracing_disabled)
6574                return -ENODEV;
6575
6576        if (trace_array_get(tr) < 0)
6577                return -ENODEV;
6578
6579        info = kzalloc(sizeof(*info), GFP_KERNEL);
6580        if (!info) {
6581                trace_array_put(tr);
6582                return -ENOMEM;
6583        }
6584
6585        mutex_lock(&trace_types_lock);
6586
6587        info->iter.tr           = tr;
6588        info->iter.cpu_file     = tracing_get_cpu(inode);
6589        info->iter.trace        = tr->current_trace;
6590        info->iter.trace_buffer = &tr->trace_buffer;
6591        info->spare             = NULL;
6592        /* Force reading ring buffer for first read */
6593        info->read              = (unsigned int)-1;
6594
6595        filp->private_data = info;
6596
6597        tr->current_trace->ref++;
6598
6599        mutex_unlock(&trace_types_lock);
6600
6601        ret = nonseekable_open(inode, filp);
6602        if (ret < 0)
6603                trace_array_put(tr);
6604
6605        return ret;
6606}
6607
6608static unsigned int
6609tracing_buffers_poll(struct file *filp, poll_table *poll_table)
6610{
6611        struct ftrace_buffer_info *info = filp->private_data;
6612        struct trace_iterator *iter = &info->iter;
6613
6614        return trace_poll(iter, filp, poll_table);
6615}
6616
6617static ssize_t
6618tracing_buffers_read(struct file *filp, char __user *ubuf,
6619                     size_t count, loff_t *ppos)
6620{
6621        struct ftrace_buffer_info *info = filp->private_data;
6622        struct trace_iterator *iter = &info->iter;
6623        ssize_t ret = 0;
6624        ssize_t size;
6625
6626        if (!count)
6627                return 0;
6628
6629#ifdef CONFIG_TRACER_MAX_TRACE
6630        if (iter->snapshot && iter->tr->current_trace->use_max_tr)
6631                return -EBUSY;
6632#endif
6633
6634        if (!info->spare) {
6635                info->spare = ring_buffer_alloc_read_page(iter->trace_buffer->buffer,
6636                                                          iter->cpu_file);
6637                if (IS_ERR(info->spare)) {
6638                        ret = PTR_ERR(info->spare);
6639                        info->spare = NULL;
6640                } else {
6641                        info->spare_cpu = iter->cpu_file;
6642                }
6643        }
6644        if (!info->spare)
6645                return ret;
6646
6647        /* Do we have previous read data to read? */
6648        if (info->read < PAGE_SIZE)
6649                goto read;
6650
6651 again:
6652        trace_access_lock(iter->cpu_file);
6653        ret = ring_buffer_read_page(iter->trace_buffer->buffer,
6654                                    &info->spare,
6655                                    count,
6656                                    iter->cpu_file, 0);
6657        trace_access_unlock(iter->cpu_file);
6658
6659        if (ret < 0) {
6660                if (trace_empty(iter)) {
6661                        if ((filp->f_flags & O_NONBLOCK))
6662                                return -EAGAIN;
6663
6664                        ret = wait_on_pipe(iter, false);
6665                        if (ret)
6666                                return ret;
6667
6668                        goto again;
6669                }
6670                return 0;
6671        }
6672
6673        info->read = 0;
6674 read:
6675        size = PAGE_SIZE - info->read;
6676        if (size > count)
6677                size = count;
6678
6679        ret = copy_to_user(ubuf, info->spare + info->read, size);
6680        if (ret == size)
6681                return -EFAULT;
6682
6683        size -= ret;
6684
6685        *ppos += size;
6686        info->read += size;
6687
6688        return size;
6689}
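
/*
 * A minimal user-space consumer for the read path above (a sketch, not
 * part of the kernel source; the tracefs path is an assumption).
 * tracing_buffers_read() hands out at most one ring-buffer page per
 * call and remembers the page offset in info->read, so reading in
 * page-sized chunks is the natural access pattern.
 */
#if 0
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char page[4096];	/* PAGE_SIZE on most configurations */
	ssize_t n;
	int fd;

	fd = open("/sys/kernel/tracing/per_cpu/cpu0/trace_pipe_raw",
		  O_RDONLY | O_NONBLOCK);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* An empty non-blocking buffer returns -EAGAIN, ending the loop. */
	while ((n = read(fd, page, sizeof(page))) > 0)
		fprintf(stderr, "got %zd bytes of binary event data\n", n);

	close(fd);
	return 0;
}
#endif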
6690
6691static int tracing_buffers_release(struct inode *inode, struct file *file)
6692{
6693        struct ftrace_buffer_info *info = file->private_data;
6694        struct trace_iterator *iter = &info->iter;
6695
6696        mutex_lock(&trace_types_lock);
6697
6698        iter->tr->current_trace->ref--;
6699
6700        __trace_array_put(iter->tr);
6701
6702        if (info->spare)
6703                ring_buffer_free_read_page(iter->trace_buffer->buffer,
6704                                           info->spare_cpu, info->spare);
6705        kfree(info);
6706
6707        mutex_unlock(&trace_types_lock);
6708
6709        return 0;
6710}
6711
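/*
 * Each page handed off to the pipe below is wrapped in a buffer_ref.
 * The count starts at 1 when the page is attached in
 * tracing_buffers_splice_read(), buffer_pipe_buf_get() bumps it when
 * the pipe duplicates the buffer, and the page is returned to the ring
 * buffer only when the last reference is dropped, either in
 * buffer_pipe_buf_release() or in buffer_spd_release().
 */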
6712struct buffer_ref {
6713        struct ring_buffer      *buffer;
6714        void                    *page;
6715        int                     cpu;
6716        int                     ref;
6717};
6718
6719static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
6720                                    struct pipe_buffer *buf)
6721{
6722        struct buffer_ref *ref = (struct buffer_ref *)buf->private;
6723
6724        if (--ref->ref)
6725                return;
6726
6727        ring_buffer_free_read_page(ref->buffer, ref->cpu, ref->page);
6728        kfree(ref);
6729        buf->private = 0;
6730}
6731
6732static void buffer_pipe_buf_get(struct pipe_inode_info *pipe,
6733                                struct pipe_buffer *buf)
6734{
6735        struct buffer_ref *ref = (struct buffer_ref *)buf->private;
6736
6737        ref->ref++;
6738}
6739
 6740/* Pipe buffer operations for a ring buffer page. */
6741static const struct pipe_buf_operations buffer_pipe_buf_ops = {
6742        .can_merge              = 0,
6743        .confirm                = generic_pipe_buf_confirm,
6744        .release                = buffer_pipe_buf_release,
6745        .steal                  = generic_pipe_buf_steal,
6746        .get                    = buffer_pipe_buf_get,
6747};
6748
 6749/*
 6750 * Callback from splice_to_pipe(); releases any pages still attached
 6751 * to the spd if we errored out while filling the pipe.
 6752 */
6753static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i)
6754{
6755        struct buffer_ref *ref =
6756                (struct buffer_ref *)spd->partial[i].private;
6757
6758        if (--ref->ref)
6759                return;
6760
6761        ring_buffer_free_read_page(ref->buffer, ref->cpu, ref->page);
6762        kfree(ref);
6763        spd->partial[i].private = 0;
6764}
6765
6766static ssize_t
6767tracing_buffers_splice_read(struct file *file, loff_t *ppos,
6768                            struct pipe_inode_info *pipe, size_t len,
6769                            unsigned int flags)
6770{
6771        struct ftrace_buffer_info *info = file->private_data;
6772        struct trace_iterator *iter = &info->iter;
6773        struct partial_page partial_def[PIPE_DEF_BUFFERS];
6774        struct page *pages_def[PIPE_DEF_BUFFERS];
6775        struct splice_pipe_desc spd = {
6776                .pages          = pages_def,
6777                .partial        = partial_def,
6778                .nr_pages_max   = PIPE_DEF_BUFFERS,
6779                .ops            = &buffer_pipe_buf_ops,
6780                .spd_release    = buffer_spd_release,
6781        };
6782        struct buffer_ref *ref;
6783        int entries, size, i;
6784        ssize_t ret = 0;
6785
6786#ifdef CONFIG_TRACER_MAX_TRACE
6787        if (iter->snapshot && iter->tr->current_trace->use_max_tr)
6788                return -EBUSY;
6789#endif
6790
6791        if (*ppos & (PAGE_SIZE - 1))
6792                return -EINVAL;
6793
6794        if (len & (PAGE_SIZE - 1)) {
6795                if (len < PAGE_SIZE)
6796                        return -EINVAL;
6797                len &= PAGE_MASK;
6798        }
6799
6800        if (splice_grow_spd(pipe, &spd))
6801                return -ENOMEM;
6802
6803 again:
6804        trace_access_lock(iter->cpu_file);
6805        entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
6806
6807        for (i = 0; i < spd.nr_pages_max && len && entries; i++, len -= PAGE_SIZE) {
6808                struct page *page;
6809                int r;
6810
6811                ref = kzalloc(sizeof(*ref), GFP_KERNEL);
6812                if (!ref) {
6813                        ret = -ENOMEM;
6814                        break;
6815                }
6816
6817                ref->ref = 1;
6818                ref->buffer = iter->trace_buffer->buffer;
6819                ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
6820                if (IS_ERR(ref->page)) {
6821                        ret = PTR_ERR(ref->page);
6822                        ref->page = NULL;
6823                        kfree(ref);
6824                        break;
6825                }
6826                ref->cpu = iter->cpu_file;
6827
6828                r = ring_buffer_read_page(ref->buffer, &ref->page,
6829                                          len, iter->cpu_file, 1);
6830                if (r < 0) {
6831                        ring_buffer_free_read_page(ref->buffer, ref->cpu,
6832                                                   ref->page);
6833                        kfree(ref);
6834                        break;
6835                }
6836
 6837                /*
 6838                 * Zero out any leftover data; this page is headed
 6839                 * to user land.
 6840                 */
6841                size = ring_buffer_page_len(ref->page);
6842                if (size < PAGE_SIZE)
6843                        memset(ref->page + size, 0, PAGE_SIZE - size);
6844
6845                page = virt_to_page(ref->page);
6846
6847                spd.pages[i] = page;
6848                spd.partial[i].len = PAGE_SIZE;
6849                spd.partial[i].offset = 0;
6850                spd.partial[i].private = (unsigned long)ref;
6851                spd.nr_pages++;
6852                *ppos += PAGE_SIZE;
6853
6854                entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
6855        }
6856
6857        trace_access_unlock(iter->cpu_file);
6858        spd.nr_pages = i;
6859
6860        /* did we read anything? */
6861        if (!spd.nr_pages) {
6862                if (ret)
6863                        goto out;
6864
6865                ret = -EAGAIN;
6866                if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK))
6867                        goto out;
6868
6869                ret = wait_on_pipe(iter, true);
6870                if (ret)
6871                        goto out;
6872
6873                goto again;
6874        }
6875
6876        ret = splice_to_pipe(pipe, &spd);
6877out:
6878        splice_shrink_spd(&spd);
6879
6880        return ret;
6881}
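
/*
 * A stand-alone sketch of driving the splice path above from user
 * space (illustrative only: the tracefs path and output file name are
 * assumptions).  Offsets and lengths must be page aligned, as enforced
 * at the top of tracing_buffers_splice_read().
 */
#if 0
#define _GNU_SOURCE
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	int in, out, pfd[2];
	ssize_t n;

	in = open("/sys/kernel/tracing/per_cpu/cpu0/trace_pipe_raw",
		  O_RDONLY | O_NONBLOCK);
	out = open("cpu0.raw", O_WRONLY | O_CREAT | O_TRUNC, 0644);
	if (in < 0 || out < 0 || pipe(pfd) < 0)
		return 1;

	/* Move whole pages: trace_pipe_raw -> pipe -> output file. */
	while ((n = splice(in, NULL, pfd[1], NULL, 4096,
			   SPLICE_F_NONBLOCK)) > 0)
		splice(pfd[0], NULL, out, NULL, n, 0);

	close(in);
	close(out);
	return 0;
}
#endif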
6882
6883static const struct file_operations tracing_buffers_fops = {
6884        .open           = tracing_buffers_open,
6885        .read           = tracing_buffers_read,
6886        .poll           = tracing_buffers_poll,
6887        .release        = tracing_buffers_release,
6888        .splice_read    = tracing_buffers_splice_read,
6889        .llseek         = no_llseek,
6890};
6891
6892static ssize_t
6893tracing_stats_read(struct file *filp, char __user *ubuf,
6894                   size_t count, loff_t *ppos)
6895{
6896        struct inode *inode = file_inode(filp);
6897        struct trace_array *tr = inode->i_private;
6898        struct trace_buffer *trace_buf = &tr->trace_buffer;
6899        int cpu = tracing_get_cpu(inode);
6900        struct trace_seq *s;
6901        unsigned long cnt;
6902        unsigned long long t;
6903        unsigned long usec_rem;
6904
6905        s = kmalloc(sizeof(*s), GFP_KERNEL);
6906        if (!s)
6907                return -ENOMEM;
6908
6909        trace_seq_init(s);
6910
6911        cnt = ring_buffer_entries_cpu(trace_buf->buffer, cpu);
6912        trace_seq_printf(s, "entries: %ld\n", cnt);
6913
6914        cnt = ring_buffer_overrun_cpu(trace_buf->buffer, cpu);
6915        trace_seq_printf(s, "overrun: %ld\n", cnt);
6916
6917        cnt = ring_buffer_commit_overrun_cpu(trace_buf->buffer, cpu);
6918        trace_seq_printf(s, "commit overrun: %ld\n", cnt);
6919
6920        cnt = ring_buffer_bytes_cpu(trace_buf->buffer, cpu);
6921        trace_seq_printf(s, "bytes: %ld\n", cnt);
6922
6923        if (trace_clocks[tr->clock_id].in_ns) {
6924                /* local or global for trace_clock */
6925                t = ns2usecs(ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
6926                usec_rem = do_div(t, USEC_PER_SEC);
6927                trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n",
6928                                                                t, usec_rem);
6929
6930                t = ns2usecs(ring_buffer_time_stamp(trace_buf->buffer, cpu));
6931                usec_rem = do_div(t, USEC_PER_SEC);
6932                trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem);
6933        } else {
6934                /* counter or tsc mode for trace_clock */
6935                trace_seq_printf(s, "oldest event ts: %llu\n",
6936                                ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
6937
6938                trace_seq_printf(s, "now ts: %llu\n",
6939                                ring_buffer_time_stamp(trace_buf->buffer, cpu));
6940        }
6941
6942        cnt = ring_buffer_dropped_events_cpu(trace_buf->buffer, cpu);
6943        trace_seq_printf(s, "dropped events: %ld\n", cnt);
6944
6945        cnt = ring_buffer_read_events_cpu(trace_buf->buffer, cpu);
6946        trace_seq_printf(s, "read events: %ld\n", cnt);
6947
6948        count = simple_read_from_buffer(ubuf, count, ppos,
6949                                        s->buffer, trace_seq_used(s));
6950
6951        kfree(s);
6952
6953        return count;
6954}
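
/*
 * With the format strings above, reading per_cpu/cpuN/stats yields one
 * "key: value" line per counter, e.g. (values below are made up):
 *
 *   entries: 1024
 *   overrun: 0
 *   commit overrun: 0
 *   bytes: 65536
 *   oldest event ts:  5302.370432
 *   now ts:  5362.054280
 *   dropped events: 0
 *   read events: 321
 */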
6955
6956static const struct file_operations tracing_stats_fops = {
6957        .open           = tracing_open_generic_tr,
6958        .read           = tracing_stats_read,
6959        .llseek         = generic_file_llseek,
6960        .release        = tracing_release_generic_tr,
6961};
6962
6963#ifdef CONFIG_DYNAMIC_FTRACE
6964
6965static ssize_t
6966tracing_read_dyn_info(struct file *filp, char __user *ubuf,
6967                  size_t cnt, loff_t *ppos)
6968{
6969        unsigned long *p = filp->private_data;
6970        char buf[64]; /* Not too big for a shallow stack */
6971        int r;
6972
6973        r = scnprintf(buf, 63, "%ld", *p);
6974        buf[r++] = '\n';
6975
6976        return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
6977}
6978
6979static const struct file_operations tracing_dyn_info_fops = {
6980        .open           = tracing_open_generic,
6981        .read           = tracing_read_dyn_info,
6982        .llseek         = generic_file_llseek,
6983};
6984#endif /* CONFIG_DYNAMIC_FTRACE */
6985
6986#if defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE)
6987static void
6988ftrace_snapshot(unsigned long ip, unsigned long parent_ip,
6989                struct trace_array *tr, struct ftrace_probe_ops *ops,
6990                void *data)
6991{
6992        tracing_snapshot_instance(tr);
6993}
6994
6995static void
6996ftrace_count_snapshot(unsigned long ip, unsigned long parent_ip,
6997                      struct trace_array *tr, struct ftrace_probe_ops *ops,
6998                      void *data)
6999{
7000        struct ftrace_func_mapper *mapper = data;
7001        long *count = NULL;
7002
7003        if (mapper)
7004                count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
7005
7006        if (count) {
7007
7008                if (*count <= 0)
7009                        return;
7010
7011                (*count)--;
7012        }
7013
7014        tracing_snapshot_instance(tr);
7015}
7016
7017static int
7018ftrace_snapshot_print(struct seq_file *m, unsigned long ip,
7019                      struct ftrace_probe_ops *ops, void *data)
7020{
7021        struct ftrace_func_mapper *mapper = data;
7022        long *count = NULL;
7023
7024        seq_printf(m, "%ps:", (void *)ip);
7025
7026        seq_puts(m, "snapshot");
7027
7028        if (mapper)
7029                count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
7030
7031        if (count)
7032                seq_printf(m, ":count=%ld\n", *count);
7033        else
7034                seq_puts(m, ":unlimited\n");
7035
7036        return 0;
7037}
7038
7039static int
7040ftrace_snapshot_init(struct ftrace_probe_ops *ops, struct trace_array *tr,
7041                     unsigned long ip, void *init_data, void **data)
7042{
7043        struct ftrace_func_mapper *mapper = *data;
7044
7045        if (!mapper) {
7046                mapper = allocate_ftrace_func_mapper();
7047                if (!mapper)
7048                        return -ENOMEM;
7049                *data = mapper;
7050        }
7051
7052        return ftrace_func_mapper_add_ip(mapper, ip, init_data);
7053}
7054
7055static void
7056ftrace_snapshot_free(struct ftrace_probe_ops *ops, struct trace_array *tr,
7057                     unsigned long ip, void *data)
7058{
7059        struct ftrace_func_mapper *mapper = data;
7060
7061        if (!ip) {
7062                if (!mapper)
7063                        return;
7064                free_ftrace_func_mapper(mapper, NULL);
7065                return;
7066        }
7067
7068        ftrace_func_mapper_remove_ip(mapper, ip);
7069}
7070
7071static struct ftrace_probe_ops snapshot_probe_ops = {
7072        .func                   = ftrace_snapshot,
7073        .print                  = ftrace_snapshot_print,
7074};
7075
7076static struct ftrace_probe_ops snapshot_count_probe_ops = {
7077        .func                   = ftrace_count_snapshot,
7078        .print                  = ftrace_snapshot_print,
7079        .init                   = ftrace_snapshot_init,
7080        .free                   = ftrace_snapshot_free,
7081};
7082
7083static int
7084ftrace_trace_snapshot_callback(struct trace_array *tr, struct ftrace_hash *hash,
7085                               char *glob, char *cmd, char *param, int enable)
7086{
7087        struct ftrace_probe_ops *ops;
7088        void *count = (void *)-1;
7089        char *number;
7090        int ret;
7091
7092        if (!tr)
7093                return -ENODEV;
7094
7095        /* hash funcs only work with set_ftrace_filter */
7096        if (!enable)
7097                return -EINVAL;
7098
 7099        ops = param ? &snapshot_count_probe_ops : &snapshot_probe_ops;
7100
7101        if (glob[0] == '!')
7102                return unregister_ftrace_function_probe_func(glob+1, tr, ops);
7103
7104        if (!param)
7105                goto out_reg;
7106
7107        number = strsep(&param, ":");
7108
7109        if (!strlen(number))
7110                goto out_reg;
7111
7112        /*
7113         * We use the callback data field (which is a pointer)
7114         * as our counter.
7115         */
7116        ret = kstrtoul(number, 0, (unsigned long *)&count);
7117        if (ret)
7118                return ret;
7119
7120 out_reg:
7121        ret = alloc_snapshot(tr);
7122        if (ret < 0)
7123                goto out;
7124
7125        ret = register_ftrace_function_probe(glob, tr, ops, count);
7126
7127 out:
7128        return ret < 0 ? ret : 0;
7129}
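
/*
 * The callback above implements the "snapshot" command accepted by
 * set_ftrace_filter; typical usage (the function name is illustrative):
 *
 *   echo 'schedule:snapshot' > set_ftrace_filter     # snapshot on every hit
 *   echo 'schedule:snapshot:5' > set_ftrace_filter   # only the first 5 hits
 *   echo '!schedule:snapshot' > set_ftrace_filter    # remove the probe
 */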
7130
7131static struct ftrace_func_command ftrace_snapshot_cmd = {
7132        .name                   = "snapshot",
7133        .func                   = ftrace_trace_snapshot_callback,
7134};
7135
7136static __init int register_snapshot_cmd(void)
7137{
7138        return register_ftrace_command(&ftrace_snapshot_cmd);
7139}
7140#else
7141static inline __init int register_snapshot_cmd(void) { return 0; }
7142#endif /* defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) */
7143
7144static struct dentry *tracing_get_dentry(struct trace_array *tr)
7145{
7146        if (WARN_ON(!tr->dir))
7147                return ERR_PTR(-ENODEV);
7148
7149        /* Top directory uses NULL as the parent */
7150        if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
7151                return NULL;
7152
7153        /* All sub buffers have a descriptor */
7154        return tr->dir;
7155}
7156
7157static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu)
7158{
7159        struct dentry *d_tracer;
7160
7161        if (tr->percpu_dir)
7162                return tr->percpu_dir;
7163
7164        d_tracer = tracing_get_dentry(tr);
7165        if (IS_ERR(d_tracer))
7166                return NULL;
7167
7168        tr->percpu_dir = tracefs_create_dir("per_cpu", d_tracer);
7169
7170        WARN_ONCE(!tr->percpu_dir,
7171                  "Could not create tracefs directory 'per_cpu/%d'\n", cpu);
7172
7173        return tr->percpu_dir;
7174}
7175
7176static struct dentry *
7177trace_create_cpu_file(const char *name, umode_t mode, struct dentry *parent,
7178                      void *data, long cpu, const struct file_operations *fops)
7179{
7180        struct dentry *ret = trace_create_file(name, mode, parent, data, fops);
7181
7182        if (ret) /* See tracing_get_cpu() */
7183                d_inode(ret)->i_cdev = (void *)(cpu + 1);
7184        return ret;
7185}
7186
7187static void
7188tracing_init_tracefs_percpu(struct trace_array *tr, long cpu)
7189{
7190        struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu);
7191        struct dentry *d_cpu;
7192        char cpu_dir[30]; /* 30 characters should be more than enough */
7193
7194        if (!d_percpu)
7195                return;
7196
7197        snprintf(cpu_dir, 30, "cpu%ld", cpu);
7198        d_cpu = tracefs_create_dir(cpu_dir, d_percpu);
7199        if (!d_cpu) {
7200                pr_warn("Could not create tracefs '%s' entry\n", cpu_dir);
7201                return;
7202        }
7203
7204        /* per cpu trace_pipe */
7205        trace_create_cpu_file("trace_pipe", 0444, d_cpu,
7206                                tr, cpu, &tracing_pipe_fops);
7207
7208        /* per cpu trace */
7209        trace_create_cpu_file("trace", 0644, d_cpu,
7210                                tr, cpu, &tracing_fops);
7211
7212        trace_create_cpu_file("trace_pipe_raw", 0444, d_cpu,
7213                                tr, cpu, &tracing_buffers_fops);
7214
7215        trace_create_cpu_file("stats", 0444, d_cpu,
7216                                tr, cpu, &tracing_stats_fops);
7217
7218        trace_create_cpu_file("buffer_size_kb", 0444, d_cpu,
7219                                tr, cpu, &tracing_entries_fops);
7220
7221#ifdef CONFIG_TRACER_SNAPSHOT
7222        trace_create_cpu_file("snapshot", 0644, d_cpu,
7223                                tr, cpu, &snapshot_fops);
7224
7225        trace_create_cpu_file("snapshot_raw", 0444, d_cpu,
7226                                tr, cpu, &snapshot_raw_fops);
7227#endif
7228}
7229
7230#ifdef CONFIG_FTRACE_SELFTEST
7231/* Let selftest have access to static functions in this file */
7232#include "trace_selftest.c"
7233#endif
7234
7235static ssize_t
7236trace_options_read(struct file *filp, char __user *ubuf, size_t cnt,
7237                        loff_t *ppos)
7238{
7239        struct trace_option_dentry *topt = filp->private_data;
7240        char *buf;
7241
7242        if (topt->flags->val & topt->opt->bit)
7243                buf = "1\n";
7244        else
7245                buf = "0\n";
7246
7247        return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
7248}
7249
7250static ssize_t
7251trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
7252                         loff_t *ppos)
7253{
7254        struct trace_option_dentry *topt = filp->private_data;
7255        unsigned long val;
7256        int ret;
7257
7258        ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
7259        if (ret)
7260                return ret;
7261
7262        if (val != 0 && val != 1)
7263                return -EINVAL;
7264
7265        if (!!(topt->flags->val & topt->opt->bit) != val) {
7266                mutex_lock(&trace_types_lock);
7267                ret = __set_tracer_option(topt->tr, topt->flags,
7268                                          topt->opt, !val);
7269                mutex_unlock(&trace_types_lock);
7270                if (ret)
7271                        return ret;
7272        }
7273
7274        *ppos += cnt;
7275
7276        return cnt;
7277}
7278
7279
7280static const struct file_operations trace_options_fops = {
7281        .open = tracing_open_generic,
7282        .read = trace_options_read,
7283        .write = trace_options_write,
7284        .llseek = generic_file_llseek,
7285};
7286
 7287/*
 7288 * In order to pass in both the trace_array descriptor and the index of
 7289 * the flag that a trace option file represents, the trace_array has a
 7290 * character array trace_flags_index[] that holds the index of the bit
 7291 * for the flag it represents: index[0] == 0, index[1] == 1, etc.
 7292 * The address of that flag's element in the array is what is passed to
 7293 * the option file's read/write callbacks.
 7294 *
 7295 * To extract both the index and the trace_array descriptor,
 7296 * get_tr_index() proceeds in two steps.  First,
 7297 *
 7298 *   idx = *ptr;
 7299 *
 7300 * recovers the index, because the pointer points at the array element
 7301 * whose value is its own index (remember, index[1] == 1).
 7302 *
 7303 * Second, subtracting that index from the pointer lands on the start
 7304 * of the array itself:
 7305 *
 7306 *   ptr - idx == &index[0]
 7307 *
 7308 * From there a simple container_of() gets us back to the trace_array
 7309 * descriptor (a stand-alone sketch follows get_tr_index() below).
 7310 */
7311static void get_tr_index(void *data, struct trace_array **ptr,
7312                         unsigned int *pindex)
7313{
7314        *pindex = *(unsigned char *)data;
7315
7316        *ptr = container_of(data - *pindex, struct trace_array,
7317                            trace_flags_index);
7318}
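
/*
 * A minimal user-space sketch of the round trip described above, under
 * the assumption that a flag file's private_data points at
 * trace_flags_index[idx] ("struct demo" is a made-up stand-in for
 * struct trace_array, and container_of is spelled out so this compiles
 * outside the kernel):
 */
#if 0
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct demo {
	int some_state;
	unsigned char index[8];		/* index[i] == i */
};

int main(void)
{
	struct demo d = { .some_state = 42 };
	unsigned char *data;
	unsigned int idx;

	for (idx = 0; idx < 8; idx++)
		d.index[idx] = idx;

	data = &d.index[5];	/* what the flag file's private_data holds */

	idx = *data;		/* step one: recover the index (5) */
	/* step two: back up to &index[0], then container_of() */
	printf("idx=%u state=%d\n", idx,
	       container_of(data - idx, struct demo, index)->some_state);
	return 0;
}
#endif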
7319
7320static ssize_t
7321trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt,
7322                        loff_t *ppos)
7323{
7324        void *tr_index = filp->private_data;
7325        struct trace_array *tr;
7326        unsigned int index;
7327        char *buf;
7328
7329        get_tr_index(tr_index, &tr, &index);
7330
7331        if (tr->trace_flags & (1 << index))
7332                buf = "1\n";
7333        else
7334                buf = "0\n";
7335
7336        return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
7337}
7338
7339static ssize_t
7340trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
7341                         loff_t *ppos)
7342{
7343        void *tr_index = filp->private_data;
7344        struct trace_array *tr;
7345        unsigned int index;
7346        unsigned long val;
7347        int ret;
7348
7349        get_tr_index(tr_index, &tr, &index);
7350
7351        ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
7352        if (ret)
7353                return ret;
7354
7355        if (val != 0 && val != 1)
7356                return -EINVAL;
7357
7358        mutex_lock(&trace_types_lock);
7359        ret = set_tracer_flag(tr, 1 << index, val);
7360        mutex_unlock(&trace_types_lock);
7361
7362        if (ret < 0)
7363                return ret;
7364
7365        *ppos += cnt;
7366
7367        return cnt;
7368}
7369
7370static const struct file_operations trace_options_core_fops = {
7371        .open = tracing_open_generic,
7372        .read = trace_options_core_read,
7373        .write = trace_options_core_write,
7374        .llseek = generic_file_llseek,
7375};
7376
7377struct dentry *trace_create_file(const char *name,
7378                                 umode_t mode,
7379                                 struct dentry *parent,
7380                                 void *data,
7381                                 const struct file_operations *fops)
7382{
7383        struct dentry *ret;
7384
7385        ret = tracefs_create_file(name, mode, parent, data, fops);
7386        if (!ret)
7387                pr_warn("Could not create tracefs '%s' entry\n", name);
7388
7389        return ret;
7390}
7391
7392
7393static struct dentry *trace_options_init_dentry(struct trace_array *tr)
7394{
7395        struct dentry *d_tracer;
7396
7397        if (tr->options)
7398                return tr->options;
7399
7400        d_tracer = tracing_get_dentry(tr);
7401        if (IS_ERR(d_tracer))
7402                return NULL;
7403
7404        tr->options = tracefs_create_dir("options", d_tracer);
7405        if (!tr->options) {
7406                pr_warn("Could not create tracefs directory 'options'\n");
7407                return NULL;
7408        }
7409
7410        return tr->options;
7411}
7412
7413static void
7414create_trace_option_file(struct trace_array *tr,
7415                         struct trace_option_dentry *topt,
7416                         struct tracer_flags *flags,
7417                         struct tracer_opt *opt)
7418{
7419        struct dentry *t_options;
7420
7421        t_options = trace_options_init_dentry(tr);
7422        if (!t_options)
7423                return;
7424
7425        topt->flags = flags;
7426        topt->opt = opt;
7427        topt->tr = tr;
7428
7429        topt->entry = trace_create_file(opt->name, 0644, t_options, topt,
7430                                    &trace_options_fops);
7431
7432}
7433
7434static void
7435create_trace_option_files(struct trace_array *tr, struct tracer *tracer)
7436{
7437        struct trace_option_dentry *topts;
7438        struct trace_options *tr_topts;
7439        struct tracer_flags *flags;
7440        struct tracer_opt *opts;
7441        int cnt;
7442        int i;
7443
7444        if (!tracer)
7445                return;
7446
7447        flags = tracer->flags;
7448
7449        if (!flags || !flags->opts)
7450                return;
7451
7452        /*
7453         * If this is an instance, only create flags for tracers
7454         * the instance may have.
7455         */
7456        if (!trace_ok_for_array(tracer, tr))
7457                return;
7458
7459        for (i = 0; i < tr->nr_topts; i++) {
 7460                /* Make sure there are no duplicate flags. */
7461                if (WARN_ON_ONCE(tr->topts[i].tracer->flags == tracer->flags))
7462                        return;
7463        }
7464
7465        opts = flags->opts;
7466
7467        for (cnt = 0; opts[cnt].name; cnt++)
7468                ;
7469
7470        topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL);
7471        if (!topts)
7472                return;
7473
7474        tr_topts = krealloc(tr->topts, sizeof(*tr->topts) * (tr->nr_topts + 1),
7475                            GFP_KERNEL);
7476        if (!tr_topts) {
7477                kfree(topts);
7478                return;
7479        }
7480
7481        tr->topts = tr_topts;
7482        tr->topts[tr->nr_topts].tracer = tracer;
7483        tr->topts[tr->nr_topts].topts = topts;
7484        tr->nr_topts++;
7485
7486        for (cnt = 0; opts[cnt].name; cnt++) {
7487                create_trace_option_file(tr, &topts[cnt], flags,
7488                                         &opts[cnt]);
7489                WARN_ONCE(topts[cnt].entry == NULL,
7490                          "Failed to create trace option: %s",
7491                          opts[cnt].name);
7492        }
7493}
7494
7495static struct dentry *
7496create_trace_option_core_file(struct trace_array *tr,
7497                              const char *option, long index)
7498{
7499        struct dentry *t_options;
7500
7501        t_options = trace_options_init_dentry(tr);
7502        if (!t_options)
7503                return NULL;
7504
7505        return trace_create_file(option, 0644, t_options,
7506                                 (void *)&tr->trace_flags_index[index],
7507                                 &trace_options_core_fops);
7508}
7509
7510static void create_trace_options_dir(struct trace_array *tr)
7511{
7512        struct dentry *t_options;
7513        bool top_level = tr == &global_trace;
7514        int i;
7515
7516        t_options = trace_options_init_dentry(tr);
7517        if (!t_options)
7518                return;
7519
7520        for (i = 0; trace_options[i]; i++) {
7521                if (top_level ||
7522                    !((1 << i) & TOP_LEVEL_TRACE_FLAGS))
7523                        create_trace_option_core_file(tr, trace_options[i], i);
7524        }
7525}
7526
7527static ssize_t
7528rb_simple_read(struct file *filp, char __user *ubuf,
7529               size_t cnt, loff_t *ppos)
7530{
7531        struct trace_array *tr = filp->private_data;
7532        char buf[64];
7533        int r;
7534
7535        r = tracer_tracing_is_on(tr);
7536        r = sprintf(buf, "%d\n", r);
7537
7538        return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
7539}
7540
7541static ssize_t
7542rb_simple_write(struct file *filp, const char __user *ubuf,
7543                size_t cnt, loff_t *ppos)
7544{
7545        struct trace_array *tr = filp->private_data;
7546        struct ring_buffer *buffer = tr->trace_buffer.buffer;
7547        unsigned long val;
7548        int ret;
7549
7550        ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
7551        if (ret)
7552                return ret;
7553
7554        if (buffer) {
7555                mutex_lock(&trace_types_lock);
7556                if (val) {
7557                        tracer_tracing_on(tr);
7558                        if (tr->current_trace->start)
7559                                tr->current_trace->start(tr);
7560                } else {
7561                        tracer_tracing_off(tr);
7562                        if (tr->current_trace->stop)
7563                                tr->current_trace->stop(tr);
7564                }
7565                mutex_unlock(&trace_types_lock);
7566        }
7567
7568        (*ppos)++;
7569
7570        return cnt;
7571}
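
/*
 * rb_simple_read()/rb_simple_write() back the "tracing_on" file; for
 * example:
 *
 *   echo 0 > tracing_on   # stop recording, calls the tracer's ->stop()
 *   echo 1 > tracing_on   # resume recording, calls the tracer's ->start()
 *   cat tracing_on        # report the current state, "0" or "1"
 */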
7572
7573static const struct file_operations rb_simple_fops = {
7574        .open           = tracing_open_generic_tr,
7575        .read           = rb_simple_read,
7576        .write          = rb_simple_write,
7577        .release        = tracing_release_generic_tr,
7578        .llseek         = default_llseek,
7579};
7580
7581struct dentry *trace_instance_dir;
7582
7583static void
7584init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer);
7585
7586static int
7587allocate_trace_buffer(struct trace_array *tr, struct trace_buffer *buf, int size)
7588{
7589        enum ring_buffer_flags rb_flags;
7590
7591        rb_flags = tr->trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;
7592
7593        buf->tr = tr;
7594
7595        buf->buffer = ring_buffer_alloc(size, rb_flags);
7596        if (!buf->buffer)
7597                return -ENOMEM;
7598
7599        buf->data = alloc_percpu(struct trace_array_cpu);
7600        if (!buf->data) {
7601                ring_buffer_free(buf->buffer);
7602                return -ENOMEM;
7603        }
7604
7605        /* Allocate the first page for all buffers */
7606        set_buffer_entries(&tr->trace_buffer,
7607                           ring_buffer_size(tr->trace_buffer.buffer, 0));
7608
7609        return 0;
7610}
7611
7612static int allocate_trace_buffers(struct trace_array *tr, int size)
7613{
7614        int ret;
7615
7616        ret = allocate_trace_buffer(tr, &tr->trace_buffer, size);
7617        if (ret)
7618                return ret;
7619
7620#ifdef CONFIG_TRACER_MAX_TRACE
7621        ret = allocate_trace_buffer(tr, &tr->max_buffer,
7622                                    allocate_snapshot ? size : 1);
7623        if (WARN_ON(ret)) {
7624                ring_buffer_free(tr->trace_buffer.buffer);
7625                free_percpu(tr->trace_buffer.data);
7626                return -ENOMEM;
7627        }
7628        tr->allocated_snapshot = allocate_snapshot;
7629
7630        /*
7631         * Only the top level trace array gets its snapshot allocated
7632         * from the kernel command line.
7633         */
7634        allocate_snapshot = false;
7635#endif
7636        return 0;
7637}
7638
7639static void free_trace_buffer(struct trace_buffer *buf)
7640{
7641        if (buf->buffer) {
7642                ring_buffer_free(buf->buffer);
7643                buf->buffer = NULL;
7644                free_percpu(buf->data);
7645                buf->data = NULL;
7646        }
7647}
7648
7649static void free_trace_buffers(struct trace_array *tr)
7650{
7651        if (!tr)
7652                return;
7653
7654        free_trace_buffer(&tr->trace_buffer);
7655
7656#ifdef CONFIG_TRACER_MAX_TRACE
7657        free_trace_buffer(&tr->max_buffer);
7658#endif
7659}
7660
7661static void init_trace_flags_index(struct trace_array *tr)
7662{
7663        int i;
7664
7665        /* Used by the trace options files */
7666        for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++)
7667                tr->trace_flags_index[i] = i;
7668}
7669
7670static void __update_tracer_options(struct trace_array *tr)
7671{
7672        struct tracer *t;
7673
7674        for (t = trace_types; t; t = t->next)
7675                add_tracer_options(tr, t);
7676}
7677
7678static void update_tracer_options(struct trace_array *tr)
7679{
7680        mutex_lock(&trace_types_lock);
7681        __update_tracer_options(tr);
7682        mutex_unlock(&trace_types_lock);
7683}
7684
7685static int instance_mkdir(const char *name)
7686{
7687        struct trace_array *tr;
7688        int ret;
7689
7690        mutex_lock(&trace_types_lock);
7691
7692        ret = -EEXIST;
7693        list_for_each_entry(tr, &ftrace_trace_arrays, list) {
7694                if (tr->name && strcmp(tr->name, name) == 0)
7695                        goto out_unlock;
7696        }
7697
7698        ret = -ENOMEM;
7699        tr = kzalloc(sizeof(*tr), GFP_KERNEL);
7700        if (!tr)
7701                goto out_unlock;
7702
7703        tr->name = kstrdup(name, GFP_KERNEL);
7704        if (!tr->name)
7705                goto out_free_tr;
7706
7707        if (!alloc_cpumask_var(&tr->tracing_cpumask, GFP_KERNEL))
7708                goto out_free_tr;
7709
7710        tr->trace_flags = global_trace.trace_flags & ~ZEROED_TRACE_FLAGS;
7711
7712        cpumask_copy(tr->tracing_cpumask, cpu_all_mask);
7713
7714        raw_spin_lock_init(&tr->start_lock);
7715
7716        tr->max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
7717
7718        tr->current_trace = &nop_trace;
7719
7720        INIT_LIST_HEAD(&tr->systems);
7721        INIT_LIST_HEAD(&tr->events);
7722
7723        if (allocate_trace_buffers(tr, trace_buf_size) < 0)
7724                goto out_free_tr;
7725
7726        tr->dir = tracefs_create_dir(name, trace_instance_dir);
7727        if (!tr->dir)
7728                goto out_free_tr;
7729
7730        ret = event_trace_add_tracer(tr->dir, tr);
7731        if (ret) {
7732                tracefs_remove_recursive(tr->dir);
7733                goto out_free_tr;
7734        }
7735
7736        ftrace_init_trace_array(tr);
7737
7738        init_tracer_tracefs(tr, tr->dir);
7739        init_trace_flags_index(tr);
7740        __update_tracer_options(tr);
7741
7742        list_add(&tr->list, &ftrace_trace_arrays);
7743
7744        mutex_unlock(&trace_types_lock);
7745
7746        return 0;
7747
7748 out_free_tr:
7749        free_trace_buffers(tr);
7750        free_cpumask_var(tr->tracing_cpumask);
7751        kfree(tr->name);
7752        kfree(tr);
7753
7754 out_unlock:
7755        mutex_unlock(&trace_types_lock);
7756
7757        return ret;
7758
7759}
7760
7761static int instance_rmdir(const char *name)
7762{
7763        struct trace_array *tr;
7764        int found = 0;
7765        int ret;
7766        int i;
7767
7768        mutex_lock(&trace_types_lock);
7769
7770        ret = -ENODEV;
7771        list_for_each_entry(tr, &ftrace_trace_arrays, list) {
7772                if (tr->name && strcmp(tr->name, name) == 0) {
7773                        found = 1;
7774                        break;
7775                }
7776        }
7777        if (!found)
7778                goto out_unlock;
7779
7780        ret = -EBUSY;
7781        if (tr->ref || (tr->current_trace && tr->current_trace->ref))
7782                goto out_unlock;
7783
7784        list_del(&tr->list);
7785
7786        /* Disable all the flags that were enabled coming in */
7787        for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++) {
7788                if ((1 << i) & ZEROED_TRACE_FLAGS)
7789                        set_tracer_flag(tr, 1 << i, 0);
7790        }
7791
7792        tracing_set_nop(tr);
7793        clear_ftrace_function_probes(tr);
7794        event_trace_del_tracer(tr);
7795        ftrace_clear_pids(tr);
7796        ftrace_destroy_function_files(tr);
7797        tracefs_remove_recursive(tr->dir);
7798        free_trace_buffers(tr);
7799
7800        for (i = 0; i < tr->nr_topts; i++) {
7801                kfree(tr->topts[i].topts);
7802        }
7803        kfree(tr->topts);
7804
7805        free_cpumask_var(tr->tracing_cpumask);
7806        kfree(tr->name);
7807        kfree(tr);
7808
7809        ret = 0;
7810
7811 out_unlock:
7812        mutex_unlock(&trace_types_lock);
7813
7814        return ret;
7815}
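
/*
 * instance_mkdir() and instance_rmdir() back the tracefs "instances"
 * directory; a typical session (paths assume tracefs is mounted at
 * /sys/kernel/tracing):
 *
 *   mkdir /sys/kernel/tracing/instances/foo   # create a new trace_array
 *   rmdir /sys/kernel/tracing/instances/foo   # tear it down; fails with
 *                                             # -EBUSY while it is in use
 */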
7816
7817static __init void create_trace_instances(struct dentry *d_tracer)
7818{
7819        trace_instance_dir = tracefs_create_instance_dir("instances", d_tracer,
7820                                                         instance_mkdir,
7821                                                         instance_rmdir);
7822        if (WARN_ON(!trace_instance_dir))
7823                return;
7824}
7825
7826static void
7827init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer)
7828{
7829        int cpu;
7830
7831        trace_create_file("available_tracers", 0444, d_tracer,
7832                        tr, &show_traces_fops);
7833
7834        trace_create_file("current_tracer", 0644, d_tracer,
7835                        tr, &set_tracer_fops);
7836
7837        trace_create_file("tracing_cpumask", 0644, d_tracer,
7838                          tr, &tracing_cpumask_fops);
7839
7840        trace_create_file("trace_options", 0644, d_tracer,
7841                          tr, &tracing_iter_fops);
7842
7843        trace_create_file("trace", 0644, d_tracer,
7844                          tr, &tracing_fops);
7845
7846        trace_create_file("trace_pipe", 0444, d_tracer,
7847                          tr, &tracing_pipe_fops);
7848
7849        trace_create_file("buffer_size_kb", 0644, d_tracer,
7850                          tr, &tracing_entries_fops);
7851
7852        trace_create_file("buffer_total_size_kb", 0444, d_tracer,
7853                          tr, &tracing_total_entries_fops);
7854
7855        trace_create_file("free_buffer", 0200, d_tracer,
7856                          tr, &tracing_free_buffer_fops);
7857
7858        trace_create_file("trace_marker", 0220, d_tracer,
7859                          tr, &tracing_mark_fops);
7860
7861        trace_create_file("trace_marker_raw", 0220, d_tracer,
7862                          tr, &tracing_mark_raw_fops);
7863
7864        trace_create_file("trace_clock", 0644, d_tracer, tr,
7865                          &trace_clock_fops);
7866
7867        trace_create_file("tracing_on", 0644, d_tracer,
7868                          tr, &rb_simple_fops);
7869
7870        create_trace_options_dir(tr);
7871
7872#if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
7873        trace_create_file("tracing_max_latency", 0644, d_tracer,
7874                        &tr->max_latency, &tracing_max_lat_fops);
7875#endif
7876
7877        if (ftrace_create_function_files(tr, d_tracer))
7878                WARN(1, "Could not allocate function filter files");
7879
7880#ifdef CONFIG_TRACER_SNAPSHOT
7881        trace_create_file("snapshot", 0644, d_tracer,
7882                          tr, &snapshot_fops);
7883#endif
7884
7885        for_each_tracing_cpu(cpu)
7886                tracing_init_tracefs_percpu(tr, cpu);
7887
7888        ftrace_init_tracefs(tr, d_tracer);
7889}
7890
 7891static struct vfsmount *trace_automount(struct dentry *mntpt, void *ignore)
7892{
7893        struct vfsmount *mnt;
7894        struct file_system_type *type;
7895
7896        /*
7897         * To maintain backward compatibility for tools that mount
7898         * debugfs to get to the tracing facility, tracefs is automatically
7899         * mounted to the debugfs/tracing directory.
7900         */
7901        type = get_fs_type("tracefs");
7902        if (!type)
7903                return NULL;
7904        mnt = vfs_submount(mntpt, type, "tracefs", NULL);
7905        put_filesystem(type);
7906        if (IS_ERR(mnt))
7907                return NULL;
7908        mntget(mnt);
7909
7910        return mnt;
7911}
7912
7913/**
7914 * tracing_init_dentry - initialize top level trace array
7915 *
7916 * This is called when creating files or directories in the tracing
7917 * directory. It is called via fs_initcall() by any of the boot up code
7918 * and expects to return the dentry of the top level tracing directory.
7919 */
7920struct dentry *tracing_init_dentry(void)
7921{
7922        struct trace_array *tr = &global_trace;
7923
 7924        /* The top level trace array uses NULL as parent */
7925        if (tr->dir)
7926                return NULL;
7927
7928        if (WARN_ON(!tracefs_initialized()) ||
7929                (IS_ENABLED(CONFIG_DEBUG_FS) &&
7930                 WARN_ON(!debugfs_initialized())))
7931                return ERR_PTR(-ENODEV);
7932
7933        /*
7934         * As there may still be users that expect the tracing
7935         * files to exist in debugfs/tracing, we must automount
7936         * the tracefs file system there, so older tools still
 7937         * work with the newer kernel.
7938         */
7939        tr->dir = debugfs_create_automount("tracing", NULL,
7940                                           trace_automount, NULL);
7941        if (!tr->dir) {
7942                pr_warn_once("Could not create debugfs directory 'tracing'\n");
7943                return ERR_PTR(-ENOMEM);
7944        }
7945
7946        return NULL;
7947}
7948
7949extern struct trace_eval_map *__start_ftrace_eval_maps[];
7950extern struct trace_eval_map *__stop_ftrace_eval_maps[];
7951
7952static void __init trace_eval_init(void)
7953{
7954        int len;
7955
7956        len = __stop_ftrace_eval_maps - __start_ftrace_eval_maps;
7957        trace_insert_eval_map(NULL, __start_ftrace_eval_maps, len);
7958}
7959
7960#ifdef CONFIG_MODULES
7961static void trace_module_add_evals(struct module *mod)
7962{
7963        if (!mod->num_trace_evals)
7964                return;
7965
 7966        /*
 7967         * Modules with bad taint do not have events created;
 7968         * do not bother with their eval maps either.
 7969         */
7970        if (trace_module_has_bad_taint(mod))
7971                return;
7972
7973        trace_insert_eval_map(mod, mod->trace_evals, mod->num_trace_evals);
7974}
7975
7976#ifdef CONFIG_TRACE_EVAL_MAP_FILE
7977static void trace_module_remove_evals(struct module *mod)
7978{
7979        union trace_eval_map_item *map;
7980        union trace_eval_map_item **last = &trace_eval_maps;
7981
7982        if (!mod->num_trace_evals)
7983                return;
7984
7985        mutex_lock(&trace_eval_mutex);
7986
7987        map = trace_eval_maps;
7988
7989        while (map) {
7990                if (map->head.mod == mod)
7991                        break;
7992                map = trace_eval_jmp_to_tail(map);
7993                last = &map->tail.next;
7994                map = map->tail.next;
7995        }
7996        if (!map)
7997                goto out;
7998
7999        *last = trace_eval_jmp_to_tail(map)->tail.next;
8000        kfree(map);
8001 out:
8002        mutex_unlock(&trace_eval_mutex);
8003}
8004#else
8005static inline void trace_module_remove_evals(struct module *mod) { }
8006#endif /* CONFIG_TRACE_EVAL_MAP_FILE */
8007
8008static int trace_module_notify(struct notifier_block *self,
8009                               unsigned long val, void *data)
8010{
8011        struct module *mod = data;
8012
8013        switch (val) {
8014        case MODULE_STATE_COMING:
8015                trace_module_add_evals(mod);
8016                break;
8017        case MODULE_STATE_GOING:
8018                trace_module_remove_evals(mod);
8019                break;
8020        }
8021
8022        return 0;
8023}
8024
8025static struct notifier_block trace_module_nb = {
8026        .notifier_call = trace_module_notify,
8027        .priority = 0,
8028};
8029#endif /* CONFIG_MODULES */
8030
8031static __init int tracer_init_tracefs(void)
8032{
8033        struct dentry *d_tracer;
8034
8035        trace_access_lock_init();
8036
8037        d_tracer = tracing_init_dentry();
8038        if (IS_ERR(d_tracer))
8039                return 0;
8040
8041        init_tracer_tracefs(&global_trace, d_tracer);
8042        ftrace_init_tracefs_toplevel(&global_trace, d_tracer);
8043
8044        trace_create_file("tracing_thresh", 0644, d_tracer,
8045                        &global_trace, &tracing_thresh_fops);
8046
8047        trace_create_file("README", 0444, d_tracer,
8048                        NULL, &tracing_readme_fops);
8049
8050        trace_create_file("saved_cmdlines", 0444, d_tracer,
8051                        NULL, &tracing_saved_cmdlines_fops);
8052
8053        trace_create_file("saved_cmdlines_size", 0644, d_tracer,
8054                          NULL, &tracing_saved_cmdlines_size_fops);
8055
8056        trace_create_file("saved_tgids", 0444, d_tracer,
8057                        NULL, &tracing_saved_tgids_fops);
8058
8059        trace_eval_init();
8060
8061        trace_create_eval_file(d_tracer);
8062
8063#ifdef CONFIG_MODULES
8064        register_module_notifier(&trace_module_nb);
8065#endif
8066
8067#ifdef CONFIG_DYNAMIC_FTRACE
8068        trace_create_file("dyn_ftrace_total_info", 0444, d_tracer,
8069                        &ftrace_update_tot_cnt, &tracing_dyn_info_fops);
8070#endif
8071
8072        create_trace_instances(d_tracer);
8073
8074        update_tracer_options(&global_trace);
8075
8076        return 0;
8077}
8078
8079static int trace_panic_handler(struct notifier_block *this,
8080                               unsigned long event, void *unused)
8081{
8082        if (ftrace_dump_on_oops)
8083                ftrace_dump(ftrace_dump_on_oops);
8084        return NOTIFY_OK;
8085}
8086
8087static struct notifier_block trace_panic_notifier = {
8088        .notifier_call  = trace_panic_handler,
8089        .next           = NULL,
8090        .priority       = 150   /* priority: INT_MAX >= x >= 0 */
8091};
8092
8093static int trace_die_handler(struct notifier_block *self,
8094                             unsigned long val,
8095                             void *data)
8096{
8097        switch (val) {
8098        case DIE_OOPS:
8099                if (ftrace_dump_on_oops)
8100                        ftrace_dump(ftrace_dump_on_oops);
8101                break;
8102        default:
8103                break;
8104        }
8105        return NOTIFY_OK;
8106}
8107
8108static struct notifier_block trace_die_notifier = {
8109        .notifier_call = trace_die_handler,
8110        .priority = 200
8111};
8112
 8113/*
 8114 * printk is limited to a maximum of 1024 bytes; we really don't need
 8115 * it that big.  Nothing should be printing 1000 characters anyway.
 8116 */
8117#define TRACE_MAX_PRINT         1000
8118
8119/*
8120 * Define here KERN_TRACE so that we have one place to modify
8121 * it if we decide to change what log level the ftrace dump
8122 * should be at.
8123 */
8124#define KERN_TRACE              KERN_EMERG
8125
8126void
8127trace_printk_seq(struct trace_seq *s)
8128{
8129        /* Probably should print a warning here. */
8130        if (s->seq.len >= TRACE_MAX_PRINT)
8131                s->seq.len = TRACE_MAX_PRINT;
8132
8133        /*
8134         * More paranoid code. Although the buffer size is set to
8135         * PAGE_SIZE, and TRACE_MAX_PRINT is 1000, this is just
8136         * an extra layer of protection.
8137         */
8138        if (WARN_ON_ONCE(s->seq.len >= s->seq.size))
8139                s->seq.len = s->seq.size - 1;
8140
 8141        /* Should be NUL-terminated already, but we are paranoid. */
8142        s->buffer[s->seq.len] = 0;
8143
8144        printk(KERN_TRACE "%s", s->buffer);
8145
8146        trace_seq_init(s);
8147}
8148
8149void trace_init_global_iter(struct trace_iterator *iter)
8150{
8151        iter->tr = &global_trace;
8152        iter->trace = iter->tr->current_trace;
8153        iter->cpu_file = RING_BUFFER_ALL_CPUS;
8154        iter->trace_buffer = &global_trace.trace_buffer;
8155
8156        if (iter->trace && iter->trace->open)
8157                iter->trace->open(iter);
8158
8159        /* Annotate start of buffers if we had overruns */
8160        if (ring_buffer_overruns(iter->trace_buffer->buffer))
8161                iter->iter_flags |= TRACE_FILE_ANNOTATE;
8162
8163        /* Output in nanoseconds only if we are using a clock in nanoseconds. */
8164        if (trace_clocks[iter->tr->clock_id].in_ns)
8165                iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
8166}
8167
void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
{
        /* use static because iter can be a bit big for the stack */
        static struct trace_iterator iter;
        static atomic_t dump_running;
        struct trace_array *tr = &global_trace;
        unsigned int old_userobj;
        unsigned long flags;
        int cnt = 0, cpu;

        /* Only allow one dump user at a time. */
        if (atomic_inc_return(&dump_running) != 1) {
                atomic_dec(&dump_running);
                return;
        }

        /*
         * Always turn off tracing when we dump.
         * We don't need to show trace output of what happens
         * between multiple crashes.
         *
         * If the user does a sysrq-z, then they can re-enable
         * tracing with echo 1 > tracing_on.
         */
        tracing_off();

        local_irq_save(flags);

        /* Simulate the iterator */
        trace_init_global_iter(&iter);

        for_each_tracing_cpu(cpu) {
                atomic_inc(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
        }

        old_userobj = tr->trace_flags & TRACE_ITER_SYM_USEROBJ;

        /* don't look at user memory in panic mode */
        tr->trace_flags &= ~TRACE_ITER_SYM_USEROBJ;

        switch (oops_dump_mode) {
        case DUMP_ALL:
                iter.cpu_file = RING_BUFFER_ALL_CPUS;
                break;
        case DUMP_ORIG:
                iter.cpu_file = raw_smp_processor_id();
                break;
        case DUMP_NONE:
                goto out_enable;
        default:
                printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n");
                iter.cpu_file = RING_BUFFER_ALL_CPUS;
        }

        printk(KERN_TRACE "Dumping ftrace buffer:\n");

        /* Did function tracer already get disabled? */
        if (ftrace_is_dead()) {
                printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
                printk("#          MAY BE MISSING FUNCTION EVENTS\n");
        }

        /*
         * We need to stop all tracing on all CPUs to read the
         * next buffer. This is a bit expensive, but is not done
         * often. We read everything we can, and then release the
         * locks again.
         */

        while (!trace_empty(&iter)) {

                if (!cnt)
                        printk(KERN_TRACE "---------------------------------\n");

                cnt++;

                /* reset all but tr, trace, and overruns */
                memset(&iter.seq, 0,
                       sizeof(struct trace_iterator) -
                       offsetof(struct trace_iterator, seq));
                iter.iter_flags |= TRACE_FILE_LAT_FMT;
                iter.pos = -1;

                if (trace_find_next_entry_inc(&iter) != NULL) {
                        int ret;

                        ret = print_trace_line(&iter);
                        if (ret != TRACE_TYPE_NO_CONSUME)
                                trace_consume(&iter);
                }
                touch_nmi_watchdog();

                trace_printk_seq(&iter.seq);
        }

        if (!cnt)
                printk(KERN_TRACE "   (ftrace buffer empty)\n");
        else
                printk(KERN_TRACE "---------------------------------\n");

 out_enable:
        tr->trace_flags |= old_userobj;

        for_each_tracing_cpu(cpu) {
                atomic_dec(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
        }
        atomic_dec(&dump_running);
        local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(ftrace_dump);

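/*
 * Bootstrap the core tracing state: the CPU masks, the trace_printk
 * buffers (only if trace_printk() is used somewhere in the kernel),
 * the global ring buffer, the nop tracer, and the panic/die notifiers
 * above.  Called once from early_trace_init().
 */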
__init static int tracer_alloc_buffers(void)
{
        int ring_buf_size;
        int ret = -ENOMEM;

        /*
         * Make sure we don't accidentally add more trace options
         * than we have bits for.
         */
        BUILD_BUG_ON(TRACE_ITER_LAST_BIT > TRACE_FLAGS_MAX_SIZE);

        if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
                goto out;

        if (!alloc_cpumask_var(&global_trace.tracing_cpumask, GFP_KERNEL))
                goto out_free_buffer_mask;

        /* Only allocate trace_printk buffers if a trace_printk exists */
        if (__stop___trace_bprintk_fmt != __start___trace_bprintk_fmt)
                /* Must be called before global_trace.buffer is allocated */
                trace_printk_init_buffers();

        /* To save memory, keep the ring buffer size to its minimum */
        if (ring_buffer_expanded)
                ring_buf_size = trace_buf_size;
        else
                ring_buf_size = 1;

        cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
        cpumask_copy(global_trace.tracing_cpumask, cpu_all_mask);

        raw_spin_lock_init(&global_trace.start_lock);

        /*
         * The prepare callback allocates some memory for the ring
         * buffer. We don't free the buffer when the CPU goes down:
         * if we did, the user would lose any trace that was in it.
         * The memory is only removed once the "instance" is removed.
         */
        ret = cpuhp_setup_state_multi(CPUHP_TRACE_RB_PREPARE,
                                      "trace/RB:prepare", trace_rb_cpu_prepare,
                                      NULL);
        if (ret < 0)
                goto out_free_cpumask;
        /* Used for event triggers */
        ret = -ENOMEM;
        temp_buffer = ring_buffer_alloc(PAGE_SIZE, RB_FL_OVERWRITE);
        if (!temp_buffer)
                goto out_rm_hp_state;

        if (trace_create_savedcmd() < 0)
                goto out_free_temp_buffer;

        /* TODO: make the number of buffers hot pluggable with CPUs */
        if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) {
                printk(KERN_ERR "tracer: failed to allocate ring buffer!\n");
                WARN_ON(1);
                goto out_free_savedcmd;
        }

        if (global_trace.buffer_disabled)
                tracing_off();

        if (trace_boot_clock) {
                ret = tracing_set_clock(&global_trace, trace_boot_clock);
                if (ret < 0)
                        pr_warn("Trace clock %s not defined, going back to default\n",
                                trace_boot_clock);
        }

        /*
         * register_tracer() might reference current_trace, so it
         * needs to be set before we register anything. This is
         * just a bootstrap of current_trace anyway.
         */
        global_trace.current_trace = &nop_trace;

        global_trace.max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

        ftrace_init_global_array_ops(&global_trace);

        init_trace_flags_index(&global_trace);

        register_tracer(&nop_trace);

        /* Function tracing may start here (via kernel command line) */
        init_function_trace();

        /* All seems OK, enable tracing */
        tracing_disabled = 0;

        atomic_notifier_chain_register(&panic_notifier_list,
                                       &trace_panic_notifier);

        register_die_notifier(&trace_die_notifier);

        global_trace.flags = TRACE_ARRAY_FL_GLOBAL;

        INIT_LIST_HEAD(&global_trace.systems);
        INIT_LIST_HEAD(&global_trace.events);
        list_add(&global_trace.list, &ftrace_trace_arrays);

        apply_trace_boot_options();

        register_snapshot_cmd();

        return 0;

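/* Error unwinding: release resources in the reverse order of allocation. */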
out_free_savedcmd:
        free_saved_cmdlines_buffer(savedcmd);
out_free_temp_buffer:
        ring_buffer_free(temp_buffer);
out_rm_hp_state:
        cpuhp_remove_multi_state(CPUHP_TRACE_RB_PREPARE);
out_free_cpumask:
        free_cpumask_var(global_trace.tracing_cpumask);
out_free_buffer_mask:
        free_cpumask_var(tracing_buffer_mask);
out:
        return ret;
}

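/*
 * Called from start_kernel() as soon as the slab allocator is usable,
 * long before tracefs exists.  Setting the buffers up this early is
 * what makes boot-time tracing (e.g. the ftrace= command line option)
 * possible.
 */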
void __init early_trace_init(void)
{
        if (tracepoint_printk) {
                tracepoint_print_iter =
                        kmalloc(sizeof(*tracepoint_print_iter), GFP_KERNEL);
                if (WARN_ON(!tracepoint_print_iter))
                        tracepoint_printk = 0;
                else
                        static_key_enable(&tracepoint_printk_key.key);
        }
        tracer_alloc_buffers();
}

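/* Called from start_kernel(), a little after early_trace_init(). */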
void __init trace_init(void)
{
        trace_event_init();
}

__init static int clear_boot_tracer(void)
{
        /*
         * The name of the default bootup tracer lives in an init
         * section that is freed after boot. This function runs as a
         * late initcall: if the boot tracer was never registered by
         * now, clear the pointer so that a later registration cannot
         * access memory that is about to be freed.
         */
        if (!default_bootup_tracer)
                return 0;

        printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n",
               default_bootup_tracer);
        default_bootup_tracer = NULL;

        return 0;
}

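/*
 * tracer_init_tracefs() creates the tracefs control files once the VFS
 * is up; clear_boot_tracer() must run after every other initcall has
 * had its chance to register the tracer named on the command line.
 */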
fs_initcall(tracer_init_tracefs);
late_initcall_sync(clear_boot_tracer);
